@@ -706,7 +706,14 @@ static inline bool kvm_realm_is_created(struct kvm *kvm)
static inline bool vcpu_is_rec(struct kvm_vcpu *vcpu)
{
+ if (static_branch_unlikely(&kvm_rme_is_available))
+ return vcpu_has_feature(vcpu, KVM_ARM_VCPU_REC);
return false;
}
+static inline bool kvm_arm_rec_finalized(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.rec.mpidr != INVALID_HWID;
+}
+
#endif /* __ARM64_KVM_EMULATE_H__ */
@@ -828,6 +828,9 @@ struct kvm_vcpu_arch {
/* Per-vcpu CCSIDR override or NULL */
u32 *ccsidr;
+
+ /* Realm metadata */
+ struct realm_rec rec;
};
/*
@@ -6,6 +6,7 @@
#ifndef __ASM_KVM_RME_H
#define __ASM_KVM_RME_H
+#include <asm/rmi_smc.h>
#include <uapi/linux/kvm.h>
/**
@@ -65,6 +66,30 @@ struct realm {
unsigned int ia_bits;
};
+/**
+ * struct realm_rec - Additional per-VCPU data for a Realm
+ *
+ * @mpidr: MPIDR (Multiprocessor Affinity Register) value to identify this VCPU
+ * @rec_page: Kernel VA of the RMM's private page for this REC
+ * @aux_pages: Additional pages private to the RMM for this REC
+ * @run: Kernel VA of the RmiRecRun structure shared with the RMM
+ */
+struct realm_rec {
+ unsigned long mpidr;
+ void *rec_page;
+ /*
+ * REC_PARAMS_AUX_GRANULES is the maximum number of granules that the
+ * RMM can require. By using that to size the array we know it will be
+ * big enough, as the page size is always at least as large as the
+ * granule size. With a page size larger than 4k (or an RMM which
+ * requires fewer auxiliary granules) the array will be bigger than
+ * needed, but the extra memory required is small and this keeps the
+ * code cleaner.
+ */
+ struct page *aux_pages[REC_PARAMS_AUX_GRANULES];
+ struct rec_run *run;
+};
+
void kvm_init_rme(void);
u32 kvm_realm_ipa_limit(void);
@@ -72,6 +97,8 @@ int kvm_realm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap);
int kvm_init_realm_vm(struct kvm *kvm);
void kvm_destroy_realm(struct kvm *kvm);
void kvm_realm_destroy_rtts(struct kvm *kvm, u32 ia_bits);
+int kvm_create_rec(struct kvm_vcpu *vcpu);
+void kvm_destroy_rec(struct kvm_vcpu *vcpu);
static inline bool kvm_realm_is_private_address(struct realm *realm,
unsigned long addr)
@@ -489,6 +489,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* Force users to call KVM_ARM_VCPU_INIT */
vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
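+ /* An invalid MPIDR marks the REC as not yet created (see kvm_arm_rec_finalized()) */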
+ vcpu->arch.rec.mpidr = INVALID_HWID;
+
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
/* Set up the timer */
@@ -1447,7 +1449,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
return -EINVAL;
}
-static unsigned long system_supported_vcpu_features(void)
+static unsigned long system_supported_vcpu_features(struct kvm *kvm)
{
unsigned long features = KVM_VCPU_VALID_FEATURES;
@@ -1468,6 +1470,9 @@ static unsigned long system_supported_vcpu_features(void)
if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
clear_bit(KVM_ARM_VCPU_HAS_EL2, &features);
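+ /* KVM_ARM_VCPU_REC is only valid for a Realm VM */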
+ if (!kvm_is_realm(kvm))
+ clear_bit(KVM_ARM_VCPU_REC, &features);
+
return features;
}
@@ -1485,7 +1490,7 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
return -ENOENT;
}
- if (features & ~system_supported_vcpu_features())
+ if (features & ~system_supported_vcpu_features(vcpu->kvm))
return -EINVAL;
/*
@@ -1507,6 +1512,10 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
return -EINVAL;
+ /* RME is incompatible with AArch32 */
+ if (test_bit(KVM_ARM_VCPU_REC, &features))
+ return -EINVAL;
+
return 0;
}
@@ -137,6 +137,11 @@ int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
return -EPERM;
return kvm_vcpu_finalize_sve(vcpu);
+ case KVM_ARM_VCPU_REC:
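+ /* Only valid for a Realm VM and a VCPU created with the REC feature */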
+ if (!kvm_is_realm(vcpu->kvm) || !vcpu_is_rec(vcpu))
+ return -EINVAL;
+
+ return kvm_create_rec(vcpu);
}
return -EINVAL;
@@ -147,6 +152,11 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
return false;
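+ /*
+ * A Realm VCPU is only finalized once its REC has been created and the
+ * Realm has been activated.
+ */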
+ if (kvm_is_realm(vcpu->kvm) &&
+ !(vcpu_is_rec(vcpu) && kvm_arm_rec_finalized(vcpu) &&
+ READ_ONCE(vcpu->kvm->arch.realm.state) == REALM_STATE_ACTIVE))
+ return false;
+
return true;
}
@@ -159,6 +169,7 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
kfree(sve_state);
kfree(vcpu->arch.ccsidr);
+ kvm_destroy_rec(vcpu);
}
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
@@ -474,6 +474,186 @@ void kvm_destroy_realm(struct kvm *kvm)
kvm_free_stage2_pgd(&kvm->arch.mmu);
}
+static void free_rec_aux(struct page **aux_pages,
+ unsigned int num_aux)
+{
+ unsigned int i, j;
+ unsigned int page_count = 0;
+
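+ /*
+ * A host page may back several RMM granules when PAGE_SIZE is larger
+ * than RMM_PAGE_SIZE, so undelegate every granule within a page before
+ * returning the page to the allocator.
+ */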
+ for (i = 0; i < num_aux;) {
+ struct page *aux_page = aux_pages[page_count++];
+ phys_addr_t aux_page_phys = page_to_phys(aux_page);
+ bool should_free = true;
+
+ for (j = 0; j < PAGE_SIZE && i < num_aux; j += RMM_PAGE_SIZE) {
+ if (WARN_ON(rmi_granule_undelegate(aux_page_phys)))
+ should_free = false;
+ aux_page_phys += RMM_PAGE_SIZE;
+ i++;
+ }
+ /* Only free if all the undelegate calls were successful */
+ if (should_free)
+ __free_page(aux_page);
+ }
+}
+
+static int alloc_rec_aux(struct page **aux_pages,
+ u64 *aux_phys_pages,
+ unsigned int num_aux)
+{
+ struct page *aux_page;
+ int page_count = 0;
+ unsigned int i, j;
+ int ret;
+
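+ /*
+ * Allocate host pages and delegate each RMM granule within them,
+ * recording the physical address of every granule in aux_phys_pages so
+ * it can be supplied in the REC parameters.
+ */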
+ for (i = 0; i < num_aux;) {
+ phys_addr_t aux_page_phys;
+
+ aux_page = alloc_page(GFP_KERNEL);
+ if (!aux_page) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ aux_page_phys = page_to_phys(aux_page);
+ for (j = 0; j < PAGE_SIZE && i < num_aux; j += RMM_PAGE_SIZE) {
+ if (rmi_granule_delegate(aux_page_phys)) {
+ ret = -ENXIO;
+ goto err_undelegate;
+ }
+ aux_phys_pages[i++] = aux_page_phys;
+ aux_page_phys += RMM_PAGE_SIZE;
+ }
+ aux_pages[page_count++] = aux_page;
+ }
+
+ return 0;
+err_undelegate:
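+ /* Undelegate the granules already delegated from the page that failed */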
+ while (j > 0) {
+ j -= RMM_PAGE_SIZE;
+ i--;
+ if (WARN_ON(rmi_granule_undelegate(aux_phys_pages[i]))) {
+ /* Leak the page if the undelegate fails */
+ goto out_err;
+ }
+ }
+ __free_page(aux_page);
+out_err:
+ free_rec_aux(aux_pages, i);
+ return ret;
+}
+
+int kvm_create_rec(struct kvm_vcpu *vcpu)
+{
+ struct user_pt_regs *vcpu_regs = vcpu_gp_regs(vcpu);
+ unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+ struct realm *realm = &vcpu->kvm->arch.realm;
+ struct realm_rec *rec = &vcpu->arch.rec;
+ unsigned long rec_page_phys;
+ struct rec_params *params;
+ int r, i;
+
+ if (kvm_realm_state(vcpu->kvm) != REALM_STATE_NEW)
+ return -ENOENT;
+
+ if (rec->run)
+ return -EBUSY;
+
+ /*
+ * The RMM will report PSCI v1.0 to Realms, and the KVM_ARM_VCPU_PSCI_0_2
+ * flag covers v0.2 and onwards, so require it to be set.
+ */
+ if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2))
+ return -EINVAL;
+
+ BUILD_BUG_ON(sizeof(*params) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(*rec->run) > PAGE_SIZE);
+
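+ /*
+ * One page each for the REC parameters, the REC granule that will be
+ * delegated to the RMM and the rec_run structure shared with the RMM.
+ */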
+ params = (struct rec_params *)get_zeroed_page(GFP_KERNEL);
+ rec->rec_page = (void *)__get_free_page(GFP_KERNEL);
+ rec->run = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!params || !rec->rec_page || !rec->run) {
+ r = -ENOMEM;
+ goto out_free_pages;
+ }
+
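+ /* Provide the initial register state via the REC parameters */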
+ for (i = 0; i < ARRAY_SIZE(params->gprs); i++)
+ params->gprs[i] = vcpu_regs->regs[i];
+
+ params->pc = vcpu_regs->pc;
+
+ if (vcpu->vcpu_id == 0)
+ params->flags |= REC_PARAMS_FLAG_RUNNABLE;
+
+ rec_page_phys = virt_to_phys(rec->rec_page);
+
+ if (rmi_granule_delegate(rec_page_phys)) {
+ r = -ENXIO;
+ goto out_free_pages;
+ }
+
+ r = alloc_rec_aux(rec->aux_pages, params->aux, realm->num_aux);
+ if (r)
+ goto out_undelegate_rmm_rec;
+
+ params->num_rec_aux = realm->num_aux;
+ params->mpidr = mpidr;
+
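+ /* Create the REC from the delegated granule and the parameters page */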
+ if (rmi_rec_create(virt_to_phys(realm->rd),
+ rec_page_phys,
+ virt_to_phys(params))) {
+ r = -ENXIO;
+ goto out_free_rec_aux;
+ }
+
+ rec->mpidr = mpidr;
+
+ free_page((unsigned long)params);
+ return 0;
+
+out_free_rec_aux:
+ free_rec_aux(rec->aux_pages, realm->num_aux);
+out_undelegate_rmm_rec:
+ if (WARN_ON(rmi_granule_undelegate(rec_page_phys)))
+ rec->rec_page = NULL;
+out_free_pages:
+ free_page((unsigned long)rec->run);
+ free_page((unsigned long)rec->rec_page);
+ free_page((unsigned long)params);
+ return r;
+}
+
+void kvm_destroy_rec(struct kvm_vcpu *vcpu)
+{
+ struct realm *realm = &vcpu->kvm->arch.realm;
+ struct realm_rec *rec = &vcpu->arch.rec;
+ unsigned long rec_page_phys;
+
+ if (!vcpu_is_rec(vcpu))
+ return;
+
+ if (!rec->run) {
+ /* Nothing to do if the VCPU hasn't been finalized */
+ return;
+ }
+
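+ /* The rec_run page is never delegated to the RMM so can be freed directly */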
+ free_page((unsigned long)rec->run);
+
+ rec_page_phys = virt_to_phys(rec->rec_page);
+
+ /*
+ * The REC and any AUX pages cannot be reclaimed until the REC is
+ * destroyed, so if destroying the REC fails the REC page and any AUX
+ * pages are leaked.
+ */
+ if (WARN_ON(rmi_rec_destroy(rec_page_phys)))
+ return;
+
+ free_rec_aux(rec->aux_pages, realm->num_aux);
+
+ free_delegated_granule(rec_page_phys);
+}
+
int kvm_init_realm_vm(struct kvm *kvm)
{
kvm->arch.realm.params = (void *)get_zeroed_page(GFP_KERNEL);