@@ -65,9 +65,9 @@
extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
+extern void __kvm_tlb_flush_vmid_ipa(u64 vttbr, phys_addr_t ipa);
+extern void __kvm_tlb_flush_vmid(u64 vttbr);
+extern void __kvm_tlb_flush_local_vmid(u64 vttbr);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
@@ -305,4 +305,8 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
}
}
+static inline struct kvm_s2_vmid *vcpu_get_active_vmid(struct kvm_vcpu *vcpu)
+{
+ return &vcpu->kvm->arch.mmu.vmid;
+}
#endif /* __ARM_KVM_EMULATE_H__ */
@@ -53,16 +53,18 @@
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-struct kvm_s2_mmu {
+struct kvm_s2_vmid {
/* The VMID generation used for the virt. memory system */
u64 vmid_gen;
u32 vmid;
+};
+
+struct kvm_s2_mmu {
+ struct kvm_s2_vmid vmid;
+ struct kvm_s2_vmid el2_vmid;
/* Stage-2 page table */
pgd_t *pgd;
-
- /* VTTBR value associated with above pgd and vmid */
- u64 vttbr;
};
struct kvm_arch {
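The point of this hunk: the VMID bookkeeping (generation counter plus allocated value) moves out of kvm_s2_mmu into its own struct kvm_s2_vmid, so a single stage-2 context can carry two independent TLB tags, and the cached vttbr field goes away because the register value is now derived on demand. A minimal sketch of what the split buys, using the kvm_get_vttbr() helper introduced below (illustrative only, not part of the patch):

    struct kvm_s2_mmu *mmu = &kvm->arch.mmu;

    /* One pgd, two TLB tags: the same stage-2 tables can be entered
     * under the guest VMID or under the virtual-EL2 VMID. */
    u64 guest_vttbr = kvm_get_vttbr(&mmu->vmid, mmu);
    u64 vel2_vttbr  = kvm_get_vttbr(&mmu->el2_vmid, mmu);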
@@ -196,6 +198,9 @@ struct kvm_vcpu_arch {
/* Stage 2 paging state used by the hardware on next switch */
struct kvm_s2_mmu *hw_mmu;
+
+ /* VTTBR value used by the hardware on next switch */
+ u64 hw_vttbr;
};
struct kvm_vm_stat {
@@ -242,6 +247,7 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
{
}
+unsigned int get_kvm_vmid_bits(void);
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
@@ -230,6 +230,17 @@ static inline unsigned int kvm_get_vmid_bits(void)
return 8;
}
+static inline u64 kvm_get_vttbr(struct kvm_s2_vmid *vmid,
+ struct kvm_s2_mmu *mmu)
+{
+ u64 vmid_field, baddr;
+
+ baddr = virt_to_phys(mmu->pgd);
+ vmid_field = ((u64)vmid->vmid << VTTBR_VMID_SHIFT) &
+ VTTBR_VMID_MASK(get_kvm_vmid_bits());
+ return baddr | vmid_field;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
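For reference, a worked example of the packing kvm_get_vttbr() performs, with hypothetical numbers (VTTBR_VMID_SHIFT is 48, so the VMID occupies bits 55:48 above the table base address):

    /* Hypothetical values: pgd at PA 0x8f40c000, VMID 5, 8 VMID bits:
     *
     *   (5ULL << 48) | 0x8f40c000 == 0x000500008f40c000
     *
     * Note that kvm_get_vttbr() does not retain the BUG_ON() alignment
     * check the old update_vttbr() applied; callers rely on the pgd
     * allocation already satisfying VTTBR_BADDR_MASK. */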
@@ -75,6 +75,11 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
__this_cpu_write(kvm_arm_running_vcpu, vcpu);
}
+unsigned int get_kvm_vmid_bits(void)
+{
+ return kvm_vmid_bits;
+}
+
/**
* kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
* Must be called from non-preemptible context
@@ -139,7 +144,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_timer_init(kvm);
/* Mark the initial VMID generation invalid */
- kvm->arch.mmu.vmid_gen = 0;
+ kvm->arch.mmu.vmid.vmid_gen = 0;
+ kvm->arch.mmu.el2_vmid.vmid_gen = 0;
/* The maximum number of VCPUs is limited by the host's GIC model */
kvm->arch.max_vcpus = vgic_present ?
@@ -312,6 +318,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
+ struct kvm_s2_mmu *mmu = &vcpu->kvm->arch.mmu;
+
/* Force users to call KVM_ARM_VCPU_INIT */
vcpu->arch.target = -1;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
@@ -321,7 +329,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
kvm_arm_reset_debug_ptr(vcpu);
- vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+ vcpu->arch.hw_mmu = mmu;
+ vcpu->arch.hw_vttbr = kvm_get_vttbr(&mmu->vmid, mmu);
return 0;
}
@@ -337,7 +346,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* over-invalidation doesn't affect correctness.
*/
if (*last_ran != vcpu->vcpu_id) {
- kvm_call_hyp(__kvm_tlb_flush_local_vmid, &vcpu->kvm->arch.mmu);
+ struct kvm_s2_mmu *mmu = &vcpu->kvm->arch.mmu;
+ u64 vttbr = kvm_get_vttbr(&mmu->vmid, mmu);
+
+ kvm_call_hyp(__kvm_tlb_flush_local_vmid, vttbr);
*last_ran = vcpu->vcpu_id;
}
@@ -415,36 +427,33 @@ void force_vm_exit(const cpumask_t *mask)
/**
* need_new_vmid_gen - check that the VMID is still valid
- * @kvm: The VM's VMID to check
+ * @vmid: The VMID to check
*
* return true if there is a new generation of VMIDs being used
*
- * The hardware supports only 256 values with the value zero reserved for the
- * host, so we check if an assigned value belongs to a previous generation,
- * which which requires us to assign a new value. If we're the first to use a
- * VMID for the new generation, we must flush necessary caches and TLBs on all
- * CPUs.
+ * The hardware supports a limited number of VMIDs, with the value zero
+ * reserved for the host, so we check if an assigned value belongs to a
+ * previous generation, which requires us to assign a new value. If we're
+ * the first to use a VMID for the new generation, we must flush necessary
+ * caches and TLBs on all CPUs.
*/
-static bool need_new_vmid_gen(struct kvm_s2_mmu *mmu)
+static bool need_new_vmid_gen(struct kvm_s2_vmid *vmid)
{
- return unlikely(mmu->vmid_gen != atomic64_read(&kvm_vmid_gen));
+ return unlikely(vmid->vmid_gen != atomic64_read(&kvm_vmid_gen));
}
/**
* update_vttbr - Update the VTTBR with a valid VMID before the guest runs
* @kvm: The guest that we are about to run
- * @mmu: The stage-2 translation context to update
+ * @vmid: The stage-2 VMID information struct
*
* Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
* VM has a valid VMID, otherwise assigns a new one and flushes corresponding
* caches and TLBs.
*/
-static void update_vttbr(struct kvm *kvm, struct kvm_s2_mmu *mmu)
+static void update_vttbr(struct kvm *kvm, struct kvm_s2_vmid *vmid)
{
- phys_addr_t pgd_phys;
- u64 vmid;
-
- if (!need_new_vmid_gen(mmu))
+ if (!need_new_vmid_gen(vmid))
return;
spin_lock(&kvm_vmid_lock);
@@ -454,7 +463,7 @@ static void update_vttbr(struct kvm *kvm, struct kvm_s2_mmu *mmu)
* already allocated a valid vmid for this vm, then this vcpu should
* use the same vmid.
*/
- if (!need_new_vmid_gen(mmu)) {
+ if (!need_new_vmid_gen(vmid)) {
spin_unlock(&kvm_vmid_lock);
return;
}
@@ -478,18 +487,11 @@ static void update_vttbr(struct kvm *kvm, struct kvm_s2_mmu *mmu)
kvm_call_hyp(__kvm_flush_vm_context);
}
- mmu->vmid_gen = atomic64_read(&kvm_vmid_gen);
- mmu->vmid = kvm_next_vmid;
+ vmid->vmid_gen = atomic64_read(&kvm_vmid_gen);
+ vmid->vmid = kvm_next_vmid;
kvm_next_vmid++;
kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
- /* update vttbr to be used with the new vmid */
- pgd_phys = virt_to_phys(mmu->pgd);
- BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
- vmid = ((u64)(mmu->vmid) << VTTBR_VMID_SHIFT) &
- VTTBR_VMID_MASK(kvm_vmid_bits);
- mmu->vttbr = pgd_phys | vmid;
-
spin_unlock(&kvm_vmid_lock);
}
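Only the tail of the generation-rollover block is visible in the hunk above (the __kvm_flush_vm_context call and its closing brace). For context, a condensed sketch of that path in update_vttbr(), paraphrasing the existing logic rather than adding new code:

    if (unlikely(kvm_next_vmid == 0)) {
        /* VMID space exhausted: start a new generation, which at a
         * stroke marks every previously assigned VMID as stale. */
        atomic64_inc(&kvm_vmid_gen);
        kvm_next_vmid = 1;      /* VMID 0 stays reserved for the host */

        /* Kick all vcpus out of the guest so none keeps running on a
         * stale VMID, then flush TLBs for the retired generation. */
        force_vm_exit(cpu_all_mask);
        kvm_call_hyp(__kvm_flush_vm_context);
    }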
@@ -615,7 +617,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
cond_resched();
- update_vttbr(vcpu->kvm, vcpu->arch.hw_mmu);
+ update_vttbr(vcpu->kvm, vcpu_get_active_vmid(vcpu));
if (vcpu->arch.power_off || vcpu->arch.pause)
vcpu_sleep(vcpu);
@@ -640,7 +642,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
run->exit_reason = KVM_EXIT_INTR;
}
- if (ret <= 0 || need_new_vmid_gen(vcpu->arch.hw_mmu) ||
+ if (ret <= 0 || need_new_vmid_gen(vcpu_get_active_vmid(vcpu)) ||
vcpu->arch.power_off || vcpu->arch.pause) {
local_irq_enable();
kvm_pmu_sync_hwstate(vcpu);
@@ -73,9 +73,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
- struct kvm_s2_mmu *mmu = kern_hyp_va(vcpu->arch.hw_mmu);
-
- write_sysreg(mmu->vttbr, VTTBR);
+ write_sysreg(vcpu->arch.hw_vttbr, VTTBR);
write_sysreg(vcpu->arch.midr, VPIDR);
}
@@ -34,13 +34,12 @@
* As v7 does not support flushing per IPA, just nuke the whole TLB
* instead, ignoring the ipa value.
*/
-void __hyp_text __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
+void __hyp_text __kvm_tlb_flush_vmid(u64 vttbr)
{
dsb(ishst);
/* Switch to requested VMID */
- mmu = kern_hyp_va(mmu);
- write_sysreg(mmu->vttbr, VTTBR);
+ write_sysreg(vttbr, VTTBR);
isb();
write_sysreg(0, TLBIALLIS);
@@ -50,17 +49,15 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
write_sysreg(0, VTTBR);
}
-void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
- phys_addr_t ipa)
+void __hyp_text __kvm_tlb_flush_vmid_ipa(u64 vttbr, phys_addr_t ipa)
{
- __kvm_tlb_flush_vmid(mmu);
+ __kvm_tlb_flush_vmid(vttbr);
}
-void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+void __hyp_text __kvm_tlb_flush_local_vmid(u64 vttbr)
{
/* Switch to requested VMID */
- mmu = kern_hyp_va(mmu);
- write_sysreg(mmu->vttbr, VTTBR);
+ write_sysreg(vttbr, VTTBR);
isb();
write_sysreg(0, TLBIALL);
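The pattern running through these tlb.c hunks: hyp text executes under its own VA mapping, so a kernel pointer argument had to be translated with kern_hyp_va() before it could be dereferenced at HYP. Passing the 64-bit VTTBR by value removes both the translation and the memory access. The host-side call shape becomes (mirroring the mmu.c hunk below):

    /* Compute the register value in the kernel and pass it by value,
     * so the HYP code never touches a kernel pointer. */
    u64 vttbr = kvm_get_vttbr(&mmu->vmid, mmu);

    kvm_call_hyp(__kvm_tlb_flush_vmid, vttbr);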
@@ -60,12 +60,17 @@ static bool memslot_is_logging(struct kvm_memory_slot *memslot)
*/
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
- kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
+ struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
+ u64 vttbr = kvm_get_vttbr(&mmu->vmid, mmu);
+
+ kvm_call_hyp(__kvm_tlb_flush_vmid, vttbr);
}
static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
{
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa);
+ u64 vttbr = kvm_get_vttbr(&mmu->vmid, mmu);
+
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, vttbr, ipa);
}
/*
@@ -53,9 +53,9 @@
extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
+extern void __kvm_tlb_flush_vmid_ipa(u64 vttbr, phys_addr_t ipa);
+extern void __kvm_tlb_flush_vmid(u64 vttbr);
+extern void __kvm_tlb_flush_local_vmid(u64 vttbr);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
@@ -363,4 +363,12 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
return data; /* Leave LE untouched */
}
+static inline struct kvm_s2_vmid *vcpu_get_active_vmid(struct kvm_vcpu *vcpu)
+{
+ if (unlikely(vcpu_mode_el2(vcpu)))
+ return &vcpu->kvm->arch.mmu.el2_vmid;
+
+ return &vcpu->kvm->arch.mmu.vmid;
+}
+
#endif /* __ARM64_KVM_EMULATE_H__ */
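Unlike the arm version above, the arm64 helper picks the el2_vmid tag when the vcpu is in virtual EL2, so nested-hypervisor and guest translations never alias in the TLB (vcpu_mode_el2() comes from the nested-virt series this patch is part of). A hypothetical sketch of how a caller could retag the stage-2 tables on guest entry; the hw_vttbr refresh point is an assumption, not something this patch wires up:

    /* Hypothetical: recompute hw_vttbr so the TLB tag matches the
     * vcpu's current mode before entering the guest. */
    struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;

    vcpu->arch.hw_vttbr = kvm_get_vttbr(vcpu_get_active_vmid(vcpu), mmu);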
@@ -50,17 +50,19 @@
int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
-struct kvm_s2_mmu {
+struct kvm_s2_vmid {
/* The VMID generation used for the virt. memory system */
u64 vmid_gen;
u32 vmid;
+};
+
+struct kvm_s2_mmu {
+ struct kvm_s2_vmid vmid;
+ struct kvm_s2_vmid el2_vmid;
/* 1-level 2nd stage table and lock */
spinlock_t pgd_lock;
pgd_t *pgd;
-
- /* VTTBR value associated with above pgd and vmid */
- u64 vttbr;
};
struct kvm_arch {
@@ -334,6 +336,9 @@ struct kvm_vcpu_arch {
/* Stage 2 paging state used by the hardware on next switch */
struct kvm_s2_mmu *hw_mmu;
+
+ /* VTTBR value used by the hardware on next switch */
+ u64 hw_vttbr;
};
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
@@ -391,6 +396,7 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
{
}
+unsigned int get_kvm_vmid_bits(void);
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
@@ -314,5 +314,16 @@ static inline unsigned int kvm_get_vmid_bits(void)
return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}
+static inline u64 kvm_get_vttbr(struct kvm_s2_vmid *vmid,
+ struct kvm_s2_mmu *mmu)
+{
+ u64 vmid_field, baddr;
+
+ baddr = virt_to_phys(mmu->pgd);
+ vmid_field = ((u64)vmid->vmid << VTTBR_VMID_SHIFT) &
+ VTTBR_VMID_MASK(get_kvm_vmid_bits());
+ return baddr | vmid_field;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
@@ -135,9 +135,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
- struct kvm_s2_mmu *mmu = kern_hyp_va(vcpu->arch.hw_mmu);
-
- write_sysreg(mmu->vttbr, vttbr_el2);
+ write_sysreg(vcpu->arch.hw_vttbr, vttbr_el2);
}
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
@@ -17,14 +17,12 @@
#include <asm/kvm_hyp.h>
-void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
- phys_addr_t ipa)
+void __hyp_text __kvm_tlb_flush_vmid_ipa(u64 vttbr, phys_addr_t ipa)
{
dsb(ishst);
/* Switch to requested VMID */
- mmu = kern_hyp_va(mmu);
- write_sysreg(mmu->vttbr, vttbr_el2);
+ write_sysreg(vttbr, vttbr_el2);
isb();
/*
@@ -49,13 +47,12 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
write_sysreg(0, vttbr_el2);
}
-void __hyp_text __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
+void __hyp_text __kvm_tlb_flush_vmid(u64 vttbr)
{
dsb(ishst);
/* Switch to requested VMID */
- mmu = kern_hyp_va(mmu);
- write_sysreg(mmu->vttbr, vttbr_el2);
+ write_sysreg(vttbr, vttbr_el2);
isb();
asm volatile("tlbi vmalls12e1is" : : );
@@ -65,11 +62,10 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
write_sysreg(0, vttbr_el2);
}
-void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+void __hyp_text __kvm_tlb_flush_local_vmid(u64 vttbr)
{
/* Switch to requested VMID */
- mmu = kern_hyp_va(mmu);
- write_sysreg(mmu->vttbr, vttbr_el2);
+ write_sysreg(vttbr, vttbr_el2);
isb();
asm volatile("tlbi vmalle1" : : );