@@ -279,6 +279,8 @@ static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* MMU handling */
+void kvm_flush_tlb_all_stage1(void);
+void kvm_flush_tlb_all_stage2(void);
void kvm_flush_tlb_all(void);
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
@@ -207,15 +207,17 @@ static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu)
++vpid; /* vpid 0 reserved for root */
/* start new vpid cycle */
- kvm_flush_tlb_all();
+ if (!cpu_has_guestid)
+ kvm_flush_tlb_all();
+ else
+ kvm_flush_tlb_all_stage1();
}
context->vpid_cache = vpid;
vcpu->arch.vpid = vpid;
- vcpu->arch.vmid = vcpu->arch.vpid & vpid_mask;
}
-void kvm_check_vpid(struct kvm_vcpu *vcpu)
+static void __kvm_check_vpid(struct kvm_vcpu *vcpu)
{
int cpu;
bool migrated;
@@ -243,7 +245,6 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu)
kvm_update_vpid(vcpu, cpu);
trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
vcpu->cpu = cpu;
- kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
}
/* Restore GSTAT(0x50).vpid */
@@ -251,6 +252,27 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu)
change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid);
}
+static void __kvm_check_vmid(struct kvm_vcpu *vcpu)
+{
+ unsigned long vmid;
+
+	/* On some machines such as 3A5000, vmid needs to be the same as vpid */
+ if (!cpu_has_guestid) {
+ vmid = vcpu->arch.vpid & vpid_mask;
+ if (vcpu->arch.vmid != vmid) {
+ vcpu->arch.vmid = vmid;
+ kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
+ }
+ return;
+ }
+}
+
+void kvm_check_vpid(struct kvm_vcpu *vcpu)
+{
+ __kvm_check_vpid(vcpu);
+ __kvm_check_vmid(vcpu);
+}
+
void kvm_init_vmcs(struct kvm *kvm)
{
kvm->arch.vmcs = vmcs;
@@ -21,6 +21,20 @@ void kvm_flush_tlb_all(void)
local_irq_restore(flags);
}
+/* Invalidate all stage1 TLB entries including GVA-->GPA mappings */
+void kvm_flush_tlb_all_stage1(void)
+{
+ lockdep_assert_irqs_disabled();
+ invtlb_all(INVGTLB_ALLGID_GVA_TO_GPA, 0, 0);
+}
+
+/* Invalidate all stage2 TLB entries including GPA-->HPA mappings */
+void kvm_flush_tlb_all_stage2(void)
+{
+ lockdep_assert_irqs_disabled();
+ invtlb_all(INVTLB_ALLGID_GPA_TO_HPA, 0, 0);
+}
+
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa)
{
unsigned int vmid;

Feature cpu_has_guestid is used to check whether separate vmid/vpid is
supported. Also add a separate function to update the vmid.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/loongarch/include/asm/kvm_host.h |  2 ++
 arch/loongarch/kvm/main.c             | 30 +++++++++++++++++++++++----
 arch/loongarch/kvm/tlb.c              | 14 +++++++++++++
 3 files changed, 42 insertions(+), 4 deletions(-)
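
A minimal usage sketch (not part of the diff above; example_vpid_rollover_flush()
is a hypothetical caller, shown only for illustration). kvm_flush_tlb_all()
manages interrupt state itself, while the new stage helpers only assert that
interrupts are already disabled, so a caller in a preemptible context would
disable them first:

/*
 * Hypothetical wrapper, not part of this patch: shows when the
 * stage1-only flush is sufficient. Without GUESTID support the vmid
 * is derived from the vpid, so a vpid rollover also invalidates
 * GPA-->HPA translations and a full flush is required.
 */
static void example_vpid_rollover_flush(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (cpu_has_guestid)
		kvm_flush_tlb_all_stage1();	/* GVA-->GPA entries only */
	else
		kvm_flush_tlb_all();		/* vmid tied to vpid: flush both stages */
	local_irq_restore(flags);
}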