
[5/10] KVM: MIPS/T&E: Reduce stale ASID checks

Message ID f8edc1cf1113316aaa81de155435bfc03b1b8158.1483993967.git-series.james.hogan@imgtec.com (mailing list archive)

Commit Message

James Hogan Jan. 9, 2017, 8:51 p.m. UTC
The stale ASID checks taking place on VCPU load can be reduced:

- Now that we check for a stale ASID on guest re-entry, there is no need
  to do so when loading the VCPU outside of guest context, since it will
  happen before entering the guest. Note that many KVM VCPU ioctls cause
  the VCPU to be loaded without guest context being entered.

- There is no need to check for a stale kern_mm ASID when the guest is
  in user mode and vice versa. In fact, doing so can be problematic,
  since the user_mm ASID regeneration may trigger a new ASID cycle,
  which would cause the kern_mm ASID to become stale again after it has
  just been checked for staleness (illustrated below).
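
As an illustration of the second point, the old ordering on VCPU load was
roughly the following (a simplified sketch using the MIPS mmu_context
helpers as in the removed code below, not the literal kernel source):

	/* 1. Check/regenerate the kernel-mode ASID first. */
	if ((cpu_context(cpu, kern_mm) ^ asid_cache(cpu)) &
	    asid_version_mask(cpu))
		get_new_mmu_context(kern_mm, cpu);

	/*
	 * 2. Then check/regenerate the user-mode ASID. If this exhausts the
	 *    ASID space it starts a new ASID cycle (version bump)...
	 */
	if ((cpu_context(cpu, user_mm) ^ asid_cache(cpu)) &
	    asid_version_mask(cpu))
		get_new_mmu_context(user_mm, cpu);

	/*
	 * 3. ...which leaves the kern_mm ASID checked in step 1 with a stale
	 *    version again, even though it was just validated.
	 */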

Therefore only check the ASID for the mm corresponding to the current
guest mode, and only if we're already in guest context. We drop some of
the related kvm_debug() calls here too.
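
The core of the resulting kvm_trap_emul_vcpu_load() hunk is repeated here
with explanatory comments (again a simplified sketch, not the verbatim
patch below):

	if (current->flags & PF_VCPU) {
		/* Only the mm for the current guest mode matters here. */
		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;

		/* Regenerate its ASID only if the version bits are stale. */
		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
		    asid_version_mask(cpu))
			get_new_mmu_context(mm, cpu);

		/* Load the now-valid ASID for the guest mode being entered. */
		write_c0_entryhi(cpu_asid(cpu, mm));
	}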

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/kvm/trap_emul.c | 35 ++++++-----------------------------
 1 file changed, 6 insertions(+), 29 deletions(-)

Patch

diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index c6a16267f084..0cb76e1aac0e 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -714,35 +714,15 @@  static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
 	struct mm_struct *mm;
 
-	/* Allocate new kernel and user ASIDs if needed */
-
-	if ((cpu_context(cpu, kern_mm) ^ asid_cache(cpu)) &
-						asid_version_mask(cpu)) {
-		get_new_mmu_context(kern_mm, cpu);
-
-		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
-			  cpu_context(cpu, current->mm));
-		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#lx\n",
-			  cpu, cpu_context(cpu, kern_mm));
-	}
-
-	if ((cpu_context(cpu, user_mm) ^ asid_cache(cpu)) &
-						asid_version_mask(cpu)) {
-		get_new_mmu_context(user_mm, cpu);
-
-		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
-			  cpu_context(cpu, current->mm));
-		kvm_debug("[%d]: Allocated new ASID for Guest User: %#lx\n",
-			  cpu, cpu_context(cpu, user_mm));
-	}
-
 	/*
-	 * Were we in guest context? If so then the pre-empted ASID is
-	 * no longer valid, we need to set it to what it should be based
-	 * on the mode of the Guest (Kernel/User)
+	 * Were we in guest context? If so, restore the appropriate ASID based
+	 * on the mode of the Guest (Kernel/User).
 	 */
 	if (current->flags & PF_VCPU) {
 		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
+		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
+		    asid_version_mask(cpu))
+			get_new_mmu_context(mm, cpu);
 		write_c0_entryhi(cpu_asid(cpu, mm));
 		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
 		cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
@@ -760,11 +740,8 @@  static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
 	if (current->flags & PF_VCPU) {
 		/* Restore normal Linux process memory map */
 		if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-		     asid_version_mask(cpu))) {
-			kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
-				  cpu_context(cpu, current->mm));
+		     asid_version_mask(cpu)))
 			get_new_mmu_context(current->mm, cpu);
-		}
 		write_c0_entryhi(cpu_asid(cpu, current->mm));
 		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
 		cpumask_set_cpu(cpu, mm_cpumask(current->mm));