@@ -635,11 +635,6 @@ void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
bool user);
-extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
- struct kvm_vcpu *vcpu);
-extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
-extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
-extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);

/* Emulation */
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu);
@@ -1198,8 +1198,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
*/
preempt_disable();
cpu = smp_processor_id();
- kvm_get_new_mmu_context(kern_mm,
- cpu, vcpu);
+ get_new_mmu_context(kern_mm, cpu);
for_each_possible_cpu(i)
if (i != cpu)
cpu_context(i, kern_mm) = 0;
@@ -443,25 +443,6 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
return 0;
}

-void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
- struct kvm_vcpu *vcpu)
-{
- unsigned long asid = asid_cache(cpu);
-
- asid += cpu_asid_inc();
- if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
- if (cpu_has_vtag_icache)
- flush_icache_all();
-
- local_flush_tlb_all(); /* start new asid cycle */
-
- if (!asid) /* fix version if needed */
- asid = asid_first_version(cpu);
- }
-
- cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
-
/**
* kvm_mips_migrate_count() - Migrate timer.
* @vcpu: Virtual CPU.
@@ -706,7 +706,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

if ((cpu_context(cpu, kern_mm) ^ asid_cache(cpu)) &
asid_version_mask(cpu)) {
- kvm_get_new_mmu_context(kern_mm, cpu, vcpu);
+ get_new_mmu_context(kern_mm, cpu);

kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
cpu_context(cpu, current->mm));
@@ -716,7 +716,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

if ((cpu_context(cpu, user_mm) ^ asid_cache(cpu)) &
asid_version_mask(cpu)) {
- kvm_get_new_mmu_context(user_mm, cpu, vcpu);
+ get_new_mmu_context(user_mm, cpu);

kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
cpu_context(cpu, current->mm));
@@ -781,7 +781,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
if (gasid != vcpu->arch.last_user_gasid) {
kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
- kvm_get_new_mmu_context(user_mm, cpu, vcpu);
+ get_new_mmu_context(user_mm, cpu);
for_each_possible_cpu(i)
if (i != cpu)
cpu_context(i, user_mm) = 0;
MIPS KVM uses its own variation of get_new_mmu_context() which takes an
extra vcpu pointer (unused) and does exactly the same thing. Switch to
just using get_new_mmu_context() directly and drop KVM's version of it
as it doesn't really serve any purpose.

The nearby declarations of kvm_mips_alloc_new_mmu_context(),
kvm_mips_vcpu_load() and kvm_mips_vcpu_put() are also removed from
kvm_host.h, as no definitions or users exist.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/include/asm/kvm_host.h |  5 -----
 arch/mips/kvm/emulate.c          |  3 +--
 arch/mips/kvm/mmu.c              | 19 -------------------
 arch/mips/kvm/trap_emul.c        |  6 +++---
 4 files changed, 4 insertions(+), 29 deletions(-)
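
For reference, since the equivalence of the two helpers is the whole
justification for the change: below is a simplified sketch of the ASID
roll-over that both the removed kvm_get_new_mmu_context() above and the
generic get_new_mmu_context() from <asm/mmu_context.h> perform. It is
paraphrased from the code removed in mmu.c, not copied verbatim from the
header, and the function name is illustrative only; the primitives it
uses (asid_cache(), cpu_asid_inc(), cpu_asid_mask(), asid_first_version(),
cpu_context()) are the MIPS kernel-internal ASID helpers already visible
in the removed code.

	/*
	 * Illustrative sketch (not verbatim kernel source): advance the
	 * per-CPU ASID; when the ASID field wraps, flush the TLB (and a
	 * virtually tagged icache, if present) and move to a new ASID
	 * "version" so stale contexts are caught by asid_version_mask().
	 */
	static inline void example_new_mmu_context(struct mm_struct *mm,
						   unsigned long cpu)
	{
		unsigned long asid = asid_cache(cpu) + cpu_asid_inc();

		if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
			if (cpu_has_vtag_icache)
				flush_icache_all();

			local_flush_tlb_all();	/* start new asid cycle */

			if (!asid)		/* fix version if needed */
				asid = asid_first_version(cpu);
		}

		/* record the new ASID for this mm and as the CPU's latest */
		cpu_context(cpu, mm) = asid_cache(cpu) = asid;
	}

With the duplicate gone, every call site follows the pattern already
shown in the trap_emul.c hunks: compare cpu_context(cpu, mm) against
asid_cache(cpu) under asid_version_mask(cpu) and call
get_new_mmu_context(mm, cpu) only when the stored ASID belongs to an
old cycle.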