Message ID | 1538479892-14835-33-git-send-email-paulus@ozlabs.org (mailing list archive) |
---|---|
State | New, archived |
Series | KVM: PPC: Book3S HV: Nested HV virtualization |
On Tue, Oct 02, 2018 at 09:31:31PM +1000, Paul Mackerras wrote:
> This adds code to call the H_TLB_INVALIDATE hypercall when running as
> a guest, in the cases where we need to invalidate TLBs (or other MMU
> caches) as part of managing the mappings for a nested guest. Calling
> H_TLB_INVALIDATE is an alternative to doing the tlbie instruction and
> having it be emulated by our hypervisor.

Why is supporting two methods useful, rather than just settling on one?
Having both just sounds like asking for untested and therefore broken
code paths lying around.

> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
> ---
>  arch/powerpc/include/asm/kvm_book3s_64.h |  5 +++++
>  arch/powerpc/kvm/book3s_64_mmu_radix.c   | 30 +++++++++++++++++++++++++++--
>  arch/powerpc/kvm/book3s_hv_nested.c      | 33 +++++++++++++++++++++++---------
>  3 files changed, 57 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
> index f0fa14c..95280e2 100644
> --- a/arch/powerpc/include/asm/kvm_book3s_64.h
> +++ b/arch/powerpc/include/asm/kvm_book3s_64.h
> @@ -24,6 +24,7 @@
>  #include <asm/bitops.h>
>  #include <asm/book3s/64/mmu-hash.h>
>  #include <asm/cpu_has_feature.h>
> +#include <asm/ppc-opcode.h>
>  
>  #ifdef CONFIG_PPC_PSERIES
>  static inline bool kvmhv_on_pseries(void)
> @@ -121,6 +122,10 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
>  void kvmhv_put_nested(struct kvm_nested_guest *gp);
>  int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);
>  
> +/* Encoding of first parameter for H_TLB_INVALIDATE */
> +#define H_TLBIE_P1_ENC(ric, prs, r)     (___PPC_RIC(ric) | ___PPC_PRS(prs) | \
> +                                         ___PPC_R(r))
> +
>  /* Power architecture requires HPT is at least 256kiB, at most 64TiB */
>  #define PPC_MIN_HPT_ORDER       18
>  #define PPC_MAX_HPT_ORDER       46
> diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
> index b74abdd..6c93f5c 100644
> --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
> +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
> @@ -201,17 +201,43 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
>                                      unsigned int pshift, unsigned int lpid)
>  {
>          unsigned long psize = PAGE_SIZE;
> +        int psi;
> +        long rc;
> +        unsigned long rb;
>  
>          if (pshift)
>                  psize = 1UL << pshift;
> +        else
> +                pshift = PAGE_SHIFT;
>  
>          addr &= ~(psize - 1);
> -        radix__flush_tlb_lpid_page(lpid, addr, psize);
> +
> +        if (!kvmhv_on_pseries()) {
> +                radix__flush_tlb_lpid_page(lpid, addr, psize);
> +                return;
> +        }
> +
> +        psi = shift_to_mmu_psize(pshift);
> +        rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
> +        rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
> +                                lpid, rb);
> +        if (rc)
> +                pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
>  }
>  
>  static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
>  {
> -        radix__flush_pwc_lpid(lpid);
> +        long rc;
> +
> +        if (!kvmhv_on_pseries()) {
> +                radix__flush_pwc_lpid(lpid);
> +                return;
> +        }
> +
> +        rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
> +                                lpid, TLBIEL_INVAL_SET_LPID);
> +        if (rc)
> +                pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
>  }
>  
>  static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
> diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
> index 914ca78..81c81d51 100644
> --- a/arch/powerpc/kvm/book3s_hv_nested.c
> +++ b/arch/powerpc/kvm/book3s_hv_nested.c
> @@ -368,17 +368,32 @@ void kvmhv_nested_exit(void)
>          }
>  }
>  
> +static void kvmhv_flush_lpid(unsigned int lpid)
> +{
> +        long rc;
> +
> +        if (!kvmhv_on_pseries()) {
> +                radix__flush_tlb_lpid(lpid);
> +                return;
> +        }
> +
> +        rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
> +                                lpid, TLBIEL_INVAL_SET_LPID);
> +        if (rc)
> +                pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
> +}
> +
>  void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
>  {
> -        if (cpu_has_feature(CPU_FTR_HVMODE)) {
> +        if (!kvmhv_on_pseries()) {
>                  mmu_partition_table_set_entry(lpid, dw0, dw1);
> -        } else {
> -                pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
> -                pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
> -                /* this will be emulated, L0 will do the necessary barriers */
> -                asm volatile(PPC_TLBIE_5(%0, %1, 2, 0, 1) : :
> -                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
> +                return;
>          }
> +
> +        pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
> +        pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
> +        /* L0 will do the necessary barriers */
> +        kvmhv_flush_lpid(lpid);
>  }
>  
>  static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
> @@ -543,7 +558,7 @@ static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
>          spin_lock(&kvm->mmu_lock);
>          kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
>          spin_unlock(&kvm->mmu_lock);
> -        radix__flush_tlb_lpid(gp->shadow_lpid);
> +        kvmhv_flush_lpid(gp->shadow_lpid);
>          kvmhv_update_ptbl_cache(gp);
>          if (gp->l1_gr_to_hr == 0)
>                  kvmhv_remove_nested(gp);
> @@ -839,7 +854,7 @@ static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
>                  spin_lock(&kvm->mmu_lock);
>                  kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
>                                            gp->shadow_lpid);
> -                radix__flush_tlb_lpid(gp->shadow_lpid);
> +                kvmhv_flush_lpid(gp->shadow_lpid);
>                  spin_unlock(&kvm->mmu_lock);
>                  break;
>          case 1:
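For readers skimming the archive, the two code paths under discussion reduce to the short sketch below. It condenses the patch's kvmhv_flush_lpid() and assumes the definitions this series introduces (kvmhv_on_pseries(), H_TLB_INVALIDATE and H_TLBIE_P1_ENC()); it is an illustrative sketch, not the exact patch code.

```c
/*
 * Illustrative sketch of the two invalidation paths, condensed from the
 * patch's kvmhv_flush_lpid().  Assumes the kvmhv_on_pseries(),
 * H_TLB_INVALIDATE and H_TLBIE_P1_ENC() definitions added by this series.
 */
static void flush_lpid_sketch(unsigned int lpid)
{
        long rc;

        if (!kvmhv_on_pseries()) {
                /* L1 is the real hypervisor: flush with tlbie directly */
                radix__flush_tlb_lpid(lpid);
                return;
        }

        /*
         * L1 is itself a pseries guest: ask L0 via H_TLB_INVALIDATE
         * (RIC=2 invalidate all, PRS=0 partition-scoped, R=1 radix)
         * instead of executing tlbie and having L0 emulate it.
         */
        rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
                                lpid, TLBIEL_INVAL_SET_LPID);
        if (rc)
                pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}
```

The review question above is whether keeping both branches, rather than settling on one, risks leaving the less-travelled path untested.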