| Message ID | 1538654169-15602-25-git-send-email-paulus@ozlabs.org (mailing list archive) |
|---|---|
| State | New, archived |
| Series | KVM: PPC: Book3S HV: Nested HV virtualization |
On Thu, Oct 04, 2018 at 09:56:01PM +1000, Paul Mackerras wrote:
> This adds code to call the H_TLB_INVALIDATE hypercall when running as
> a guest, in the cases where we need to invalidate TLBs (or other MMU
> caches) as part of managing the mappings for a nested guest. Calling
> H_TLB_INVALIDATE lets the nested hypervisor inform the parent
> hypervisor about changes to partition-scoped page tables or the
> partition table without needing to do hypervisor-privileged tlbie
> instructions.
>
> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>

> ---
>  arch/powerpc/include/asm/kvm_book3s_64.h |  5 +++++
>  arch/powerpc/kvm/book3s_64_mmu_radix.c   | 30 ++++++++++++++++++++++++++++--
>  arch/powerpc/kvm/book3s_hv_nested.c      | 30 ++++++++++++++++++++++++------
>  3 files changed, 57 insertions(+), 8 deletions(-)
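For reference, the first parameter of H_TLB_INVALIDATE packs the same R, PRS and RIC fields as the tlbie instruction: R=1 selects radix, PRS=0 selects partition-scoped entries, and RIC selects what to invalidate. A minimal sketch of the three encodings the patch uses, relying only on the H_TLBIE_P1_ENC macro added below (the P1_* names here are illustrative, not part of the patch):

/* Sketch: first-parameter values this patch passes to H_TLB_INVALIDATE */
#define P1_TLB_PAGE	H_TLBIE_P1_ENC(0, 0, 1)	/* RIC=0: TLB entries */
#define P1_PWC		H_TLBIE_P1_ENC(1, 0, 1)	/* RIC=1: page-walk cache */
#define P1_ALL_LPID	H_TLBIE_P1_ENC(2, 0, 1)	/* RIC=2: all translations for the LPID */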
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index a02f0b3..aa5bf85 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -24,6 +24,7 @@
 #include <asm/bitops.h>
 #include <asm/book3s/64/mmu-hash.h>
 #include <asm/cpu_has_feature.h>
+#include <asm/ppc-opcode.h>
 
 #ifdef CONFIG_PPC_PSERIES
 static inline bool kvmhv_on_pseries(void)
@@ -117,6 +118,10 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
 					  bool create);
 void kvmhv_put_nested(struct kvm_nested_guest *gp);
 
+/* Encoding of first parameter for H_TLB_INVALIDATE */
+#define H_TLBIE_P1_ENC(ric, prs, r)	(___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+					 ___PPC_R(r))
+
 /* Power architecture requires HPT is at least 256kiB, at most 64TiB */
 #define PPC_MIN_HPT_ORDER	18
 #define PPC_MAX_HPT_ORDER	46
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 4c1eccb..ae0e3ed 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -201,17 +201,43 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
 				    unsigned int pshift, unsigned int lpid)
 {
 	unsigned long psize = PAGE_SIZE;
+	int psi;
+	long rc;
+	unsigned long rb;
 
 	if (pshift)
 		psize = 1UL << pshift;
+	else
+		pshift = PAGE_SHIFT;
 
 	addr &= ~(psize - 1);
-	radix__flush_tlb_lpid_page(lpid, addr, psize);
+
+	if (!kvmhv_on_pseries()) {
+		radix__flush_tlb_lpid_page(lpid, addr, psize);
+		return;
+	}
+
+	psi = shift_to_mmu_psize(pshift);
+	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+				lpid, rb);
+	if (rc)
+		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
 }
 
 static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
 {
-	radix__flush_pwc_lpid(lpid);
+	long rc;
+
+	if (!kvmhv_on_pseries()) {
+		radix__flush_pwc_lpid(lpid);
+		return;
+	}
+
+	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+				lpid, TLBIEL_INVAL_SET_LPID);
+	if (rc)
+		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
 }
 
 static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 26151e8..35f8111 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -298,14 +298,32 @@ void kvmhv_nested_exit(void)
 	}
 }
 
+static void kvmhv_flush_lpid(unsigned int lpid)
+{
+	long rc;
+
+	if (!kvmhv_on_pseries()) {
+		radix__flush_tlb_lpid(lpid);
+		return;
+	}
+
+	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
+				lpid, TLBIEL_INVAL_SET_LPID);
+	if (rc)
+		pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
+}
+
 void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
 {
-	if (cpu_has_feature(CPU_FTR_HVMODE)) {
+	if (!kvmhv_on_pseries()) {
 		mmu_partition_table_set_entry(lpid, dw0, dw1);
-	} else {
-		pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
-		pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+		return;
 	}
+
+	pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+	pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+	/* L0 will do the necessary barriers */
+	kvmhv_flush_lpid(lpid);
 }
 
 static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
@@ -482,7 +500,7 @@ static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
 	spin_lock(&kvm->mmu_lock);
 	kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
 	spin_unlock(&kvm->mmu_lock);
-	radix__flush_tlb_lpid(gp->shadow_lpid);
+	kvmhv_flush_lpid(gp->shadow_lpid);
 	kvmhv_update_ptbl_cache(gp);
 	if (gp->l1_gr_to_hr == 0)
 		kvmhv_remove_nested(gp);
@@ -766,7 +784,7 @@ static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
 		spin_lock(&kvm->mmu_lock);
 		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
 					  gp->shadow_lpid);
-		radix__flush_tlb_lpid(gp->shadow_lpid);
+		kvmhv_flush_lpid(gp->shadow_lpid);
 		spin_unlock(&kvm->mmu_lock);
 		break;
 	case 1:
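To make the RB encoding in kvmppc_radix_tlbie_page() concrete: the third hypercall argument is the page-aligned effective address with the actual-page-size (AP) field shifted into place; PPC_BITLSHIFT(58) evaluates to 63 - 58 = 5, putting AP in RB bits 58:60 (IBM numbering). A stand-alone sketch, assuming the kernel's shift_to_mmu_psize() and mmu_get_ap() helpers as used in the patch; the helper name is hypothetical:

/*
 * Illustration only: build the RB argument for a single-page
 * H_TLB_INVALIDATE, mirroring kvmppc_radix_tlbie_page() above.
 */
static unsigned long h_tlbie_page_rb(unsigned long addr, unsigned int pshift)
{
	int psi = shift_to_mmu_psize(pshift);	/* MMU page-size index */

	addr &= ~((1UL << pshift) - 1);		/* clear sub-page-size bits */
	return addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
}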