--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -24,6 +24,7 @@
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
+#include <asm/ppc-opcode.h>
#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
@@ -121,6 +122,10 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
void kvmhv_put_nested(struct kvm_nested_guest *gp);
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);
+/* Encoding of first parameter for H_TLB_INVALIDATE */
+#define H_TLBIE_P1_ENC(ric, prs, r) (___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+ ___PPC_R(r))
+
/* Power architecture requires HPT is at least 256kiB, at most 64TiB */
#define PPC_MIN_HPT_ORDER 18
#define PPC_MAX_HPT_ORDER 46
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -201,17 +201,43 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
 				    unsigned int pshift, unsigned int lpid)
{
unsigned long psize = PAGE_SIZE;
+ int psi;
+ long rc;
+ unsigned long rb;
if (pshift)
psize = 1UL << pshift;
+ else
+ pshift = PAGE_SHIFT;
addr &= ~(psize - 1);
- radix__flush_tlb_lpid_page(lpid, addr, psize);
+
+ if (!kvmhv_on_pseries()) {
+ radix__flush_tlb_lpid_page(lpid, addr, psize);
+ return;
+ }
+
+ psi = shift_to_mmu_psize(pshift);
+ rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+ lpid, rb);
+ if (rc)
+ pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{
- radix__flush_pwc_lpid(lpid);
+ long rc;
+
+ if (!kvmhv_on_pseries()) {
+ radix__flush_pwc_lpid(lpid);
+ return;
+ }
+
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ if (rc)
+ pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -366,15 +366,20 @@ void kvmhv_nested_exit(void)
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
{
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ long rc;
+
+ if (!kvmhv_on_pseries()) {
mmu_partition_table_set_entry(lpid, dw0, dw1);
- } else {
- pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
- pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
- /* this will be emulated, L0 will do the necessary barriers */
- asm volatile(PPC_TLBIE_5(%0, %1, 2, 0, 1) : :
- "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+ return;
}
+
+ pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+ pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+ /* L0 will do the necessary barriers */
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ if (rc)
+ pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}
static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
This adds code to call the H_TLB_INVALIDATE hypercall when running as a
guest, in the cases where we need to invalidate TLBs (or other MMU
caches) as part of managing the mappings for a nested guest.

Calling H_TLB_INVALIDATE is an alternative to executing the tlbie
instruction and having it emulated by our hypervisor.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
---
 arch/powerpc/include/asm/kvm_book3s_64.h |  5 +++++
 arch/powerpc/kvm/book3s_64_mmu_radix.c   | 30 ++++++++++++++++++++++++++++--
 arch/powerpc/kvm/book3s_hv_nested.c      | 19 ++++++++++++-------
 3 files changed, 45 insertions(+), 9 deletions(-)
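
As a purely illustrative aside (not part of the patch): all three new
call sites pass R=1 (radix) and PRS=0 (partition-scoped) to
H_TLBIE_P1_ENC, and differ only in the RIC value and in the third
hypercall argument. A minimal sketch of a hypothetical wrapper that
captures this, reusing only the H_TLB_INVALIDATE, H_TLBIE_P1_ENC and
plpar_hcall_norets names that appear in the patch (the wrapper name
itself is made up for this example):

	/* Hypothetical helper, for illustration only -- not in the patch */
	static long kvmhv_tlb_invalidate(unsigned int lpid, int ric,
					 unsigned long val)
	{
		/*
		 * RIC selects what to invalidate: 0 = the TLB entry for the
		 * address encoded in val, 1 = the page-walk cache, 2 = all
		 * translation caches for the LPID.  R=1 selects radix and
		 * PRS=0 selects partition-scoped entries, as in the patch.
		 */
		return plpar_hcall_norets(H_TLB_INVALIDATE,
					  H_TLBIE_P1_ENC(ric, 0, 1),
					  lpid, val);
	}

With such a helper, the PWC flush above would read
kvmhv_tlb_invalidate(lpid, 1, TLBIEL_INVAL_SET_LPID), and the partition
table update would use RIC=2 with the same third argument.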