@@ -371,8 +371,6 @@ long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
if (!psize)
return H_PARAMETER;
- pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
-
/* Find the memslot (if any) for this address */
gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
gfn = gpa >> PAGE_SHIFT;
@@ -408,6 +406,12 @@ long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
long pte_index, unsigned long pteh,
unsigned long ptel)
{
+ /*
+ * Clear a few bits when called via hcall
+ */
+ pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
+ ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO | HPTE_GR_RESERVED);
+
return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
pteh, ptel, &vcpu->arch.gpr[4]);
}
@@ -1560,6 +1564,13 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
err = -EIO;
+ /*
+ * Clear a few bits we got via read_htab which we
+ * don't need to carry forward.
+ */
+ v &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
+ r &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO | HPTE_GR_RESERVED);
+
ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
tmp);
if (ret != H_SUCCESS) {
@@ -182,8 +182,6 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
if (!psize)
return H_PARAMETER;
writing = hpte_is_writable(ptel);
- pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
- ptel &= ~HPTE_GR_RESERVED;
g_ptel = ptel;
/* used later to detect if we might have been invalidated */
@@ -367,6 +365,12 @@ EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel)
{
+ /*
+ * Clear a few bits when called via hcall
+ */
+ pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
+ ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO | HPTE_GR_RESERVED);
+
return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}
We will use this to set the HPTE_V_VRMA bit in a later patch. This also
makes sure we clear the hpte bits only when called via hcall.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/kvm/book3s_64_mmu_hv.c | 15 +++++++++++++--
 arch/powerpc/kvm/book3s_hv_rm_mmu.c |  8 ++++++--
 2 files changed, 19 insertions(+), 4 deletions(-)
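
For context, the structural point of the change: the bit-clearing moves out of the
shared insertion helpers (kvmppc_virtmode_do_h_enter / kvmppc_do_h_enter) and into
the hcall entry points, so the helpers trust their callers and internal callers can
pass bits a guest must never set (such as HPTE_V_VRMA in the later patch), while the
kvm_htab_write() restore path above now strips those bits explicitly because its
input comes from userspace rather than from a hypercall. Below is a minimal,
self-contained C sketch of that split; hpte_insert(), h_enter_hcall() and the bit
values are made-up stand-ins for illustration, not the kernel's actual code.

#include <stdio.h>

/* Simplified stand-ins for the HPTE bit definitions (values are illustrative). */
#define HPTE_V_HVLOCK    (1UL << 0)
#define HPTE_V_ABSENT    (1UL << 1)
#define HPTE_V_VALID     (1UL << 2)
#define HPTE_V_VRMA      (1UL << 3)
#define HPTE_R_KEY_LO    (1UL << 4)
#define HPTE_GR_RESERVED (1UL << 5)

/*
 * Shared helper: trusts its callers, so an internal caller may pass
 * special bits (e.g. HPTE_V_VRMA) that a guest is not allowed to set.
 */
static long hpte_insert(unsigned long pteh, unsigned long ptel)
{
	printf("inserting HPTE: v=%#lx r=%#lx\n", pteh, ptel);
	return 0; /* H_SUCCESS */
}

/*
 * Hcall entry point: pteh/ptel are guest-controlled here, so the
 * guest-forbidden bits are cleared before calling the helper.
 */
static long h_enter_hcall(unsigned long pteh, unsigned long ptel)
{
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~(HPTE_R_KEY_LO | HPTE_GR_RESERVED);
	return hpte_insert(pteh, ptel);
}

int main(void)
{
	/* Guest path: forbidden bits are stripped at the boundary. */
	h_enter_hcall(HPTE_V_VALID | HPTE_V_HVLOCK, HPTE_GR_RESERVED | 0x1000);

	/* Internal path: e.g. VRMA setup can keep HPTE_V_VRMA intact. */
	hpte_insert(HPTE_V_VRMA, 0x1000);
	return 0;
}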