--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -259,7 +259,17 @@ static int is_dirty_gpte(unsigned long pte)
static int is_rmap_spte(u64 pte)
{
- return is_shadow_present_pte(pte);
+	return !!(pte & PT_RMAP_MASK);
+}
+
+static void spte_set_rmap(u64 *spte)
+{
+ *spte |= PT_RMAP_MASK;
+}
+
+static void spte_clear_rmap(u64 *spte)
+{
+ *spte &= ~PT_RMAP_MASK;
}
static int is_last_spte(u64 pte, int level)
@@ -543,7 +553,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
unsigned long *rmapp;
int i, count = 0;
- if (!is_rmap_spte(*spte))
+ if (!is_shadow_present_pte(*spte) || !is_writable_pte(*spte))
return count;
gfn = unalias_gfn(vcpu->kvm, gfn);
sp = page_header(__pa(spte));
@@ -573,6 +583,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
;
desc->sptes[i] = spte;
}
+ spte_set_rmap(spte);
return count;
}
@@ -610,6 +621,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
if (!is_rmap_spte(*spte))
return;
+ spte_clear_rmap(spte);
sp = page_header(__pa(spte));
pfn = spte_to_pfn(*spte);
if (*spte & shadow_accessed_mask)
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -22,6 +22,7 @@
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
+#define PT_RMAP_MASK (1ULL << 9)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)