@@ -183,13 +183,6 @@ Shadow pages contain the following information:
perform a reverse map from a pte to a gfn. When role.direct is set, any
element of this array can be calculated from the gfn field when used, in
this case, the array of gfns is not allocated. See role.direct and gfn.
- slot_bitmap:
- A bitmap containing one bit per memory slot. If the page contains a pte
- mapping a page from memory slot n, then bit n of slot_bitmap will be set
- (if a page is aliased among several slots, then it is not guaranteed that
- all slots will be marked).
- Used during dirty logging to avoid scanning a shadow page if none of its
- pages need tracking.
root_count:
A counter keeping track of how many hardware registers (guest cr3 or
pdptrs) are now pointing at the page. While this counter is nonzero, the
@@ -197,11 +197,6 @@ struct kvm_mmu_page {
u64 *spt;
/* hold the gfn of each spte inside spt */
gfn_t *gfns;
- /*
- * One bit set per slot which has memory
- * in this shadow page.
- */
- DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
bool multimapped; /* More than one parent_pte? */
bool unsync;
int root_count; /* Currently serving as active root */
@@ -941,7 +941,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
PAGE_SIZE);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
- bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
sp->multimapped = 0;
sp->parent_pte = parent_pte;
--vcpu->kvm->arch.n_free_mmu_pages;
@@ -1660,14 +1659,6 @@ restart:
}
}
-static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
-{
- int slot = memslot_id(kvm, gfn);
- struct kvm_mmu_page *sp = page_header(__pa(pte));
-
- __set_bit(slot, sp->slot_bitmap);
-}
-
static void mmu_convert_notrap(struct kvm_mmu_page *sp)
{
int i;
@@ -1979,7 +1970,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (!was_rmapped && is_large_pte(*sptep))
++vcpu->kvm->stat.lpages;
- page_header_update_slot(vcpu->kvm, sptep, gfn);
if (!was_rmapped) {
rmap_count = rmap_add(vcpu, sptep, gfn);
kvm_release_pfn_clean(pfn);
@@ -2975,22 +2965,38 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
mmu_free_memory_caches(vcpu);
}
+static void rmapp_remove_write_access(struct kvm *kvm, unsigned long *rmapp)
+{
+ u64 *spte = rmap_next(kvm, rmapp, NULL);
+
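+ /*
+  * rmap_next(kvm, rmapp, NULL) returns the first spte on this gfn's
+  * reverse-map chain; rmap_next(kvm, rmapp, spte) returns the one
+  * after spte.  Only the W bit is cleared here; the TLB flush is
+  * deferred to the caller.
+  */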
+ while (spte) {
+ /* avoid RMW */
+ if (is_writable_pte(*spte))
+ *spte &= ~PT_WRITABLE_MASK;
+ spte = rmap_next(kvm, rmapp, spte);
+ }
+}
+
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
- struct kvm_mmu_page *sp;
+ int i;
+ unsigned long gfn_offset;
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ struct kvm_memory_slot *memslot = &slots->memslots[slot];
- list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
- int i;
- u64 *pt;
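+ /*
+  * Instead of scanning every active shadow page for the slot_bitmap
+  * bit, walk the slot's reverse-map chains directly: one 4k rmap per
+  * gfn in memslot->rmap[], plus one rmap_pde per large page in
+  * memslot->lpage_info[level - 2][].
+  */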
+ for (gfn_offset = 0; gfn_offset < memslot->npages; gfn_offset++) {
+ rmapp_remove_write_access(kvm, &memslot->rmap[gfn_offset]);
- if (!test_bit(slot, sp->slot_bitmap))
- continue;
+ for (i = 0; i < KVM_NR_PAGE_SIZES - 1; i++) {
+ unsigned long gfn = memslot->base_gfn + gfn_offset;
+ unsigned long huge = KVM_PAGES_PER_HPAGE(i + 2);
+ int idx = gfn / huge - memslot->base_gfn / huge;
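+ /*
+  * idx is the slot-relative index of the large page containing
+  * gfn at this level, e.g. base_gfn 5 with huge == 512 gives
+  * idx 0 for gfn 5 and idx 1 for gfn 512.
+  */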
- pt = sp->spt;
- for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
- /* avoid RMW */
- if (is_writable_pte(pt[i]))
- pt[i] &= ~PT_WRITABLE_MASK;
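+ /*
+  * Visit each large-page rmap exactly once: at the slot's first
+  * gfn, and at every gfn aligned to this level's huge page size.
+  * A gfn that is unaligned at this level is also unaligned at all
+  * larger levels, so stop walking the levels early.
+  */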
+ if (gfn_offset && (gfn % huge))
+ break;
+ rmapp_remove_write_access(kvm,
+ &memslot->lpage_info[i][idx].rmap_pde);
+ }
}
kvm_flush_remote_tlbs(kvm);
}
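
For context, a rough sketch of the dirty-logging path this function serves. This is not part of the patch; the wrapper name and the simplified body are assumptions, though kvm_get_dirty_log() and the mmu_lock usage follow the x86 code of this era:

	static int get_dirty_log_sketch(struct kvm *kvm, struct kvm_dirty_log *log)
	{
		int r, is_dirty = 0;

		/* copy out and clear the slot's dirty bitmap */
		r = kvm_get_dirty_log(kvm, log, &is_dirty);
		if (r)
			return r;

		/* re-arm write protection so the next guest write
		 * faults and can be logged again */
		if (is_dirty) {
			spin_lock(&kvm->mmu_lock);
			kvm_mmu_slot_remove_write_access(kvm, log->slot);
			spin_unlock(&kvm->mmu_lock);
		}

		return 0;
	}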