@@ -1225,16 +1225,16 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
WARN_ON(pte_huge(*ptep));
new_pfn = pte_pfn(*ptep);
- for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
- BUG_ON(!is_shadow_present_pte(*sptep));
- rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep);
+ if (pte_write(*ptep))
+ need_flush = kvm_unmap_rmapp(kvm, rmapp, slot, data);
+ else
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+ BUG_ON(!is_shadow_present_pte(*sptep));
+ rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n",
+ sptep, *sptep);
- need_flush = 1;
+ need_flush = 1;
- if (pte_write(*ptep)) {
- drop_spte(kvm, sptep);
- sptep = rmap_get_first(*rmapp, &iter);
- } else {
new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
new_spte |= (u64)new_pfn << PAGE_SHIFT;
@@ -1246,7 +1246,6 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
mmu_spte_set(sptep, new_spte);
sptep = rmap_get_next(&iter);
}
- }
if (need_flush)
kvm_flush_remote_tlbs(kvm);
In kvm_set_pte_rmapp, if the new mapping is writable, we need to remove all sptes pointing to that page; otherwise we only need to adjust the sptes to let them point to the new page. This patch clarifies the logic and makes the later patch cleaner [ Impact: no logic changed ] Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> --- arch/x86/kvm/mmu.c | 17 ++++++++--------- 1 files changed, 8 insertions(+), 9 deletions(-)