--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10031,6 +10031,9 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
 	 * See the comments in fast_page_fault().
 	 */
 	if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+			bitmap_set(new->dirty_bitmap, 0, new->npages);
+
 		if (kvm_x86_ops->slot_enable_log_dirty) {
 			kvm_x86_ops->slot_enable_log_dirty(kvm, new);
 		} else {
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1289,9 +1289,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		r = kvm_alloc_dirty_bitmap(&new);
 		if (r)
 			return r;
-
-		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
-			bitmap_set(new.dirty_bitmap, 0, new.npages);
 	}
 
 	r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);

Currently, when KVM_DIRTY_LOG_INITIALLY_SET is enabled, the initial
setting of the memslot dirty bitmap is done in the arch-shared path.
Making this implementation architecture dependent allows
architecture-specific optimizations (e.g. only marking pages that have
valid translation entries as dirty, so that userspace can use this
information to send only those pages during migration).

Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
---
 arch/x86/kvm/x86.c  | 3 +++
 virt/kvm/kvm_main.c | 3 ---
 2 files changed, 3 insertions(+), 3 deletions(-)
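
As a note on the optimization the commit message alludes to, below is a
minimal sketch (not part of this patch) of what an architecture-dependent
initial bitmap population could look like. Both kvm_arch_init_dirty_bitmap()
and arch_gfn_has_valid_translation() are hypothetical names used only for
illustration: the idea is to set bits only for pages that already have a
valid translation entry, rather than setting the whole bitmap.

#include <linux/kvm_host.h>

/*
 * Illustrative sketch only: populate the initial dirty bitmap with just
 * the pages that already have a valid translation entry.
 * arch_gfn_has_valid_translation() is a hypothetical arch helper.
 */
static void kvm_arch_init_dirty_bitmap(struct kvm *kvm,
				       struct kvm_memory_slot *new)
{
	unsigned long i;

	for (i = 0; i < new->npages; i++) {
		/* Only pages with a valid translation can become dirty. */
		if (arch_gfn_has_valid_translation(kvm, new->base_gfn + i))
			set_bit_le(i, new->dirty_bitmap);
	}
}

With the x86.c hunk above, such a helper could stand in for the
unconditional bitmap_set() when kvm_dirty_log_manual_protect_and_init_set()
is true; this patch keeps the unconditional set so that behaviour matches
the previous common-path implementation.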