@@ -1249,6 +1249,8 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 {
 	unsigned long *rmapp;
 
+	spin_lock(&kvm->mmu_lock);
+
 	while (mask) {
 		rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
 				      PT_PAGE_TABLE_LEVEL, slot);
@@ -1257,6 +1259,8 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 		/* clear the first set bit */
 		mask &= mask - 1;
 	}
+
+	spin_unlock(&kvm->mmu_lock);
 }
 
 static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3543,8 +3543,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
 	memset(dirty_bitmap_buffer, 0, n);
 
-	spin_lock(&kvm->mmu_lock);
-
 	for (i = 0; i < n / sizeof(long); i++) {
 		unsigned long mask;
 		gfn_t offset;
@@ -3563,8 +3561,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 	if (is_dirty)
 		kvm_flush_remote_tlbs(kvm);
 
-	spin_unlock(&kvm->mmu_lock);
-
 	r = -EFAULT;
 	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
 		goto out;
Xiao's "KVM: MMU: flush tlb if the spte can be locklessly modified" allows us to release mmu_lock before flushing TLBs. Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp> Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> --- Xiao can change the remaining mmu_lock to RCU's read-side lock: The grace period will be reasonably limited. arch/x86/kvm/mmu.c | 4 ++++ arch/x86/kvm/x86.c | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-)