@@ -1823,11 +1823,19 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
n = kvm_dirty_bitmap_bytes(memslot);
base = memslot->base_gfn / BITS_PER_LONG;
+ r = -EFAULT;
+ if (!access_ok(VERIFY_WRITE, memslot->dirty_bitmap, n))
+ goto out;
+
for (i = 0; i < n/sizeof(long); ++i) {
if (dirty_bitmap[base + i])
memslot->is_dirty = true;
- memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
+ if (__put_user(dirty_bitmap[base + i],
+ &memslot->dirty_bitmap[i])) {
+ r = -EFAULT;
+ goto out;
+ }
dirty_bitmap[base + i] = 0;
}
r = 0;
@@ -1858,7 +1866,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (memslot->is_dirty) {
kvm_flush_remote_tlbs(kvm);
n = kvm_dirty_bitmap_bytes(memslot);
- memset(memslot->dirty_bitmap, 0, n);
+ clear_user(memslot->dirty_bitmap, n);
memslot->is_dirty = false;
}
r = 0;
@@ -1137,7 +1137,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
n = kvm_dirty_bitmap_bytes(memslot);
- memset(memslot->dirty_bitmap, 0, n);
+ clear_user(memslot->dirty_bitmap, n);
memslot->is_dirty = false;
}
@@ -2727,7 +2727,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
int r;
struct kvm_memory_slot *memslot;
unsigned long n;
- unsigned long *dirty_bitmap = NULL;
+ unsigned long __user *dirty_bitmap;
+ unsigned long __user *dirty_bitmap_old;
mutex_lock(&kvm->slots_lock);
@@ -2742,11 +2743,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
n = kvm_dirty_bitmap_bytes(memslot);
- r = -ENOMEM;
- dirty_bitmap = vmalloc(n);
- if (!dirty_bitmap)
- goto out;
- memset(dirty_bitmap, 0, n);
+ dirty_bitmap = memslot->dirty_bitmap;
+ dirty_bitmap_old = memslot->dirty_bitmap_old;
+ clear_user(dirty_bitmap_old, n);
/* If nothing is dirty, don't bother messing with page tables. */
if (memslot->is_dirty) {
@@ -2756,24 +2755,25 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
kvm_mmu_slot_remove_write_access(kvm, log->slot);
spin_unlock(&kvm->mmu_lock);
+ r = -ENOMEM;
slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!slots)
- goto out_free;
+ goto out;
memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
- slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+ slots->memslots[log->slot].dirty_bitmap = dirty_bitmap_old;
+ slots->memslots[log->slot].dirty_bitmap_old = dirty_bitmap;
slots->memslots[log->slot].is_dirty = false;
old_slots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
- dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
kfree(old_slots);
+
+ dirty_bitmap_old = dirty_bitmap;
}
- r = kvm_copy_dirty_bitmap(log->dirty_bitmap, dirty_bitmap, n);
-out_free:
- vfree(dirty_bitmap);
+ r = kvm_copy_dirty_bitmap(log->dirty_bitmap, dirty_bitmap_old, n);
out:
mutex_unlock(&kvm->slots_lock);
return r;
@@ -116,7 +116,8 @@ struct kvm_memory_slot {
unsigned long npages;
unsigned long flags;
unsigned long *rmap;
- unsigned long *dirty_bitmap;
+ unsigned long __user *dirty_bitmap;
+ unsigned long __user *dirty_bitmap_old;
bool is_dirty;
struct {
unsigned long rmap_pde;
@@ -331,7 +332,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
int kvm_dev_ioctl_check_extension(long ext);
int kvm_copy_dirty_bitmap(unsigned long __user *to,
- const unsigned long *from,
+ const unsigned long __user *from,
unsigned long bytes);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
@@ -433,8 +433,20 @@ out_err_nodisable:
static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
- vfree(memslot->dirty_bitmap);
+ unsigned long user_addr;
+ unsigned long n = kvm_dirty_bitmap_bytes(memslot);
+
+ if (!memslot->dirty_bitmap)
+ return;
+
+ user_addr = min((unsigned long)memslot->dirty_bitmap,
+ (unsigned long)memslot->dirty_bitmap_old);
+ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, user_addr, 2 * n);
+ up_write(&current->mm->mmap_sem);
+
memslot->dirty_bitmap = NULL;
+ memslot->dirty_bitmap_old = NULL;
}
/*
@@ -468,8 +480,12 @@ void kvm_free_physmem(struct kvm *kvm)
int i;
struct kvm_memslots *slots = kvm->memslots;
- for (i = 0; i < slots->nmemslots; ++i)
+ for (i = 0; i < slots->nmemslots; ++i) {
+ /* We don't munmap dirty bitmaps by ourselves. */
+ slots->memslots[i].dirty_bitmap = NULL;
+ slots->memslots[i].dirty_bitmap_old = NULL;
kvm_free_physmem_slot(&slots->memslots[i], NULL);
+ }
kfree(kvm->memslots);
}
@@ -523,13 +539,22 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
- unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
+ unsigned long user_addr;
+ unsigned long n = kvm_dirty_bitmap_bytes(memslot);
- memslot->dirty_bitmap = vmalloc(dirty_bytes);
- if (!memslot->dirty_bitmap)
- return -ENOMEM;
+ down_write(&current->mm->mmap_sem);
+ user_addr = do_mmap(NULL, 0, 2 * n,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, 0);
+ up_write(&current->mm->mmap_sem);
+
+ if (IS_ERR((void *)user_addr))
+ return PTR_ERR((void *)user_addr);
+
+ memslot->dirty_bitmap = (unsigned long __user *)user_addr;
+ memslot->dirty_bitmap_old = (unsigned long __user *)(user_addr + n);
+ clear_user(memslot->dirty_bitmap, 2 * n);
- memset(memslot->dirty_bitmap, 0, dirty_bytes);
return 0;
}
@@ -778,13 +803,45 @@ int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
}
int kvm_copy_dirty_bitmap(unsigned long __user *to,
- const unsigned long *from,
+ const unsigned long __user *from,
unsigned long bytes)
{
- if (copy_to_user(to, from, bytes))
+#if defined(CONFIG_X86_64) || defined(CONFIG_PPC64) || defined(CONFIG_IA64)
+ if (copy_in_user(to, from, bytes)) {
+ printk(KERN_WARNING "%s: copy_in_user failed.\n", __func__);
return -EFAULT;
+ }
+ return 0;
+#else
+ int num, bufbytes;
+ unsigned long buf[32];
+ if (!access_ok(VERIFY_READ, from, bytes) ||
+ !access_ok(VERIFY_WRITE, to, bytes)) {
+ goto out_fault;
+ }
+
+ bufbytes = sizeof(buf);
+ num = bufbytes / sizeof(buf[0]);
+
+ for (; bytes > bufbytes; bytes -= bufbytes, to += num, from += num) {
+ if (__copy_from_user(buf, from, bufbytes))
+ goto out_fault;
+ if (__copy_to_user(to, buf, bufbytes))
+ goto out_fault;
+ }
+ if (bytes > 0) {
+ if (__copy_from_user(buf, from, bytes))
+ goto out_fault;
+ if (__copy_to_user(to, buf, bytes))
+ goto out_fault;
+ }
return 0;
+
+out_fault:
+ printk(KERN_WARNING "%s: copy to(from) user failed.\n", __func__);
+ return -EFAULT;
+#endif
}
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
@@ -1194,13 +1251,35 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
+/*
+ * Please use generic *_user bitops once they become available.
+ * Be careful setting the bit won't be done atomically.
+ */
static int __mark_page_dirty(unsigned long nr,
- unsigned long *dirty_bitmap)
+ unsigned long __user *dirty_bitmap)
{
+ unsigned long user_addr;
+ u8 val;
+
#ifdef __BIG_ENDIAN
nr = nr ^ BITOP_LE_SWIZZLE;
#endif
- __set_bit(nr, dirty_bitmap);
+ user_addr = (unsigned long)dirty_bitmap + nr / 8;
+ if (!access_ok(VERIFY_WRITE, user_addr, 1))
+ goto out_fault;
+
+ if (__get_user(val, (u8 __user *)user_addr))
+ goto out_fault;
+
+ val |= 1U << (nr % 8);
+ if (__put_user(val, (u8 __user *)user_addr))
+ goto out_fault;
+
+ return 0;
+
+out_fault:
+ printk(KERN_WARNING "%s: setting user bit failed.\n", __func__);
+ return -EFAULT;
}
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)