@@ -848,6 +848,29 @@ function properly, this is the place to put them.
__u8 pad[64];
};
+4.37 KVM_SWITCH_DIRTY_LOG (vm ioctl)
+
+Capability: KVM_CAP_USER_DIRTY_BITMAP
+Architectures: x86, ia64, ppc
+Type: vm ioctl
+Parameters: struct kvm_dirty_log (in/out)
+Returns: 0 on success, -1 on error
+
+/* for KVM_SWITCH_DIRTY_LOG */
+struct kvm_dirty_log {
+ __u32 slot;
+ __u32 padding1;
+ union {
+ void __user *dirty_bitmap; /* one bit per page */
+ __u64 addr;
+ };
+};
+
+Given a memory slot, switch it to a fresh internal dirty bitmap and
+return in the addr field the address of the old bitmap, which records
+every page dirtied since the previous call.  Bit 0 is the first page
+in the memory slot.  Clear the entire structure to avoid padding issues.
+
5. The kvm_run structure
Application code obtains a pointer to the kvm_run structure by
@@ -1843,8 +1843,8 @@ out:
return r;
}
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log)
+static int kvm_ia64_update_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
+ bool need_copy)
{
int r;
unsigned long n;
@@ -1857,7 +1857,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (r)
goto out;
- r = kvm_get_dirty_log(kvm, log);
+ r = kvm_update_dirty_log(kvm, log, need_copy);
if (r)
goto out;
@@ -1865,10 +1865,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
/* If nothing is dirty, don't bother messing with page tables. */
if (memslot->is_dirty) {
kvm_flush_remote_tlbs(kvm);
- n = kvm_dirty_bitmap_bytes(memslot);
- clear_user(memslot->dirty_bitmap, n);
memslot->is_dirty = false;
}
+
r = 0;
out:
mutex_unlock(&kvm->slots_lock);
@@ -1876,6 +1875,16 @@ out:
return r;
}
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ return kvm_ia64_update_dirty_log(kvm, log, true);
+}
+
+int kvm_vm_ioctl_switch_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ return kvm_ia64_update_dirty_log(kvm, log, false);
+}
+
int kvm_arch_hardware_setup(void)
{
return 0;
@@ -1112,8 +1112,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log)
+static int kvmppc_update_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log,
+ bool need_copy)
{
struct kvm_memory_slot *memslot;
struct kvm_vcpu *vcpu;
@@ -1123,7 +1124,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
mutex_lock(&kvm->slots_lock);
- r = kvm_get_dirty_log(kvm, log);
+ r = kvm_update_dirty_log(kvm, log, need_copy);
if (r)
goto out;
@@ -1136,8 +1137,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
kvm_for_each_vcpu(n, vcpu, kvm)
kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
- n = kvm_dirty_bitmap_bytes(memslot);
- clear_user(memslot->dirty_bitmap, n);
memslot->is_dirty = false;
}
@@ -1147,6 +1146,16 @@ out:
return r;
}
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ return kvmppc_update_dirty_log(kvm, log, true);
+}
+
+int kvm_vm_ioctl_switch_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ return kvmppc_update_dirty_log(kvm, log, false);
+}
+
int kvmppc_core_check_processor_compat(void)
{
return 0;
@@ -2718,11 +2718,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
return 0;
}
-/*
- * Get (and clear) the dirty memory log for a memory slot.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log)
+static int kvm_x86_update_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log,
+ bool need_copy)
{
int r;
struct kvm_memory_slot *memslot;
@@ -2773,12 +2771,34 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
dirty_bitmap_old = dirty_bitmap;
}
- r = kvm_copy_dirty_bitmap(log->dirty_bitmap, dirty_bitmap_old, n);
+ if (need_copy) {
+ r = kvm_copy_dirty_bitmap(log->dirty_bitmap,
+ dirty_bitmap_old, n);
+ } else {
+ log->addr = (unsigned long)dirty_bitmap_old;
+ r = 0;
+ }
out:
mutex_unlock(&kvm->slots_lock);
return r;
}
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ return kvm_x86_update_dirty_log(kvm, log, true);
+}
+
+/*
+ * Switch to the next dirty bitmap and return the address of the old one.
+ */
+int kvm_vm_ioctl_switch_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ return kvm_x86_update_dirty_log(kvm, log, false);
+}
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -12,7 +12,7 @@
#include <linux/ioctl.h>
#include <asm/kvm.h>
-#define KVM_API_VERSION 12
+#define KVM_API_VERSION 13
/* *** Deprecated interfaces *** */
@@ -318,7 +318,7 @@ struct kvm_dirty_log {
__u32 padding1;
union {
void __user *dirty_bitmap; /* one bit per page */
- __u64 padding2;
+ __u64 addr;
};
};
@@ -524,6 +524,7 @@ struct kvm_enable_cap {
#define KVM_CAP_PPC_OSI 52
#define KVM_CAP_PPC_UNSET_IRQ 53
#define KVM_CAP_ENABLE_CAP 54
+#define KVM_CAP_USER_DIRTY_BITMAP 55
#ifdef KVM_CAP_IRQ_ROUTING
@@ -620,6 +621,7 @@ struct kvm_clock_data {
struct kvm_userspace_memory_region)
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+#define KVM_SWITCH_DIRTY_LOG      _IOWR(KVMIO, 0x49, struct kvm_dirty_log)
/* Device model IOC */
#define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
@@ -334,9 +334,10 @@ int kvm_dev_ioctl_check_extension(long ext);
int kvm_copy_dirty_bitmap(unsigned long __user *to,
const unsigned long __user *from,
unsigned long bytes);
-int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log);
+int kvm_update_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
+ bool need_copy);
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
+int kvm_vm_ioctl_switch_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct
@@ -844,7 +844,8 @@ out_fault:
#endif
}
-int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+int kvm_update_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
+ bool need_copy)
{
struct kvm_memory_slot *memslot;
int r;
@@ -861,9 +862,23 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
n = kvm_dirty_bitmap_bytes(memslot);
- r = -EFAULT;
- if (kvm_copy_dirty_bitmap(log->dirty_bitmap, memslot->dirty_bitmap, n))
- goto out;
+ if (need_copy) {
+ r = -EFAULT;
+ if (kvm_copy_dirty_bitmap(log->dirty_bitmap,
+ memslot->dirty_bitmap, n))
+ goto out;
+
+ if (memslot->is_dirty)
+ clear_user(memslot->dirty_bitmap, n);
+ } else {
+ unsigned long __user *dirty_bitmap;
+
+ dirty_bitmap = memslot->dirty_bitmap;
+ clear_user(memslot->dirty_bitmap_old, n);
+ memslot->dirty_bitmap = memslot->dirty_bitmap_old;
+ memslot->dirty_bitmap_old = dirty_bitmap;
+ log->addr = (unsigned long)dirty_bitmap;
+ }
r = 0;
out:
@@ -1699,6 +1714,21 @@ static long kvm_vm_ioctl(struct file *filp,
goto out;
break;
}
+ case KVM_SWITCH_DIRTY_LOG: {
+ struct kvm_dirty_log log;
+
+ r = -EFAULT;
+ if (copy_from_user(&log, argp, sizeof log))
+ goto out;
+ r = kvm_vm_ioctl_switch_dirty_log(kvm, &log);
+ if (r)
+ goto out;
+ r = -EFAULT;
+ if (copy_to_user(argp, &log, sizeof log))
+ goto out;
+ r = 0;
+ break;
+ }
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
case KVM_REGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
@@ -1790,7 +1820,7 @@ static long kvm_vm_compat_ioctl(struct file *filp,
goto out;
log.slot = compat_log.slot;
log.padding1 = compat_log.padding1;
- log.padding2 = compat_log.padding2;
+ log.addr = compat_log.padding2;
log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
@@ -1879,6 +1909,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
case KVM_CAP_SET_BOOT_CPU_ID:
#endif
case KVM_CAP_INTERNAL_ERROR_DATA:
+ case KVM_CAP_USER_DIRTY_BITMAP:
return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
case KVM_CAP_IRQ_ROUTING: