[v4] kvm tools, vesa: Use guest-mapped memory for framebuffer

Message ID 1307377290-6421-1-git-send-email-penberg@kernel.org
State New, archived

Commit Message

Pekka Enberg June 6, 2011, 4:21 p.m. UTC
This patch converts hw/vesa.c to use guest-mapped memory for the framebuffer
and drops the slow MMIO emulation, which speeds up framebuffer accesses
considerably. Please note that this can be optimized even further with the
KVM_GET_DIRTY_LOG ioctl(), as explained by Alexander Graf.
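The dirty-log optimization would look roughly like the sketch below: instead of
repainting the whole framebuffer on every refresh, the display code asks KVM
which framebuffer pages the guest wrote since the last pass. This is a minimal
sketch, not part of the patch; it assumes the framebuffer slot was registered
with the KVM_MEM_LOG_DIRTY_PAGES flag set, and fb__repaint_page() is a
hypothetical repaint helper:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	#define FB_PAGE_SIZE	4096
	#define BITS_PER_LONG	(8 * sizeof(unsigned long))
	#define FB_NR_PAGES	(VESA_MEM_SIZE / FB_PAGE_SIZE)

	/* One dirty bit per framebuffer page; the kernel fills whole longs. */
	static unsigned long fb_dirty[(FB_NR_PAGES + BITS_PER_LONG - 1) / BITS_PER_LONG];

	static void vesa__sync_dirty(struct kvm *kvm, u32 fb_slot)
	{
		struct kvm_dirty_log log = {
			.slot		= fb_slot,
			.dirty_bitmap	= fb_dirty,
		};
		u64 i;

		/* Fetch and clear the per-page dirty bits for this slot. */
		if (ioctl(kvm->vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
			return;

		/* Repaint only the pages the guest wrote since the last call. */
		for (i = 0; i < FB_NR_PAGES; i++)
			if (fb_dirty[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
				fb__repaint_page(&vesafb, i);	/* hypothetical helper */
	}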

Cc: Alexander Graf <agraf@suse.de>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: John Floren <john@jfloren.net>
Cc: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
---
v3 -> v4: Fix overlapping memory region when the guest has 4 GB of memory.

 tools/kvm/hw/vesa.c         |   17 +++++------------
 tools/kvm/include/kvm/kvm.h |    9 ++++++++-
 tools/kvm/kvm.c             |   15 ++++++++++-----
 3 files changed, 23 insertions(+), 18 deletions(-)
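For reference, the mmap() call in the vesa.c hunk below uses two kvm tools
shorthands. Assuming the usual definitions (an assumption; they are not shown
in this patch), the allocation expands to something like the sketch below.
Unlike the calloc() buffer it replaces, the mapping is page-aligned, which KVM
requires for userspace_addr, and anonymous mappings are zero-filled, so the
calloc() semantics are preserved:

	#include <sys/mman.h>

	/* Presumed expansions of the kvm tools helper macros: */
	#define PROT_RW			(PROT_READ | PROT_WRITE)
	#define MAP_ANON_NORESERVE	(MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE)

	static void *vesa_alloc_fb(void)
	{
		/*
		 * Page-aligned, zero-filled backing for the framebuffer;
		 * MAP_NORESERVE avoids committing swap for pages the guest
		 * never touches.
		 */
		void *mem = mmap(NULL, VESA_MEM_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);

		return mem == MAP_FAILED ? NULL : mem;
	}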

Patch

diff --git a/tools/kvm/hw/vesa.c b/tools/kvm/hw/vesa.c
index 48d31ce..71322fc 100644
--- a/tools/kvm/hw/vesa.c
+++ b/tools/kvm/hw/vesa.c
@@ -8,6 +8,7 @@ 
 #include "kvm/irq.h"
 #include "kvm/kvm.h"
 #include "kvm/pci.h"
+#include <sys/mman.h>
 
 #include <sys/types.h>
 #include <sys/ioctl.h>
@@ -40,14 +41,6 @@  static struct pci_device_header vesa_pci_device = {
 	.bar[1]			= VESA_MEM_ADDR | PCI_BASE_ADDRESS_SPACE_MEMORY,
 };
 
-static void vesa_mmio_callback(u64 addr, u8 *data, u32 len, u8 is_write)
-{
-	if (!is_write)
-		return;
-
-	fb__write(addr, data, len);
-}
-
 static struct framebuffer vesafb;
 
 struct framebuffer *vesa__init(struct kvm *kvm)
@@ -65,12 +58,12 @@  struct framebuffer *vesa__init(struct kvm *kvm)
 	vesa_pci_device.bar[0]		= vesa_base_addr | PCI_BASE_ADDRESS_SPACE_IO;
 	pci__register(&vesa_pci_device, dev);
 
-	kvm__register_mmio(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, &vesa_mmio_callback);
-
-	mem = calloc(1, VESA_MEM_SIZE);
-	if (!mem)
+	mem = mmap(NULL, VESA_MEM_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
+	if (mem == MAP_FAILED)
 		return NULL;
 
+	kvm__register_mem(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, mem);
+
 	vesafb = (struct framebuffer) {
 		.width			= VESA_WIDTH,
 		.height			= VESA_HEIGHT,
diff --git a/tools/kvm/include/kvm/kvm.h b/tools/kvm/include/kvm/kvm.h
index 55551de..4eeed87 100644
--- a/tools/kvm/include/kvm/kvm.h
+++ b/tools/kvm/include/kvm/kvm.h
@@ -8,7 +8,11 @@ 
 #include <time.h>
 
 #define KVM_NR_CPUS		(255)
-#define KVM_32BIT_GAP_SIZE	(512 << 20)
+
+/*
+ * The hole includes VESA framebuffer and PCI memory.
+ */
+#define KVM_32BIT_GAP_SIZE	(768 << 20)
 #define KVM_32BIT_GAP_START	((1ULL << 32) - KVM_32BIT_GAP_SIZE)
 
 #define SIGKVMEXIT		(SIGRTMIN + 0)
@@ -21,6 +25,8 @@  struct kvm {
 
 	int			nrcpus;		/* Number of cpus to run */
 
+	u32			mem_slots;	/* for KVM_SET_USER_MEMORY_REGION */
+
 	u64			ram_size;
 	void			*ram_start;
 
@@ -49,6 +55,7 @@  void kvm__stop_timer(struct kvm *kvm);
 void kvm__irq_line(struct kvm *kvm, int irq, int level);
 bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count);
 bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write);
+void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr);
 bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
 bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr);
 void kvm__pause(void);
diff --git a/tools/kvm/kvm.c b/tools/kvm/kvm.c
index 54e3203..2c56a79 100644
--- a/tools/kvm/kvm.c
+++ b/tools/kvm/kvm.c
@@ -162,13 +162,18 @@  static bool kvm__cpu_supports_vm(void)
 	return regs.ecx & (1 << feature);
 }
 
-static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr)
+/*
+ * Note: KVM_SET_USER_MEMORY_REGION assumes that we don't pass overlapping
+ * memory regions to it. Therefore, be careful if you use this function for
+ * registering memory regions for emulating hardware.
+ */
+void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
 {
 	struct kvm_userspace_memory_region mem;
 	int ret;
 
 	mem = (struct kvm_userspace_memory_region) {
-		.slot			= slot,
+		.slot			= kvm->mem_slots++,
 		.guest_phys_addr	= guest_phys,
 		.memory_size		= size,
 		.userspace_addr		= (unsigned long)userspace_addr,
@@ -200,7 +205,7 @@  void kvm__init_ram(struct kvm *kvm)
 		phys_size  = kvm->ram_size;
 		host_mem   = kvm->ram_start;
 
-		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
+		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
 	} else {
 		/* First RAM range from zero to the PCI gap: */
 
@@ -208,7 +213,7 @@  void kvm__init_ram(struct kvm *kvm)
 		phys_size  = KVM_32BIT_GAP_START;
 		host_mem   = kvm->ram_start;
 
-		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
+		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
 
 		/* Second RAM range from 4GB to the end of RAM: */
 
@@ -216,7 +221,7 @@  void kvm__init_ram(struct kvm *kvm)
 		phys_size  = kvm->ram_size - phys_size;
 		host_mem   = kvm->ram_start + phys_start;
 
-		kvm_register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);
+		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
 	}
 }
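
The v3 -> v4 fix is easiest to verify with the numbers written out. Below is a
small standalone check, assuming VESA_MEM_ADDR is 0xd0000000 as defined in
hw/vesa.h (that value is an assumption; it is not shown in this patch):

	#include <assert.h>
	#include <stdio.h>

	#define KVM_32BIT_GAP_SIZE	(768ULL << 20)	/* 0x30000000 */
	#define KVM_32BIT_GAP_START	((1ULL << 32) - KVM_32BIT_GAP_SIZE)

	/* Assumed value from hw/vesa.h; not shown in this patch. */
	#define VESA_MEM_ADDR		0xd0000000ULL

	int main(void)
	{
		/*
		 * With the old 512 MB gap the hole started at 0xe0000000, so
		 * a guest with 4 GB of RAM got a low RAM slot covering
		 * [0, 0xe0000000) and the framebuffer slot at 0xd0000000
		 * overlapped it, which KVM_SET_USER_MEMORY_REGION rejects.
		 * Growing the gap to 768 MB moves its start down to exactly
		 * 0xd0000000, placing the framebuffer inside the hole.
		 */
		assert(VESA_MEM_ADDR >= KVM_32BIT_GAP_START);
		printf("gap start: %#llx\n", (unsigned long long)KVM_32BIT_GAP_START);
		return 0;
	}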