The demand paging test is currently a simple page access test which,
while potentially useful, doesn't add much versus the existing dirty
logging test. To improve the demand paging test, add a basic
userfaultfd demand paging implementation.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 .../selftests/kvm/demand_paging_test.c | 157 ++++++++++++++++++
 1 file changed, 157 insertions(+)

@@ -11,11 +11,14 @@
#include <stdio.h>
#include <stdlib.h>
+#include <sys/syscall.h>
#include <unistd.h>
#include <time.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
#include <pthread.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/userfaultfd.h>
#include "test_util.h"
#include "kvm_util.h"
@@ -29,6 +32,8 @@
/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM 0xc0000000
+/* Fall back to the x86_64 value if the libc doesn't define it. */
+#ifndef __NR_userfaultfd
+#define __NR_userfaultfd 323
+#endif
+
/*
* Guest/Host shared variables. Ensure addr_gva2hva() and/or
* sync_global_to/from_guest() are used when accessing from
@@ -39,6 +44,8 @@ static uint64_t host_page_size;
static uint64_t guest_page_size;
static uint64_t guest_num_pages;
+static char *guest_data_prototype;
+
/*
* Guest physical memory offset of the testing memory slot.
* This will be set to the topmost valid physical address minus
@@ -110,13 +117,153 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
return vm;
}
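+/*
+ * Service one fault reported on the userfaultfd: copy the prototype page
+ * into the faulting address with UFFDIO_COPY, which also wakes the thread
+ * blocked on the fault.
+ */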
+static int handle_uffd_page_request(int uffd, uint64_t addr)
+{
+ pid_t tid;
+ struct uffdio_copy copy;
+ int r;
+
+ tid = syscall(__NR_gettid);
+
+ copy.src = (uint64_t)guest_data_prototype;
+ copy.dst = addr;
+ copy.len = host_page_size;
+ copy.mode = 0;
+
+ r = ioctl(uffd, UFFDIO_COPY, &copy);
+ if (r == -1) {
+ DEBUG("Failed Paged in 0x%lx from thread %d with errno: %d\n",
+ addr, tid, errno);
+ return r;
+ }
+
+ return 0;
+}
+
+/* Set by the main thread to tell the uffd handler thread to exit. */
+static bool quit_uffd_thread;
+
+struct uffd_handler_args {
+ int uffd;
+};
+
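+/*
+ * Handler thread main loop: poll the userfaultfd for page fault events
+ * and service each one until the main thread asks us to quit.
+ */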
+static void *uffd_handler_thread_fn(void *arg)
+{
+ struct uffd_handler_args *uffd_args = (struct uffd_handler_args *)arg;
+ int uffd = uffd_args->uffd;
+ int64_t pages = 0;
+
+ while (!quit_uffd_thread) {
+ struct uffd_msg msg;
+ struct pollfd pollfd[1];
+ int r;
+ uint64_t addr;
+
+ pollfd[0].fd = uffd;
+ pollfd[0].events = POLLIN;
+
+ r = poll(pollfd, 1, 2000);
+ switch (r) {
+ case -1:
+ DEBUG("poll err");
+ continue;
+ case 0:
+ continue;
+ case 1:
+ break;
+ default:
+ DEBUG("Polling uffd returned %d", r);
+ return NULL;
+ }
+
+ if (pollfd[0].revents & POLLERR) {
+ DEBUG("uffd revents has POLLERR");
+ return NULL;
+ }
+
+ if (!(pollfd[0].revents & POLLIN))
+ continue;
+
+ r = read(uffd, &msg, sizeof(msg));
+ if (r == -1) {
+ if (errno == EAGAIN)
+ continue;
+ DEBUG("Read of uffd gor errno %d", errno);
+ return NULL;
+ }
+
+ if (r != sizeof(msg)) {
+ DEBUG("Read on uffd returned unexpected size: %d bytes",
+ r);
+ return NULL;
+ }
+
+ /* event is an enum-style value, not a bit mask. */
+ if (msg.event != UFFD_EVENT_PAGEFAULT)
+ continue;
+
+ addr = msg.arg.pagefault.address;
+ r = handle_uffd_page_request(uffd, addr);
+ if (r < 0)
+ return NULL;
+ pages++;
+ }
+
+ DEBUG("Handled %ld page fault(s)\n", (long)pages);
+
+ return NULL;
+}
+
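+/*
+ * Create a userfaultfd, negotiate the API with the kernel, register the
+ * test memory slot for missing-page notifications, and start the handler
+ * thread that will service the guest's faults.
+ */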
+static int setup_demand_paging(struct kvm_vm *vm,
+ pthread_t *uffd_handler_thread)
+{
+ int uffd;
+ struct uffdio_api uffdio_api;
+ struct uffdio_register uffdio_register;
+ /* static: the handler thread uses this after this function returns. */
+ static struct uffd_handler_args uffd_args;
+
+ guest_data_prototype = malloc(host_page_size);
+ TEST_ASSERT(guest_data_prototype,
+ "Failed to allocate host page for guest data prototype.");
+ memset(guest_data_prototype, 0xAB, host_page_size);
+
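+ /*
+  * Open the userfaultfd non-blocking: the handler thread polls with a
+  * timeout so it can notice quit_uffd_thread being set.
+  */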
+ uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+ if (uffd == -1) {
+ DEBUG("uffd creation failed\n");
+ return -1;
+ }
+
+ uffdio_api.api = UFFD_API;
+ uffdio_api.features = 0;
+ if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) {
+ DEBUG("ioctl uffdio_api failed\n");
+ return -1;
+ }
+
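+ /* Register the test memory region for missing-page notifications. */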
+ uffdio_register.range.start = (uint64_t)host_test_mem;
+ uffdio_register.range.len = host_num_pages * host_page_size;
+ uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) {
+ DEBUG("ioctl uffdio_register failed\n");
+ return -1;
+ }
+
+ if ((uffdio_register.ioctls & UFFD_API_RANGE_IOCTLS) !=
+ UFFD_API_RANGE_IOCTLS) {
+ DEBUG("unexpected userfaultfd ioctl set\n");
+ return -1;
+ }
+
+ uffd_args.uffd = uffd;
+ pthread_create(uffd_handler_thread, NULL, uffd_handler_thread_fn,
+ &uffd_args);
+
+ return 0;
+}
+
#define GUEST_MEM_SHIFT 30 /* 1G */
#define PAGE_SHIFT_4K 12
static void run_test(enum vm_guest_mode mode)
{
pthread_t vcpu_thread;
+ pthread_t uffd_handler_thread;
struct kvm_vm *vm;
+ int r;
/*
* We reserve page table for 2 times of extra dirty mem which
@@ -173,6 +320,12 @@ static void run_test(enum vm_guest_mode mode)
/* Cache the HVA pointer of the region */
host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
+ /* Set up a userfaultfd to handle demand paging requests. */
+ quit_uffd_thread = false;
+ r = setup_demand_paging(vm, &uffd_handler_thread);
+ if (r < 0)
+ exit(-r);
+
#ifdef __x86_64__
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
#endif
@@ -191,6 +344,10 @@ static void run_test(enum vm_guest_mode mode)
/* Wait for the vcpu thread to quit */
pthread_join(vcpu_thread, NULL);
+ /* Tell the userfaultfd handler thread to quit */
+ quit_uffd_thread = true;
+ pthread_join(uffd_handler_thread, NULL);
+
ucall_uninit(vm);
kvm_vm_free(vm);
}
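For reviewers unfamiliar with the interface, here is a rough standalone
sketch of the round-trip the test exercises, outside the KVM harness: one
thread touches an anonymous mapping registered with a userfaultfd, and a
second thread resolves the fault with UFFDIO_COPY. This is illustration
only, not part of the patch; it handles a single page with minimal error
checking, and it assumes a 4.3+ kernel, a libc that defines
__NR_userfaultfd, and (on newer kernels) permission to create an
unprivileged userfaultfd.

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static long page_size;

/* Resolve the first fault on the registered range with UFFDIO_COPY. */
static void *handler_fn(void *arg)
{
        int uffd = *(int *)arg;
        struct uffd_msg msg;
        char *page;

        /* The uffd is blocking here, so read() waits for a fault. */
        if (read(uffd, &msg, sizeof(msg)) != sizeof(msg) ||
            msg.event != UFFD_EVENT_PAGEFAULT) {
                fprintf(stderr, "unexpected uffd message\n");
                exit(1);
        }

        page = malloc(page_size);
        memset(page, 0xAB, page_size);

        struct uffdio_copy copy = {
                .src = (unsigned long)page,
                .dst = msg.arg.pagefault.address & ~(page_size - 1),
                .len = page_size,
        };
        /* Installs the page and wakes the faulting thread. */
        if (ioctl(uffd, UFFDIO_COPY, &copy) == -1) {
                perror("UFFDIO_COPY");
                exit(1);
        }
        free(page);
        return NULL;
}

int main(void)
{
        struct uffdio_api api = { .api = UFFD_API };
        struct uffdio_register reg;
        pthread_t thread;
        char *mem;
        int uffd;

        page_size = sysconf(_SC_PAGE_SIZE);

        uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
        if (uffd == -1 || ioctl(uffd, UFFDIO_API, &api) == -1) {
                perror("userfaultfd/UFFDIO_API");
                return 1;
        }

        mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mem == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        reg.range.start = (unsigned long)mem;
        reg.range.len = page_size;
        reg.mode = UFFDIO_REGISTER_MODE_MISSING;
        if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1) {
                perror("UFFDIO_REGISTER");
                return 1;
        }

        pthread_create(&thread, NULL, handler_fn, &uffd);

        /* This access faults; the handler thread supplies the page. */
        printf("first byte: 0x%02x\n", (unsigned char)mem[0]);

        pthread_join(thread, NULL);
        return 0;
}

Reading mem[0] should print 0xab, the byte pattern the handler copied in,
which is the same round-trip the selftest drives with guest memory.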