diff mbox series

[V2,4/6] KVM: selftests: x86: Add helpers to execute VMs with private memory

Message ID 20221205232341.4131240-5-vannapurve@google.com (mailing list archive)
State New
Headers show
Series KVM: selftests: selftests for fd-based private memory | expand

Commit Message

Vishal Annapurve Dec. 5, 2022, 11:23 p.m. UTC
Introduce a set of APIs to execute VM with private memslots.

Host userspace APIs for:
1) Executing a vcpu run loop that handles MAPGPA hypercall
2) Backing/unbacking guest private memory

Guest APIs for:
1) Changing memory mapping type

Signed-off-by: Vishal Annapurve <vannapurve@google.com>
---
 tools/testing/selftests/kvm/Makefile          |   1 +
 .../kvm/include/x86_64/private_mem.h          |  24 +++
 .../selftests/kvm/lib/x86_64/private_mem.c    | 139 ++++++++++++++++++
 3 files changed, 164 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/include/x86_64/private_mem.h
 create mode 100644 tools/testing/selftests/kvm/lib/x86_64/private_mem.c

Comments

Sean Christopherson Jan. 17, 2023, 10:06 p.m. UTC | #1
On Mon, Dec 05, 2022, Vishal Annapurve wrote:
> Introduce a set of APIs to execute VM with private memslots.
> 
> Host userspace APIs for:
> 1) Executing a vcpu run loop that handles MAPGPA hypercall
> 2) Backing/unbacking guest private memory
> 
> Guest APIs for:
> 1) Changing memory mapping type
> 
> Signed-off-by: Vishal Annapurve <vannapurve@google.com>
> ---
>  tools/testing/selftests/kvm/Makefile          |   1 +
>  .../kvm/include/x86_64/private_mem.h          |  24 +++
>  .../selftests/kvm/lib/x86_64/private_mem.c    | 139 ++++++++++++++++++
>  3 files changed, 164 insertions(+)
>  create mode 100644 tools/testing/selftests/kvm/include/x86_64/private_mem.h
>  create mode 100644 tools/testing/selftests/kvm/lib/x86_64/private_mem.c

Given that we _know_ private memory isn't always going to be x86 specific, I don't
want to bury any helpers in x86_64 that aren't strictly x86 only.  E.g. helpers
for doing Intel+AMD hypercalls are ok, but "generic" private memory helpers belong
elsewhere.

I experimented with extracting memslots/mmu helpers out of kvm_util.c as prep work,
e.g. to avoid bloating kvm_util.c, but I couldn't come up with an obviously "good"
naming scheme and/or split.  At this time, the common bits are fairly small, so
I think the best approach for now is to simply put vm_mem_map_shared_or_private()
in kvm_util.c.

> +static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
> +	uint64_t flags)
> +{
> +	return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
> +}

This can go in tools/testing/selftests/kvm/include/x86_64/processor.h.

> +static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
> +	uint64_t flags)
> +{
> +	uint64_t ret;
> +
> +	GUEST_ASSERT_2(IS_PAGE_ALIGNED(gpa) && IS_PAGE_ALIGNED(size), gpa, size);
> +
> +	ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
> +	GUEST_ASSERT_1(!ret, ret);
> +}
> +
> +void kvm_hypercall_map_shared(uint64_t gpa, uint64_t size)
> +{
> +	kvm_hypercall_map_gpa_range(gpa, size, KVM_MAP_GPA_RANGE_DECRYPTED);
> +}
> +
> +void kvm_hypercall_map_private(uint64_t gpa, uint64_t size)
> +{
> +	kvm_hypercall_map_gpa_range(gpa, size, KVM_MAP_GPA_RANGE_ENCRYPTED);
> +}
> +
> +static void vm_update_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
> +	bool unback_mem)

s/unback_mem/map_shared.  "unback memory" is going to be super confusing to someone
who isn't familiar with UPM.  map_private would be the obvious alternative, but
I like not having to invert the param in the helper.

> +{
> +	int restricted_fd;
> +	uint64_t restricted_fd_offset, guest_phys_base, fd_offset;
> +	struct kvm_memory_attributes attr;
> +	struct kvm_userspace_memory_region_ext *region_ext;
> +	struct kvm_userspace_memory_region *region;
> +	int fallocate_mode = 0;
> +	int ret;
> +
> +	region_ext = kvm_userspace_memory_region_ext_find(vm, gpa, gpa + size);

I forget if I've already mentioned this somewhere, but I'd prefer to use the
"private" userspace_mem_region_find() and delete the existing
kvm_userspace_memory_region_find().

> +	TEST_ASSERT(region_ext != NULL, "Region not found");
> +	region = &region_ext->region;
> +	TEST_ASSERT(region->flags & KVM_MEM_PRIVATE,
> +		"Can not update private memfd for non-private memslot\n");
> +	restricted_fd = region_ext->restricted_fd;
> +	restricted_fd_offset = region_ext->restricted_offset;
> +	guest_phys_base = region->guest_phys_addr;
> +	fd_offset = restricted_fd_offset + (gpa - guest_phys_base);
> +
> +	if (unback_mem)
> +		fallocate_mode = (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
> +
> +	printf("restricted_fd %d fallocate_mode 0x%x for offset 0x%lx size 0x%lx\n",
> +		restricted_fd, fallocate_mode, fd_offset, size);

Don't put prints in common helpers, except _maybe_ for error paths.  It's fine
for development and/or debug, but for the final product it ends up being noise 99%
of the time.  If you really, really want printing checked in, then pr_debug() is an
option, but I would generally discourage even that for selftests.  E.g. strace can
give you all the information printed here without needing to rebuild the binary,
and without maintenance burden.

> +	ret = fallocate(restricted_fd, fallocate_mode, fd_offset, size);
> +	TEST_ASSERT(ret == 0, "fallocate failed\n");

Use whitespace to differentiate operations/blocks.

> +	attr.attributes = unback_mem ? 0 : KVM_MEMORY_ATTRIBUTE_PRIVATE;
> +	attr.address = gpa;
> +	attr.size = size;
> +	attr.flags = 0;

Add a helper to do KVM_SET_MEMORY_ATTRIBUTES, e.g. to fill the appropriate struct.

> +	if (unback_mem)
> +		printf("undoing encryption for gpa 0x%lx size 0x%lx\n", gpa, size);
> +	else
> +		printf("doing encryption for gpa 0x%lx size 0x%lx\n", gpa, size);
> +
> +	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
> +}


/*
 * Convert the guest physical range [gpa, gpa + size) between shared and
 * private: hole-punch the restricted fd to map shared, fallocate() it to
 * map private, then set the range's KVM memory attributes to match.  The
 * entire range must land in a single private (KVM_MEM_PRIVATE) memslot.
 */
void vm_mem_map_shared_or_private(struct kvm_vm *vm, uint64_t gpa,
				  uint64_t size, bool map_shared)
{
	struct userspace_mem_region *slot;
	uint64_t last_gpa = gpa + size - 1;
	off_t offset;
	int mode;
	int ret;

	slot = userspace_mem_region_find(vm, gpa, gpa);
	TEST_ASSERT(slot && slot->region.flags & KVM_MEM_PRIVATE,
		    "Private memory region not found for GPA 0x%lx", gpa);

	/* Reject ranges that straddle memslot boundaries. */
	TEST_ASSERT(slot == userspace_mem_region_find(vm, last_gpa, last_gpa),
		    "Private/Shared conversions must act on a single memslot");

	offset = slot->region.restricted_offset +
		 (gpa - slot->region.guest_phys_addr);

	/* To map shared, punch a hole.  To map private, allocate (no flags). */
	mode = map_shared ? (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE) : 0;

	ret = fallocate(slot->region.restricted_fd, mode, offset, size);
	TEST_ASSERT(!ret, "fallocate() failed to map %lx[%lu] %s, fd = %d, mode = %x, offset = %lx\n",
		    gpa, size, map_shared ? "shared" : "private",
		    slot->region.restricted_fd, mode, offset);

	vm_set_memory_attributes(vm, gpa, size,
				 map_shared ? 0 : KVM_MEMORY_ATTRIBUTE_PRIVATE);
}
Sean Christopherson Jan. 17, 2023, 10:51 p.m. UTC | #2
On Mon, Dec 05, 2022, Vishal Annapurve wrote:
> +void vcpu_run_and_handle_mapgpa(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
> +{
> +	/*
> +	 * Loop until the guest exits with any reason other than
> +	 * KVM_HC_MAP_GPA_RANGE hypercall.
> +	 */
> +
> +	while (true) {
> +		vcpu_run(vcpu);
> +
> +		if ((vcpu->run->exit_reason == KVM_EXIT_HYPERCALL) &&
> +			(vcpu->run->hypercall.nr == KVM_HC_MAP_GPA_RANGE)) {

I get what you're trying to do, and I completely agree that we need better helpers
and/or macros to reduce this type of boilerplate, but adding a one-off helper like
this is going to be a net negative overall.  This helper services exactly one use
case, and also obfuscates what a test does.

In other words, this is yet another thing that needs broad, generic support
(_vcpu_run() is a very special case).  E.g. something like this to make it easy
for tests to run a guest and handle ucalls plus specific exits (just a strawman,
I think we can do better for handling ucalls).

/*
 * Strawman: run @vcpu until a per-exit handler or the ucall dispatcher
 * returns non-zero.  @handlers is an array of callbacks indexed by
 * KVM_EXIT_* reason; @ucalls is an optional table for handling ucalls
 * delivered via KVM_EXIT_IO.  Unhandled exit reasons fail the test.
 *
 * Note: the original strawman looped on "while (!r)", but only "__r" is
 * declared, so the handler's result was never actually consulted (and the
 * macro wouldn't compile); loop on __r instead.
 */
#define vcpu_run_loop(vcpu, handlers, ucalls)				\
do {									\
	uint32_t __exit;						\
	int __r = 0;							\
									\
	while (!__r)  {							\
		vcpu_run(vcpu);						\
									\
		__exit = vcpu->run->exit_reason;			\
									\
		if (__exit < ARRAY_SIZE(handlers) && handlers[__exit])	\
			__r = handlers[__exit](vcpu);			\
		else if (__exit == KVM_EXIT_IO && ucalls)		\
			__r = handle_exit_ucall(vcpu, ucalls,		\
						ARRAY_SIZE(ucalls));	\
		else							\
			TEST_FAIL(...);					\
	}								\
} while (0)


For this series, I think it makes sense to just open code yet another test.  It
really doesn't end up being much code, which is partly why we haven't added
helpers :-/
diff mbox series

Patch

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 2275ba861e0e..97f7d52c553b 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -55,6 +55,7 @@  LIBKVM_x86_64 += lib/x86_64/apic.c
 LIBKVM_x86_64 += lib/x86_64/handlers.S
 LIBKVM_x86_64 += lib/x86_64/hyperv.c
 LIBKVM_x86_64 += lib/x86_64/memstress.c
+LIBKVM_x86_64 += lib/x86_64/private_mem.c
 LIBKVM_x86_64 += lib/x86_64/processor.c
 LIBKVM_x86_64 += lib/x86_64/svm.c
 LIBKVM_x86_64 += lib/x86_64/ucall.c
diff --git a/tools/testing/selftests/kvm/include/x86_64/private_mem.h b/tools/testing/selftests/kvm/include/x86_64/private_mem.h
new file mode 100644
index 000000000000..3aa6b4d11b28
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86_64/private_mem.h
@@ -0,0 +1,24 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022, Google LLC.
+ */
+
+#ifndef SELFTEST_KVM_PRIVATE_MEM_H
+#define SELFTEST_KVM_PRIVATE_MEM_H
+
+#include <stdint.h>
+#include <kvm_util.h>
+
+void kvm_hypercall_map_shared(uint64_t gpa, uint64_t size);
+void kvm_hypercall_map_private(uint64_t gpa, uint64_t size);
+
+void vm_unback_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size);
+
+void vm_allocate_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size);
+
+void handle_vm_exit_map_gpa_hypercall(struct kvm_vm *vm, uint64_t gpa,
+	uint64_t npages, uint64_t attrs);
+
+void vcpu_run_and_handle_mapgpa(struct kvm_vm *vm, struct kvm_vcpu *vcpu);
+
+#endif /* SELFTEST_KVM_PRIVATE_MEM_H */
diff --git a/tools/testing/selftests/kvm/lib/x86_64/private_mem.c b/tools/testing/selftests/kvm/lib/x86_64/private_mem.c
new file mode 100644
index 000000000000..2b97fc34ec4a
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/x86_64/private_mem.c
@@ -0,0 +1,139 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022, Google LLC.
+ */
+#define _GNU_SOURCE /* for program_invocation_name */
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/kvm_para.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <private_mem.h>
+#include <processor.h>
+
+static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
+	uint64_t flags)
+{
+	return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
+}
+
+static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
+	uint64_t flags)
+{
+	uint64_t ret;
+
+	GUEST_ASSERT_2(IS_PAGE_ALIGNED(gpa) && IS_PAGE_ALIGNED(size), gpa, size);
+
+	ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
+	GUEST_ASSERT_1(!ret, ret);
+}
+
+void kvm_hypercall_map_shared(uint64_t gpa, uint64_t size)
+{
+	kvm_hypercall_map_gpa_range(gpa, size, KVM_MAP_GPA_RANGE_DECRYPTED);
+}
+
+void kvm_hypercall_map_private(uint64_t gpa, uint64_t size)
+{
+	kvm_hypercall_map_gpa_range(gpa, size, KVM_MAP_GPA_RANGE_ENCRYPTED);
+}
+
+static void vm_update_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
+	bool unback_mem)
+{
+	int restricted_fd;
+	uint64_t restricted_fd_offset, guest_phys_base, fd_offset;
+	struct kvm_memory_attributes attr;
+	struct kvm_userspace_memory_region_ext *region_ext;
+	struct kvm_userspace_memory_region *region;
+	int fallocate_mode = 0;
+	int ret;
+
+	region_ext = kvm_userspace_memory_region_ext_find(vm, gpa, gpa + size);
+	TEST_ASSERT(region_ext != NULL, "Region not found");
+	region = &region_ext->region;
+	TEST_ASSERT(region->flags & KVM_MEM_PRIVATE,
+		"Can not update private memfd for non-private memslot\n");
+	restricted_fd = region_ext->restricted_fd;
+	restricted_fd_offset = region_ext->restricted_offset;
+	guest_phys_base = region->guest_phys_addr;
+	fd_offset = restricted_fd_offset + (gpa - guest_phys_base);
+
+	if (unback_mem)
+		fallocate_mode = (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
+
+	printf("restricted_fd %d fallocate_mode 0x%x for offset 0x%lx size 0x%lx\n",
+		restricted_fd, fallocate_mode, fd_offset, size);
+	ret = fallocate(restricted_fd, fallocate_mode, fd_offset, size);
+	TEST_ASSERT(ret == 0, "fallocate failed\n");
+	attr.attributes = unback_mem ? 0 : KVM_MEMORY_ATTRIBUTE_PRIVATE;
+	attr.address = gpa;
+	attr.size = size;
+	attr.flags = 0;
+	if (unback_mem)
+		printf("undoing encryption for gpa 0x%lx size 0x%lx\n", gpa, size);
+	else
+		printf("doing encryption for gpa 0x%lx size 0x%lx\n", gpa, size);
+
+	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
+}
+
+void vm_unback_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
+{
+	vm_update_private_mem(vm, gpa, size, true);
+}
+
+void vm_allocate_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
+{
+	vm_update_private_mem(vm, gpa, size, false);
+}
+
+void handle_vm_exit_map_gpa_hypercall(struct kvm_vm *vm, uint64_t gpa,
+	uint64_t npages, uint64_t attrs)
+{
+	uint64_t size;
+
+	size = npages << MIN_PAGE_SHIFT;
+	pr_info("Explicit conversion off 0x%lx size 0x%lx to %s\n", gpa, size,
+		(attrs & KVM_MAP_GPA_RANGE_ENCRYPTED) ? "private" : "shared");
+
+	if (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED)
+		vm_allocate_private_mem(vm, gpa, size);
+	else
+		vm_unback_private_mem(vm, gpa, size);
+}
+
+void vcpu_run_and_handle_mapgpa(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Loop until the guest exits with any reason other than
+	 * KVM_HC_MAP_GPA_RANGE hypercall.
+	 */
+
+	while (true) {
+		vcpu_run(vcpu);
+
+		if ((vcpu->run->exit_reason == KVM_EXIT_HYPERCALL) &&
+			(vcpu->run->hypercall.nr == KVM_HC_MAP_GPA_RANGE)) {
+			uint64_t gpa = vcpu->run->hypercall.args[0];
+			uint64_t npages = vcpu->run->hypercall.args[1];
+			uint64_t attrs = vcpu->run->hypercall.args[2];
+
+			handle_vm_exit_map_gpa_hypercall(vm, gpa, npages, attrs);
+			vcpu->run->hypercall.ret = 0;
+			continue;
+		}
+
+		return;
+	}
+}