
[v4,03/12] KVM: guest_memfd: Add flag to remove from direct map

Message ID 20250221160728.1584559-4-roypat@amazon.co.uk (mailing list archive)
Series: Direct Map Removal for guest_memfd

Commit Message

Patrick Roy Feb. 21, 2025, 4:07 p.m. UTC
Add KVM_GMEM_NO_DIRECT_MAP flag for KVM_CREATE_GUEST_MEMFD() ioctl. When
set, guest_memfd folios will be removed from the direct map after
preparation, with direct map entries only restored when the folios are
freed.
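
For illustration, a minimal userspace sketch of how a VMM could request
this behavior (the helper name is made up for this example; the
KVM_CREATE_GUEST_MEMFD ioctl and struct kvm_create_guest_memfd are the
existing guest_memfd UAPI):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* vm_fd is a VM file descriptor obtained via KVM_CREATE_VM. */
    static int create_gmem_no_direct_map(int vm_fd, uint64_t size)
    {
            struct kvm_create_guest_memfd gmem = {
                    .size  = size,
                    .flags = KVM_GMEM_NO_DIRECT_MAP,
            };

            /* Returns a new guest_memfd fd, or -1 with errno set. */
            return ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
    }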

To ensure these folios do not end up in places where the kernel cannot
deal with them, set AS_NO_DIRECT_MAP on the guest_memfd's struct
address_space if KVM_GMEM_NO_DIRECT_MAP is requested.

Note that this flag causes removal of direct map entries for all
guest_memfd folios independent of whether they are "shared" or "private"
(although current guest_memfd only supports either all folios in the
"shared" state, or all folios in the "private" state if
!IS_ENABLED(CONFIG_KVM_GMEM_SHARED_MEM)). The use case for removing
direct map entries of the shared parts of guest_memfd as well is a
special type of non-CoCo VM where host userspace is trusted to have
access to all of guest memory, but where Spectre-style transient
execution attacks through the host kernel's direct map should still be
mitigated.

Note that KVM retains access to guest memory via userspace
mappings of guest_memfd, which are reflected back into KVM's memslots
via userspace_addr. This is needed for things like MMIO emulation on
x86_64 to work. Previous iterations attempted to instead have KVM
temporarily restore direct map entries whenever such an access to guest
memory was needed, but this turned out to have a significant performance
impact, and to add complexity because direct map reinsertion operations
needed to be refcounted and made to play nicely with gmem truncations.
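
As an illustrative sketch of such a memslot setup (this assumes the
guest_memfd mmap support from the CONFIG_KVM_GMEM_SHARED_MEM series so
that the fd can be mapped into host userspace; variable names are made
up for this example):

    /* gmem_fd was returned by KVM_CREATE_GUEST_MEMFD. */
    void *uaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       gmem_fd, 0);

    struct kvm_userspace_memory_region2 region = {
            .slot               = 0,
            .flags              = KVM_MEM_GUEST_MEMFD,
            .guest_phys_addr    = 0,
            .memory_size        = size,
            .userspace_addr     = (uint64_t)uaddr,
            .guest_memfd        = gmem_fd,
            .guest_memfd_offset = 0,
    };

    ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);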

This iteration also doesn't have KVM perform TLB flushes after direct
map manipulations. This is because TLB flushes resulted in an up to 40x
elongation of page faults in guest_memfd (scaling with the number of CPU
cores), or a 5x elongation of memory population. On the one hand, TLB
flushes are not needed for functional correctness (the virt->phys
mapping technically stays "correct", the kernel simply should not use it
for a while), so skipping them is a correct optimization to make. On the
other hand, it means that the desired protection from Spectre-style
attacks is not perfect, as an attacker could try to prevent a stale TLB
entry from getting evicted, keeping it alive until the page it refers to
is used by the guest for some sensitive data, and then targeting it
using a Spectre gadget.
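
For comparison, a flushing variant (hypothetical, not part of this
patch) would pair the direct map manipulation with a kernel TLB range
flush, closing the stale-TLB window at the performance cost described
above:

    #include <linux/mm.h>
    #include <linux/set_memory.h>
    #include <asm/tlbflush.h>

    /* Hypothetical helper: zap direct map entries, then flush the TLB. */
    static int kvm_gmem_zap_direct_map_flushing(struct folio *folio)
    {
            unsigned long start = (unsigned long)folio_address(folio);
            int r;

            r = set_direct_map_valid_noflush(folio_page(folio, 0),
                                             folio_nr_pages(folio), false);
            if (!r) {
                    /* Costs an IPI broadcast to all CPUs on every preparation. */
                    flush_tlb_kernel_range(start, start + folio_size(folio));
            }

            return r;
    }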

Signed-off-by: Patrick Roy <roypat@amazon.co.uk>
---
 include/uapi/linux/kvm.h |  2 ++
 virt/kvm/guest_memfd.c   | 28 +++++++++++++++++++++++++++-
 2 files changed, 29 insertions(+), 1 deletion(-)

Patch

diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 117937a895da..4654c01a0a01 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1573,6 +1573,8 @@  struct kvm_create_guest_memfd {
 	__u64 reserved[6];
 };
 
+#define KVM_GMEM_NO_DIRECT_MAP			(1ULL << 0)
+
 #define KVM_PRE_FAULT_MEMORY	_IOWR(KVMIO, 0xd5, struct kvm_pre_fault_memory)
 
 struct kvm_pre_fault_memory {
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 30b47ff0e6d2..bd7d361c9bb7 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -4,6 +4,7 @@ 
 #include <linux/kvm_host.h>
 #include <linux/pagemap.h>
 #include <linux/anon_inodes.h>
+#include <linux/set_memory.h>
 
 #include "kvm_mm.h"
 
@@ -42,8 +43,23 @@  static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slo
 	return 0;
 }
 
+static bool kvm_gmem_test_no_direct_map(struct inode *inode)
+{
+	return ((unsigned long) inode->i_private) & KVM_GMEM_NO_DIRECT_MAP;
+}
+
 static inline void kvm_gmem_mark_prepared(struct folio *folio)
 {
+	struct inode *inode = folio_inode(folio);
+
+	if (kvm_gmem_test_no_direct_map(inode)) {
+		int r = set_direct_map_valid_noflush(folio_page(folio, 0), folio_nr_pages(folio),
+						     false);
+
+		if (!r)
+			folio_set_private(folio);
+	}
+
 	folio_mark_uptodate(folio);
 }
 
@@ -479,6 +495,10 @@  static void kvm_gmem_free_folio(struct folio *folio)
 	kvm_pfn_t pfn = page_to_pfn(page);
 	int order = folio_order(folio);
 
+	if (folio_test_private(folio))
+		WARN_ON_ONCE(set_direct_map_valid_noflush(folio_page(folio, 0),
+							  folio_nr_pages(folio), true));
+
 	kvm_arch_gmem_invalidate(pfn, pfn + (1ul << order));
 }
 #endif
@@ -552,6 +572,9 @@  static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
 	/* Unmovable mappings are supposed to be marked unevictable as well. */
 	WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));
 
+	if (flags & KVM_GMEM_NO_DIRECT_MAP)
+		mapping_set_no_direct_map(inode->i_mapping);
+
 	kvm_get_kvm(kvm);
 	gmem->kvm = kvm;
 	xa_init(&gmem->bindings);
@@ -571,7 +594,10 @@  int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
 {
 	loff_t size = args->size;
 	u64 flags = args->flags;
-	u64 valid_flags = 0;
+	u64 valid_flags = KVM_GMEM_NO_DIRECT_MAP;
+
+	if (!can_set_direct_map())
+		valid_flags &= ~KVM_GMEM_NO_DIRECT_MAP;
 
 	if (flags & ~valid_flags)
 		return -EINVAL;
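
On architectures where can_set_direct_map() is false,
KVM_GMEM_NO_DIRECT_MAP is stripped from valid_flags above, so passing it
to KVM_CREATE_GUEST_MEMFD fails with EINVAL. A userspace sketch
(illustrative only) of degrading gracefully in that case:

    int fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
    if (fd < 0 && errno == EINVAL &&
        (gmem.flags & KVM_GMEM_NO_DIRECT_MAP)) {
            /* Retry without direct map removal on such hosts. */
            gmem.flags &= ~KVM_GMEM_NO_DIRECT_MAP;
            fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
    }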