@@ -33,6 +33,7 @@ enum guest_memfd_grab_flags {
enum guest_memfd_create_flags {
	GUEST_MEMFD_FLAG_CLEAR_INACCESSIBLE = (1UL << 0),
+	GUEST_MEMFD_FLAG_REMOVE_DIRECT_MAP = (1UL << 1),
};
struct folio *guest_memfd_grab_folio(struct file *file, pgoff_t index, u32 flags);
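As a usage sketch (assuming a creation helper along the lines of
guest_memfd_alloc(name, ops, size, flags); the helper name, its signature,
and the my_gmem_ops variable below are assumptions for illustration, not
taken from this hunk), a backend that wants inaccessible folios removed
from the direct map would request the new flag at creation time:

	unsigned long flags = GUEST_MEMFD_FLAG_CLEAR_INACCESSIBLE;
	struct file *file;

	/* Only request direct-map removal where the arch supports it. */
	if (can_set_direct_map())
		flags |= GUEST_MEMFD_FLAG_REMOVE_DIRECT_MAP;

	file = guest_memfd_alloc("guest-mem", &my_gmem_ops, size, flags);

Since guest_memfd_valid_flags() rejects the flag on architectures that
cannot modify the direct map, callers should only set it when
can_set_direct_map() holds.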
@@ -8,6 +8,7 @@
#include <linux/falloc.h>
#include <linux/guest_memfd.h>
#include <linux/pagemap.h>
+#include <linux/set_memory.h>
#include <linux/wait.h>
#include "internal.h"
@@ -26,6 +27,45 @@ struct guest_memfd_private {
	atomic_t safe;
};
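+/*
+ * Remove all pages of @folio from the kernel direct map. On failure,
+ * restore the direct map entries for any pages already invalidated.
+ */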
+static inline int folio_set_direct_map_invalid_noflush(struct folio *folio)
+{
+	unsigned long i, nr = folio_nr_pages(folio);
+	int r;
+
+	for (i = 0; i < nr; i++) {
+		struct page *page = folio_page(folio, i);
+
+		r = set_direct_map_invalid_noflush(page);
+		if (r)
+			goto out_remap;
+	}
+	/*
+	 * Currently there is no need to flush the TLB here: the
+	 * hypervisor will flush it anyway when handing the folio to
+	 * the guest.
+	 */
+
+	return 0;
+out_remap:
+	for (; i > 0; i--) {
+		struct page *page = folio_page(folio, i - 1);
+
+		BUG_ON(set_direct_map_default_noflush(page));
+	}
+
+	return r;
+}
+
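+/* Restore all pages of @folio in the kernel direct map. */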
+static inline void folio_set_direct_map_default_noflush(struct folio *folio)
+{
+	unsigned long i, nr = folio_nr_pages(folio);
+
+	for (i = 0; i < nr; i++) {
+		struct page *page = folio_page(folio, i);
+
+		BUG_ON(set_direct_map_default_noflush(page));
+	}
+}
+
static inline int base_safe_refs(struct folio *folio)
{
	/* 1 for filemap */
@@ -131,6 +171,12 @@ struct folio *guest_memfd_grab_folio(struct file *file, pgoff_t index, u32 flags)
				goto out_free;
		}
	} else {
+		if (gmem_flags & GUEST_MEMFD_FLAG_REMOVE_DIRECT_MAP) {
+			r = folio_set_direct_map_invalid_noflush(folio);
+			if (r < 0)
+				goto out_free;
+		}
+
		if (ops->prepare_inaccessible) {
			r = ops->prepare_inaccessible(inode, folio);
			if (r < 0)
@@ -203,6 +249,7 @@ int guest_memfd_make_accessible(struct folio *folio)
	struct guest_memfd_private *private = folio_get_private(folio);
	struct inode *inode = folio_inode(folio);
	struct guest_memfd_operations *ops = inode->i_private;
+	unsigned long gmem_flags;
	int r;

	/*
@@ -218,6 +265,10 @@ int guest_memfd_make_accessible(struct folio *folio)
	if (!r)
		return -EBUSY;

+	gmem_flags = (unsigned long)inode->i_mapping->i_private_data;
+	if (gmem_flags & GUEST_MEMFD_FLAG_REMOVE_DIRECT_MAP)
+		folio_set_direct_map_default_noflush(folio);
+
	if (ops->prepare_accessible) {
		r = ops->prepare_accessible(inode, folio);
		if (r)
@@ -248,6 +299,7 @@ int guest_memfd_make_inaccessible(struct folio *folio)
	struct guest_memfd_private *private = folio_get_private(folio);
	struct inode *inode = folio_inode(folio);
	struct guest_memfd_operations *ops = inode->i_private;
+	unsigned long gmem_flags;
	int r;

	r = atomic_dec_if_positive(&private->accessible);
@@ -266,6 +318,13 @@ int guest_memfd_make_inaccessible(struct folio *folio)
			goto err;
	}

+	gmem_flags = (unsigned long)inode->i_mapping->i_private_data;
+	if (gmem_flags & GUEST_MEMFD_FLAG_REMOVE_DIRECT_MAP) {
+		r = folio_set_direct_map_invalid_noflush(folio);
+		if (r)
+			goto err;
+	}
+
	if (ops->prepare_inaccessible) {
		r = ops->prepare_inaccessible(inode, folio);
		if (r)
@@ -454,6 +513,7 @@ static int gmem_error_folio(struct address_space *mapping, struct folio *folio)
	struct guest_memfd_operations *ops = inode->i_private;
	off_t offset = folio->index;
	size_t nr = folio_nr_pages(folio);
+	unsigned long gmem_flags;
	int ret;

	filemap_invalidate_lock_shared(mapping);
@@ -464,6 +524,10 @@ static int gmem_error_folio(struct address_space *mapping, struct folio *folio)
	filemap_invalidate_unlock_shared(mapping);

+	gmem_flags = (unsigned long)inode->i_mapping->i_private_data;
+	if (gmem_flags & GUEST_MEMFD_FLAG_REMOVE_DIRECT_MAP)
+		folio_set_direct_map_default_noflush(folio);
+
	return ret;
}
@@ -474,7 +538,7 @@ static bool gmem_release_folio(struct folio *folio, gfp_t gfp)
	struct guest_memfd_operations *ops = inode->i_private;
	off_t offset = folio->index;
	size_t nr = folio_nr_pages(folio);
-	unsigned long val, expected;
+	unsigned long val, expected, gmem_flags;
	int ret;

	ret = ops->invalidate_begin(inode, offset, nr);
@@ -483,6 +547,10 @@ static bool gmem_release_folio(struct folio *folio, gfp_t gfp)
	if (ops->invalidate_end)
		ops->invalidate_end(inode, offset, nr);

+	gmem_flags = (unsigned long)inode->i_mapping->i_private_data;
+	if (gmem_flags & GUEST_MEMFD_FLAG_REMOVE_DIRECT_MAP)
+		folio_set_direct_map_default_noflush(folio);
+
	expected = base_safe_refs(folio);
	val = atomic_read(&private->safe);
	WARN_ONCE(val != expected, "folio[%x] safe ref: %d != expected %d\n",
@@ -518,7 +586,14 @@ static inline bool guest_memfd_check_ops(const struct guest_memfd_operations *op
static inline unsigned long guest_memfd_valid_flags(void)
{
-	return GUEST_MEMFD_FLAG_CLEAR_INACCESSIBLE;
+	unsigned long flags = GUEST_MEMFD_FLAG_CLEAR_INACCESSIBLE;
+
+#ifdef CONFIG_ARCH_HAS_SET_DIRECT_MAP
+	if (can_set_direct_map())
+		flags |= GUEST_MEMFD_FLAG_REMOVE_DIRECT_MAP;
+#endif
+
+	return flags;
}
/**
When memory is made inaccessible to the host, Linux may still speculatively
access the folio if a load_unaligned_zeropad() is performed at the end of
the prior page. To ensure Linux itself catches such errors without the
hypervisor crashing Linux, unmap the guest-inaccessible pages from the
direct map.

This feature is made optional because arm64 pKVM can provide a special,
detectable fault which can be fixed up directly.

Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
---
 include/linux/guest_memfd.h |  1 +
 mm/guest_memfd.c            | 79 +++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 78 insertions(+), 2 deletions(-)
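For context, a minimal sketch of the hazard described above (illustrative
only; hash_name_word() is a made-up caller, though load_unaligned_zeropad()
and its fixup behavior are real): word-at-a-time string handling, such as
the pathname hashing in fs/namei.c, may read up to sizeof(unsigned long) - 1
bytes past the end of a string. If the string ends near a page boundary,
the load can cross into the following page. With that page removed from the
direct map, the access takes an ordinary kernel page fault and the
exception-table fixup recovers by re-reading the aligned word and
zero-padding, instead of the hypervisor injecting an unrecoverable fault:

	#include <asm/word-at-a-time.h>

	/* Illustrative only: may read past the end of @name. */
	static unsigned long hash_name_word(const char *name)
	{
		/*
		 * If this unaligned load crosses into an unmapped page,
		 * the load_unaligned_zeropad() fixup re-reads the aligned
		 * word from the mapped page and zero-pads the result.
		 */
		return load_unaligned_zeropad(name);
	}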