
udmabuf: Add support for mapping hugepages (v2)

Message ID 20210603235955.341043-1-vivek.kasireddy@intel.com (mailing list archive)
State New, archived
Series udmabuf: Add support for mapping hugepages (v2)

Commit Message

Kasireddy, Vivek June 3, 2021, 11:59 p.m. UTC
If the VMM's (QEMU) memory backend is backed by memfd + hugepages
(hugetlbfs, not THP), we first have to find the hugepage(s) in which
the Guest allocations are located and then extract the regular
4k-sized subpages from them.

v2: Ensure that the subpage offsets are calculated correctly when the
range of subpage allocations cuts across multiple hugepages.

Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
 drivers/dma-buf/udmabuf.c | 46 ++++++++++++++++++++++++++++++++-------
 1 file changed, 38 insertions(+), 8 deletions(-)
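
The offset arithmetic the patch performs is easiest to see in
isolation. Below is a minimal, runnable userspace sketch of the same
calculation; PAGE_SHIFT and HPAGE_SHIFT are illustrative stand-ins for
the kernel's huge_page_shift()/huge_page_mask() helpers, and 2 MB
hugepages are assumed. It prints the hugepage index and subpage index
for each 4k page of a range that straddles a hugepage border, which is
exactly the case the v2 fix addresses:

#include <stdio.h>

#define PAGE_SHIFT  12                 /* 4k base pages */
#define HPAGE_SHIFT 21                 /* 2 MB hugepages (assumption) */
#define HPAGE_MASK  (~((1UL << HPAGE_SHIFT) - 1))

int main(void)
{
	/* Four 4k pages, starting two pages before a hugepage border. */
	unsigned long offset = (1UL << HPAGE_SHIFT) - (2UL << PAGE_SHIFT);
	unsigned long size   = 4UL << PAGE_SHIFT;
	unsigned long pgcnt  = size >> PAGE_SHIFT;

	/* Mirrors the patch: hugepage index + subpage index within it. */
	unsigned long pgoff     = offset >> HPAGE_SHIFT;
	unsigned long subpgoff  = (offset & ~HPAGE_MASK) >> PAGE_SHIFT;
	unsigned long maxsubpgs = 1UL << (HPAGE_SHIFT - PAGE_SHIFT);

	for (unsigned long pgidx = 0; pgidx < pgcnt; pgidx++) {
		printf("4k page %lu -> hugepage %lu, subpage %lu\n",
		       pgidx, pgoff, subpgoff % maxsubpgs);
		subpgoff++;
		if (subpgoff % maxsubpgs == 0)
			pgoff++;
	}
	return 0;
}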

Comments

Gerd Hoffmann June 4, 2021, 5:59 a.m. UTC | #1
Hi,

>  		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
> +			if (is_file_hugepages(memfd)) {
> +				hpage = find_get_page_flags(
> +						file_inode(memfd)->i_mapping,
> +						pgoff, FGP_ACCESSED);
> +				if (IS_ERR(hpage)) {
> +					ret = PTR_ERR(hpage);
> +					goto err;
> +				}
> +
> +				page = hpage + (subpgoff % maxsubpgs);
> +				get_page(page);
> +				put_page(hpage);

if (hpage && subpgoff == maxsubpgs) {
	put_page(hpage);
	hpage = NULL;
}
if (!hpage) {
	hpage = find_get_page_flags(...)
	[ ... ]
}

Only look up the huge page when you cross a hugepage border.

take care,
  Gerd
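
For illustration, the effect of Gerd's suggestion can be modelled in a
small runnable userspace program, where lookup_hugepage() is a
hypothetical stand-in for find_get_page_flags() and the shift
constants are assumptions. The cached hugepage is dropped and looked
up again only after crossing a border, so a range of subpages costs
one lookup per hugepage touched rather than one per subpage:

#include <stdio.h>

#define PAGE_SHIFT  12
#define HPAGE_SHIFT 21                /* 2 MB hugepages (assumption) */

static unsigned long lookups;         /* models find_get_page_flags() calls */

static long lookup_hugepage(unsigned long pgoff)
{
	lookups++;
	return (long)pgoff;           /* pretend the index is the page */
}

int main(void)
{
	unsigned long maxsubpgs = 1UL << (HPAGE_SHIFT - PAGE_SHIFT);
	/* Eight 4k pages, starting four pages before a hugepage border. */
	unsigned long offset   = (1UL << HPAGE_SHIFT) - (4UL << PAGE_SHIFT);
	unsigned long pgcnt    = 8;
	unsigned long pgoff    = offset >> HPAGE_SHIFT;
	unsigned long subpgoff = (offset >> PAGE_SHIFT) & (maxsubpgs - 1);
	long hpage = -1;              /* -1 models "no cached hugepage" */

	for (unsigned long pgidx = 0; pgidx < pgcnt; pgidx++) {
		/* Drop the cached hugepage only on a border crossing. */
		if (hpage >= 0 && subpgoff == maxsubpgs) {
			hpage = -1;   /* kernel code would put_page() here */
			subpgoff = 0;
			pgoff++;
		}
		if (hpage < 0)
			hpage = lookup_hugepage(pgoff);
		/* kernel code: page = hpage + subpgoff; get_page(page); */
		subpgoff++;
	}
	printf("%lu subpages, %lu hugepage lookups\n", pgcnt, lookups);
	return 0;
}

This prints "8 subpages, 2 hugepage lookups"; the loop in the patch
below performs 8 lookups for the same range.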

Patch

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index db732f71e59a..f053d12a1eb3 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -11,6 +11,7 @@ 
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/udmabuf.h>
+#include <linux/hugetlb.h>
 
 static const u32    list_limit = 1024;  /* udmabuf_create_list->count limit */
 static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */
@@ -162,8 +163,10 @@  static long udmabuf_create(struct miscdevice *device,
 	struct file *memfd = NULL;
 	struct udmabuf *ubuf;
 	struct dma_buf *buf;
-	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
-	struct page *page;
+	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit, subpgoff;
+	uint32_t maxsubpgs;
+	struct page *page, *hpage = NULL;
+	struct hstate *hpstate;
 	int seals, ret = -EINVAL;
 	u32 i, flags;
 
@@ -194,7 +197,8 @@  static long udmabuf_create(struct miscdevice *device,
 		memfd = fget(list[i].memfd);
 		if (!memfd)
 			goto err;
-		if (!shmem_mapping(file_inode(memfd)->i_mapping))
+		if (!shmem_mapping(file_inode(memfd)->i_mapping) &&
+		    !is_file_hugepages(memfd))
 			goto err;
 		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
 		if (seals == -EINVAL)
@@ -205,12 +209,38 @@  static long udmabuf_create(struct miscdevice *device,
 			goto err;
 		pgoff = list[i].offset >> PAGE_SHIFT;
 		pgcnt = list[i].size   >> PAGE_SHIFT;
+		if (is_file_hugepages(memfd)) {
+			hpstate = hstate_file(memfd);
+			pgoff = list[i].offset >> huge_page_shift(hpstate);
+			subpgoff = (list[i].offset &
+				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
+			maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
+		}
 		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
-			page = shmem_read_mapping_page(
-				file_inode(memfd)->i_mapping, pgoff + pgidx);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto err;
+			if (is_file_hugepages(memfd)) {
+				hpage = find_get_page_flags(
+						file_inode(memfd)->i_mapping,
+						pgoff, FGP_ACCESSED);
+				if (IS_ERR(hpage)) {
+					ret = PTR_ERR(hpage);
+					goto err;
+				}
+
+				page = hpage + (subpgoff % maxsubpgs);
+				get_page(page);
+				put_page(hpage);
+
+				subpgoff++;
+				if (subpgoff % maxsubpgs == 0)
+					pgoff++;
+			} else {
+				page = shmem_read_mapping_page(
+					file_inode(memfd)->i_mapping,
+					pgoff + pgidx);
+				if (IS_ERR(page)) {
+					ret = PTR_ERR(page);
+					goto err;
+				}
 			}
 			ubuf->pages[pgbuf++] = page;
 		}
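
For context, userspace exercises this path by handing /dev/udmabuf a
hugetlb-backed memfd. A minimal sketch, assuming this patch is applied
and a populated 2 MB hugepage pool (e.g. via
/proc/sys/vm/nr_hugepages). The pages are faulted in first, since the
driver looks them up in the page cache with find_get_page_flags()
rather than allocating them, and udmabuf requires F_SEAL_SHRINK on the
memfd:

#define _GNU_SOURCE                   /* memfd_create(), F_ADD_SEALS */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

int main(void)
{
	size_t size = 2 * (2UL << 20);        /* two 2 MB hugepages */

	int memfd = memfd_create("guest-ram",
				 MFD_HUGETLB | MFD_ALLOW_SEALING);
	if (memfd < 0 || ftruncate(memfd, size) < 0)
		return perror("memfd"), 1;

	/* Fault the hugepages in so the driver can find them. */
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, memfd, 0);
	if (p == MAP_FAILED)
		return perror("mmap"), 1;
	memset(p, 0, size);

	/* udmabuf refuses memfds without F_SEAL_SHRINK. */
	if (fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK) < 0)
		return perror("F_ADD_SEALS"), 1;

	int devfd = open("/dev/udmabuf", O_RDWR);
	if (devfd < 0)
		return perror("/dev/udmabuf"), 1;

	struct udmabuf_create create = {
		.memfd  = memfd,
		.flags  = UDMABUF_FLAGS_CLOEXEC,
		.offset = 0,
		.size   = size,
	};
	int buffd = ioctl(devfd, UDMABUF_CREATE, &create);
	if (buffd < 0)
		return perror("UDMABUF_CREATE"), 1;

	printf("dma-buf fd: %d\n", buffd);
	return 0;
}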