@@ -10,6 +10,7 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
+#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
@@ -28,6 +29,7 @@ struct udmabuf {
struct page **pages;
struct sg_table *sg;
struct miscdevice *device;
+ pgoff_t *subpgoff;
};
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -41,6 +43,10 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
pfn = page_to_pfn(ubuf->pages[pgoff]);
+ if (ubuf->subpgoff)
+ pfn += ubuf->subpgoff[pgoff] >> PAGE_SHIFT;
+
return vmf_insert_pfn(vma, vmf->address, pfn);
}
@@ -90,23 +96,31 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
{
struct udmabuf *ubuf = buf->priv;
struct sg_table *sg;
+ struct scatterlist *sgl;
+ pgoff_t offset;
+ unsigned long i = 0;
int ret;
sg = kzalloc(sizeof(*sg), GFP_KERNEL);
if (!sg)
return ERR_PTR(-ENOMEM);
- ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
- 0, ubuf->pagecount << PAGE_SHIFT,
- GFP_KERNEL);
+
+ ret = sg_alloc_table(sg, ubuf->pagecount, GFP_KERNEL);
if (ret < 0)
- goto err;
+ goto err_alloc;
+
+ for_each_sg(sg->sgl, sgl, ubuf->pagecount, i) {
+ offset = ubuf->subpgoff ? ubuf->subpgoff[i] : 0;
+ sg_set_page(sgl, ubuf->pages[i], PAGE_SIZE, offset);
+ }
ret = dma_map_sgtable(dev, sg, direction, 0);
if (ret < 0)
- goto err;
+ goto err_map;
return sg;
-err:
+err_map:
sg_free_table(sg);
+err_alloc:
kfree(sg);
return ERR_PTR(ret);
}
@@ -143,6 +157,7 @@ static void release_udmabuf(struct dma_buf *buf)
for (pg = 0; pg < ubuf->pagecount; pg++)
put_page(ubuf->pages[pg]);
+ kfree(ubuf->subpgoff);
kfree(ubuf->pages);
kfree(ubuf);
}
@@ -206,7 +221,10 @@ static long udmabuf_create(struct miscdevice *device,
struct udmabuf *ubuf;
struct dma_buf *buf;
pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
- struct page *page;
+ struct page *page, *hpage = NULL;
+ struct folio *folio;
+ pgoff_t mapidx, chunkoff, maxchunks;
+ struct hstate *hpstate;
int seals, ret = -EINVAL;
u32 i, flags;
@@ -242,7 +260,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!memfd)
goto err;
mapping = memfd->f_mapping;
- if (!shmem_mapping(mapping))
+ if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
goto err;
seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
if (seals == -EINVAL)
@@ -253,16 +271,58 @@ static long udmabuf_create(struct miscdevice *device,
goto err;
pgoff = list[i].offset >> PAGE_SHIFT;
pgcnt = list[i].size >> PAGE_SHIFT;
+ if (is_file_hugepages(memfd)) {
+ if (!ubuf->subpgoff) {
+ ubuf->subpgoff = kmalloc_array(ubuf->pagecount,
+ sizeof(*ubuf->subpgoff),
+ GFP_KERNEL);
+ if (!ubuf->subpgoff) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+ hpstate = hstate_file(memfd);
+ mapidx = list[i].offset >> huge_page_shift(hpstate);
+ chunkoff = (list[i].offset &
+ ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
+ maxchunks = huge_page_size(hpstate) >> PAGE_SHIFT;
+ }
for (pgidx = 0; pgidx < pgcnt; pgidx++) {
- page = shmem_read_mapping_page(mapping, pgoff + pgidx);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto err;
+ if (is_file_hugepages(memfd)) {
+ if (!hpage) {
+ folio = __filemap_get_folio(mapping, mapidx,
+ FGP_ACCESSED, 0);
+ hpage = IS_ERR(folio) ? NULL : &folio->page;
+ if (!hpage) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+ get_page(hpage);
+ ubuf->pages[pgbuf] = hpage;
+ ubuf->subpgoff[pgbuf++] = chunkoff << PAGE_SHIFT;
+ if (++chunkoff == maxchunks) {
+ put_page(hpage);
+ hpage = NULL;
+ chunkoff = 0;
+ mapidx++;
+ }
+ } else {
+ mapidx = pgoff + pgidx;
+ page = shmem_read_mapping_page(mapping, mapidx);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto err;
+ }
+ ubuf->pages[pgbuf++] = page;
}
- ubuf->pages[pgbuf++] = page;
}
fput(memfd);
memfd = NULL;
+ if (hpage) {
+ put_page(hpage);
+ hpage = NULL;
+ }
}
exp_info.ops = &udmabuf_ops;
@@ -287,6 +347,7 @@ static long udmabuf_create(struct miscdevice *device,
put_page(ubuf->pages[--pgbuf]);
if (memfd)
fput(memfd);
+ kfree(ubuf->subpgoff);
kfree(ubuf->pages);
kfree(ubuf);
return ret;
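
Not part of the patch: a minimal userspace sketch of how the new path could be exercised, assuming the existing UDMABUF_CREATE uAPI from <linux/udmabuf.h>, a hugetlb-backed memfd, a 2 MB default hugepage size, and with most error handling omitted. Starting the buffer at a PAGE_SIZE-aligned offset inside the first huge page is exactly the case the new subpgoff[] bookkeeping covers.

/* Sketch only: build with a recent glibc; needs CONFIG_UDMABUF and reserved hugepages. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/udmabuf.h>

int main(void)
{
	struct udmabuf_create create;
	size_t huge_sz = 2UL * 1024 * 1024;	/* assumes 2M default hugepage size */
	int devfd, memfd, dmabuf_fd;

	devfd = open("/dev/udmabuf", O_RDWR);
	memfd = memfd_create("hugetlb-src", MFD_HUGETLB | MFD_ALLOW_SEALING);
	if (devfd < 0 || memfd < 0)
		return 1;

	/* Size the memfd, populate the hugetlb page cache, and add the seal the driver requires. */
	if (ftruncate(memfd, huge_sz) < 0 ||
	    fallocate(memfd, 0, 0, huge_sz) < 0 ||
	    fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK) < 0)
		return 1;

	memset(&create, 0, sizeof(create));
	create.memfd  = memfd;
	create.offset = getpagesize();		/* sub-hugepage, but PAGE_SIZE aligned */
	create.size   = 4 * getpagesize();

	dmabuf_fd = ioctl(devfd, UDMABUF_CREATE, &create);
	if (dmabuf_fd < 0) {
		perror("UDMABUF_CREATE");
		return 1;
	}

	printf("udmabuf fd: %d\n", dmabuf_fd);
	close(dmabuf_fd);
	close(memfd);
	close(devfd);
	return 0;
}

The returned fd can then be mmap'ed (hitting the updated udmabuf_vm_fault()) or attached to a device through the dma-buf API, which exercises the per-chunk offsets set up in get_sg_table().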