Provide a way to set the PTE within apply_to_page_range() for
discontiguous objects, in addition to the existing method of simply
incrementing the pfn across a contiguous page range.

Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h |  3 ++
 drivers/gpu/drm/i915/i915_mm.c  | 67 +++++++++++++++++++++++++++++++++
 2 files changed, 70 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2015,6 +2015,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 int remap_io_mapping(struct vm_area_struct *vma,
 		     unsigned long addr, unsigned long pfn, unsigned long size,
 		     struct io_mapping *iomap);
+int remap_io_sg(struct vm_area_struct *vma,
+		unsigned long addr, resource_size_t io_start, struct scatterlist *sgl,
+		unsigned long size, struct io_mapping *iomap);
 
 static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
 {

diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -32,7 +32,12 @@
 struct remap_pfn {
 	struct mm_struct *mm;
 	unsigned long pfn;
+	unsigned long size;
 	pgprot_t prot;
+
+	resource_size_t io_start;
+	struct sg_dma_page_iter sgiter;
+	struct scatterlist *sgl;
 };
 
 static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
@@ -46,6 +51,30 @@ static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
 	return 0;
 }
 
+static int remap_io(pte_t *pte, unsigned long addr, void *data)
+{
+	struct remap_pfn *r = data;
+	struct sg_dma_page_iter *sgiter = &r->sgiter;
+
+	if (!r->pfn)
+		__sg_page_iter_start(&sgiter->base, r->sgl,
+				     r->size >> PAGE_SHIFT, 0);
+
+	if (__sg_page_iter_dma_next(sgiter)) {
+		dma_addr_t daddr = sg_page_iter_dma_address(sgiter);
+		unsigned long pfn = (r->io_start + daddr) >> PAGE_SHIFT;
+
+		r->pfn = pfn;
+		/* Special PTEs are not associated with any struct page */
+		set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(pfn, r->prot)));
+
+		return 0;
+	}
+
+	GEM_WARN_ON(1); /* invalid range */
+	return -EINVAL;
+}
+
 /**
  * remap_io_mapping - remap an IO mapping to userspace
  * @vma: user vma to map to
@@ -80,3 +109,41 @@ int remap_io_mapping(struct vm_area_struct *vma,
 
 	return 0;
 }
+
+/**
+ * remap_io_sg - remap an IO mapping described by a scatterlist to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @io_start: IO start address; DMA addresses from @sgl are offsets from this
+ * @sgl: first scatterlist entry of the pages to map
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_sg(struct vm_area_struct *vma,
+		unsigned long addr, resource_size_t io_start, struct scatterlist *sgl,
+		unsigned long size, struct io_mapping *iomap)
+{
+	struct remap_pfn r = { 0 };
+	int err;
+
+	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+	r.mm = vma->vm_mm;
+	r.size = size;
+	r.io_start = io_start;
+	r.sgl = sgl;
+	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+	err = apply_to_page_range(r.mm, addr, size, remap_io, &r);
+	if (unlikely(err)) {
+		/* Discontiguous pfns give no insertion count; unwind the whole range */
+		zap_vma_ptes(vma, addr, size);
+		return err;
+	}
+
+	return 0;
+}
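
For reviewers, below is a minimal sketch of how a fault handler could drive
remap_io_sg() once the object's backing store is pinned. It is illustration
only, not part of this patch; the handler name and the object/region
accessors (obj->mm.region->io_start, obj->mm.region->iomap,
obj->mm.pages->sgl) are assumptions:

	/* Hypothetical caller, for illustration only: the region fields
	 * (io_start, iomap) and the pinned sg list accessor are assumed.
	 */
	static vm_fault_t vm_fault_lmem(struct vm_fault *vmf)
	{
		struct vm_area_struct *area = vmf->vma;
		struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
		int err;

		/* The fault path holds the mm semaphore, as required above. */
		err = remap_io_sg(area, area->vm_start,
				  obj->mm.region->io_start, /* lmem bus base */
				  obj->mm.pages->sgl,       /* pinned backing store */
				  area->vm_end - area->vm_start,
				  &obj->mm.region->iomap);

		return err ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
	}

Because the helper fills the whole requested range in one call, a single
fault populates every PTE in the VMA.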