To enable partial memory mapping of GPU virtual memory, it is
necessary to introduce an offset into the scatterlist of the
object's backing memory (obj->mm.pages). This offset accounts for
the case where a userspace mapping does not start at the beginning
of the object.

Based on a patch by Chris Wilson.

Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Cc: Chris Wilson <chris.p.wilson@linux.intel.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 10 +++++++---
 drivers/gpu/drm/i915/i915_mm.c           | 12 +++++++++++-
 drivers/gpu/drm/i915/i915_mm.h           |  3 ++-
 3 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -252,6 +252,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
struct vm_area_struct *area = vmf->vma;
struct i915_mmap_offset *mmo = area->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
+ unsigned long obj_offset;
resource_size_t iomap;
int err;
@@ -273,10 +274,11 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
iomap -= obj->mm.region->region.start;
}
+ obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
/* PTEs are revoked in obj->ops->put_pages() */
err = remap_io_sg(area,
area->vm_start, area->vm_end - area->vm_start,
- obj->mm.pages->sgl, iomap);
+ obj->mm.pages->sgl, obj_offset, iomap);
if (area->vm_flags & VM_WRITE) {
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -302,14 +304,16 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
bool write = area->vm_flags & VM_WRITE;
struct i915_gem_ww_ctx ww;
+ unsigned long obj_offset;
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
int srcu;
int ret;
- /* We don't use vmf->pgoff since that has the fake offset */
+ obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+ page_offset += obj_offset;
trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -404,7 +408,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
- area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
+ area->vm_start + ((vma->gtt_view.partial.offset - obj_offset) << PAGE_SHIFT),
(ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->iomap);
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -122,13 +122,15 @@ int remap_io_mapping(struct vm_area_struct *vma,
* @addr: target user address to start at
* @size: size of map area
* @sgl: Start sg entry
+ * @offset: page offset from the start of the scatterlist
* @iobase: Use stored dma address offset by this address or pfn if -1
*
* Note: this is only safe if the mm semaphore is held when called.
*/
int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
- struct scatterlist *sgl, resource_size_t iobase)
+ struct scatterlist *sgl, unsigned long offset,
+ resource_size_t iobase)
{
struct remap_pfn r = {
.mm = vma->vm_mm,
@@ -141,6 +143,14 @@ int remap_io_sg(struct vm_area_struct *vma,
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+ while (offset >= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT) {
+ offset -= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT;
+ r.sgt = __sgt_iter(__sg_next(r.sgt.sgp), use_dma(iobase));
+ if (!r.sgt.sgp)
+ return -EINVAL;
+ }
+ r.sgt.curr = offset << PAGE_SHIFT;
+
if (!use_dma(iobase))
flush_cache_range(vma, addr, size);
diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h
--- a/drivers/gpu/drm/i915/i915_mm.h
+++ b/drivers/gpu/drm/i915/i915_mm.h
@@ -30,6 +30,7 @@ int remap_io_mapping(struct vm_area_struct *vma,
int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
- struct scatterlist *sgl, resource_size_t iobase);
+ struct scatterlist *sgl, unsigned long offset,
+ resource_size_t iobase);
#endif /* __I915_MM_H__ */
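
As a sanity check on the vm_fault_gtt() arithmetic above, the
following standalone program mirrors how obj_offset and page_offset
are derived. It is illustrative only: PAGE_SHIFT, the node start and
the page numbers are made-up assumptions, not values taken from the
driver.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* An 8-page object whose mmap node starts at fake page 0x100;
	 * userspace mapped pages 2..5 of it, so vm_pgoff = 0x102.
	 */
	unsigned long node_start = 0x100, vm_pgoff = 0x102;
	unsigned long vm_start = 0x7f0000000000UL;
	unsigned long fault_addr = vm_start + (1UL << PAGE_SHIFT);

	/* Same derivation as vm_fault_gtt() above. */
	unsigned long obj_offset = vm_pgoff - node_start;
	unsigned long page_offset =
		((fault_addr - vm_start) >> PAGE_SHIFT) + obj_offset;

	/* A fault one page into the mapping hits page 3 of the object. */
	assert(obj_offset == 2 && page_offset == 3);
	printf("object page %lu\n", page_offset);
	return 0;
}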
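The loop added to remap_io_sg() simply skips whole scatterlist
segments until the requested page offset lands inside the current
segment, then records the residual as the byte cursor (r.sgt.curr).
Below is a minimal userspace re-creation of that arithmetic;
skip_to_segment(), the segment array and PAGE_SHIFT = 12 are
hypothetical stand-ins for the scatterlist machinery, for
illustration only.

#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT 12

/* Advance past whole segments, as the remap_io_sg() loop does.
 * Returns the index of the segment containing page 'offset' and
 * stores the residual page count within it, or returns -1 when the
 * offset runs past the table (the -EINVAL case above).
 */
static int skip_to_segment(const unsigned long *len_bytes, size_t nents,
			   unsigned long offset, unsigned long *residual)
{
	size_t i = 0;

	while (offset >= len_bytes[i] >> PAGE_SHIFT) {
		offset -= len_bytes[i] >> PAGE_SHIFT;
		if (++i == nents)
			return -1;
	}
	*residual = offset;	/* r.sgt.curr = offset << PAGE_SHIFT */
	return (int)i;
}

int main(void)
{
	/* Segments of 4, 2 and 8 pages. */
	const unsigned long seg[] = { 4 << PAGE_SHIFT, 2 << PAGE_SHIFT,
				      8 << PAGE_SHIFT };
	unsigned long residual;

	/* Page 5 lives one page into the second segment. */
	assert(skip_to_segment(seg, 3, 5, &residual) == 1 && residual == 1);
	return 0;
}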
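From userspace, the change means an mmap() of a GEM object no longer
has to start at the object's fake offset: a page-aligned offset into
the object is also accepted. A hedged sketch of such a partial
mapping follows; it assumes an open i915 DRM fd, a valid GEM handle
and 4 KiB pages, picks WB caching arbitrarily, and the include path
for i915_drm.h may differ between kernel and libdrm headers.

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

#define PAGE_SIZE 4096UL

/* Map a single page, 'page' pages into the object. */
static void *map_one_page(int fd, uint32_t handle, unsigned long page)
{
	struct drm_i915_gem_mmap_offset arg = {
		.handle = handle,
		.flags = I915_MMAP_OFFSET_WB,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
		return MAP_FAILED;

	/* Before this series, faults were only resolved for mappings
	 * beginning at arg.offset; the obj_offset handling added above
	 * makes a page-aligned offset into the object work as well.
	 */
	return mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset + page * PAGE_SIZE);
}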