@@ -8,6 +8,177 @@
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
+/*
+ * i915_ww_pin_lock_interruptible - lock, pin and prepare an object's
+ * backing pages for CPU access via the WC aperture.
+ *
+ * Runs as a ww transaction: NOTE(review) for_i915_gem_ww() presumably
+ * re-runs the body on -EDEADLK and drops the lock when the loop
+ * terminates - confirm against the macro definition.
+ *
+ * On success (returns 0) the pages are left *pinned* for the caller,
+ * who must drop them with i915_gem_object_unpin_pages().  On error a
+ * negative errno is returned and nothing is left pinned.
+ */
+static int
+i915_ww_pin_lock_interruptible(struct drm_i915_gem_object *obj)
+{
+ struct i915_gem_ww_ctx ww;
+ int ret;
+
+ for_i915_gem_ww(&ww, ret, true) {
+ ret = i915_gem_object_lock(obj, &ww);
+ if (ret)
+ continue;
+
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ continue;
+
+ /* Move the object to the WC domain for uncached CPU access. */
+ ret = i915_gem_object_set_to_wc_domain(obj, false);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ /* Success: keep the pin for the caller, skip the unpin below. */
+ if (!ret)
+ continue;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+
+ /* Unlocking is done implicitly */
+ }
+
+ return ret;
+}
+
+/*
+ * i915_gem_object_lmem_pread - pread backend for local-memory objects.
+ *
+ * Copies @arg->size bytes starting at @arg->offset from the object's
+ * backing store to the user buffer @arg->data_ptr, one page at a time
+ * through a write-combined io-mapping.  A fast atomic mapping is tried
+ * first; if the user copy faults, we retry with a non-atomic mapping
+ * that is allowed to sleep.
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Made static: it is only reachable through i915_gem_lmem_obj_ops and
+ * is not declared in the header (fixes the sparse missing-declaration
+ * warning and matches i915_gem_object_lmem_pwrite()).
+ */
+static int i915_gem_object_lmem_pread(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *arg)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
+ intel_wakeref_t wakeref;
+ char __user *user_data;
+ unsigned int offset;
+ unsigned long idx;
+ u64 remain;
+ int ret;
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Leaves the pages pinned on success; unpinned in out_put below. */
+ ret = i915_ww_pin_lock_interruptible(obj);
+ if (ret)
+ return ret;
+
+ wakeref = intel_runtime_pm_get(rpm);
+
+ remain = arg->size;
+ user_data = u64_to_user_ptr(arg->data_ptr);
+ offset = offset_in_page(arg->offset);
+ for (idx = arg->offset >> PAGE_SHIFT; remain; idx++) {
+ unsigned long unwritten;
+ void __iomem *vaddr;
+ int length;
+
+ /*
+ * Clamp in u64 space *before* narrowing to int so a huge
+ * remainder cannot truncate or go negative.
+ */
+ length = min_t(u64, remain, PAGE_SIZE - offset);
+
+ vaddr = i915_gem_object_lmem_io_map_page_atomic(obj, idx);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ unwritten = __copy_to_user_inatomic(user_data,
+ (void __force *)vaddr + offset,
+ length);
+ io_mapping_unmap_atomic(vaddr);
+ if (unwritten) {
+ /* Slow path: non-atomic mapping, user copy may fault in. */
+ vaddr = i915_gem_object_lmem_io_map_page(obj, idx);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ unwritten = copy_to_user(user_data,
+ (void __force *)vaddr + offset,
+ length);
+ io_mapping_unmap(vaddr);
+ }
+ if (unwritten) {
+ ret = -EFAULT;
+ goto out_put;
+ }
+
+ remain -= length;
+ user_data += length;
+ offset = 0; /* only the first page can start mid-page */
+ }
+
+out_put:
+ intel_runtime_pm_put(rpm, wakeref);
+ i915_gem_object_unpin_pages(obj);
+
+ return ret;
+}
+
+/*
+ * i915_gem_object_lmem_pwrite - pwrite backend for local-memory objects.
+ *
+ * Copies @arg->size bytes from the user buffer @arg->data_ptr into the
+ * object's backing store at @arg->offset, one page at a time through a
+ * write-combined io-mapping.  A fast atomic mapping with a non-cached
+ * copy is tried first; if the user copy faults, we retry with a
+ * non-atomic mapping that is allowed to sleep.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int i915_gem_object_lmem_pwrite(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *arg)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
+ intel_wakeref_t wakeref;
+ char __user *user_data;
+ unsigned int offset;
+ unsigned long idx;
+ u64 remain;
+ int ret;
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Leaves the pages pinned on success; unpinned in out_put below. */
+ ret = i915_ww_pin_lock_interruptible(obj);
+ if (ret)
+ return ret;
+
+ wakeref = intel_runtime_pm_get(rpm);
+
+ remain = arg->size;
+ user_data = u64_to_user_ptr(arg->data_ptr);
+ offset = offset_in_page(arg->offset);
+ for (idx = arg->offset >> PAGE_SHIFT; remain; idx++) {
+ unsigned long unwritten;
+ void __iomem *vaddr;
+ int length;
+
+ /*
+ * Clamp in u64 space *before* narrowing to int so a huge
+ * remainder cannot truncate or go negative.
+ */
+ length = min_t(u64, remain, PAGE_SIZE - offset);
+
+ vaddr = i915_gem_object_lmem_io_map_page_atomic(obj, idx);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
+ user_data, length);
+ io_mapping_unmap_atomic(vaddr);
+ if (unwritten) {
+ /* Slow path: non-atomic mapping, user copy may fault in. */
+ vaddr = i915_gem_object_lmem_io_map_page(obj, idx);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ unwritten = copy_from_user((void __force *)vaddr + offset,
+ user_data, length);
+ io_mapping_unmap(vaddr);
+ }
+ if (unwritten) {
+ ret = -EFAULT;
+ goto out_put;
+ }
+
+ remain -= length;
+ user_data += length;
+ offset = 0; /* only the first page can start mid-page */
+ }
+
+out_put:
+ intel_runtime_pm_put(rpm, wakeref);
+ i915_gem_object_unpin_pages(obj);
+
+ return ret;
+}
+
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.name = "i915_gem_object_lmem",
.flags = I915_GEM_OBJECT_HAS_IOMEM,
@@ -15,8 +186,23 @@ const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.get_pages = i915_gem_object_get_pages_buddy,
.put_pages = i915_gem_object_put_pages_buddy,
.release = i915_gem_object_release_memory_region,
+
+ /* Direct CPU copies through the lmem WC io-mapping. */
+ .pread = i915_gem_object_lmem_pread,
+ .pwrite = i915_gem_object_lmem_pwrite,
};
+/*
+ * i915_gem_object_lmem_io_map_page - WC-map a single page of a
+ * local-memory object.
+ *
+ * Non-atomic counterpart of i915_gem_object_lmem_io_map_page_atomic();
+ * used as the fallback when the atomic user copy faults.  The page's
+ * dma address is translated to an offset within the memory region's io
+ * aperture.  The caller releases the mapping with io_mapping_unmap().
+ */
+void __iomem *
+i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
void __iomem *
i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
unsigned long n)
@@ -14,6 +14,8 @@ struct intel_memory_region;
extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+ unsigned long n);
void __iomem *
i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
unsigned long n);