@@ -2000,6 +2000,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, \
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_FALLOCATE, i915_gem_fallocate_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SET_PLANE_180_ROTATION, \
i915_set_plane_180_rotation, DRM_AUTH | DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_ENABLE_PLANE_RESERVED_REG_BIT_2,
@@ -2210,6 +2210,8 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
int i915_gem_init_userptr(struct drm_device *dev);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+int i915_gem_fallocate_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
@@ -317,6 +317,213 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size, &args->handle);
}
+static int i915_gem_obj_fallocate(struct drm_i915_gem_object *obj,
+ uint32_t mode, uint32_t start,
+ uint32_t length)
+{
+ int i;
+ int ret;
+ uint32_t start_page, end_page;
+ uint32_t page_count;
+ gfp_t gfp;
+ bool update_sg_table = false;
+ unsigned long scratch_pfn;
+ struct page *scratch;
+ struct page **pages;
+ struct sg_table *sg = NULL;
+ struct sg_page_iter sg_iter;
+ struct address_space *mapping;
+ struct drm_i915_private *dev_priv;
+
+ dev_priv = obj->base.dev->dev_private;
+ start_page = start >> PAGE_SHIFT;
+ end_page = (start + length) >> PAGE_SHIFT;
+ page_count = obj->base.size >> PAGE_SHIFT;
+
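+	/*
+	 * Build a temporary flat array of the object's backing pages so the
+	 * requested range can be indexed directly by page number.
+	 */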
+ pages = drm_malloc_ab(page_count, sizeof(*pages));
+ if (pages == NULL)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ pages[i] = sg_page_iter_page(&sg_iter);
+ i++;
+ }
+
+ mapping = file_inode(obj->base.filp)->i_mapping;
+ gfp = mapping_gfp_mask(mapping);
+ gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+ gfp &= ~(__GFP_IO | __GFP_WAIT);
+ scratch = dev_priv->gtt.base.scratch.page;
+ scratch_pfn = page_to_pfn(scratch);
+
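+	/*
+	 * Pages already marked as scratch all point at the single GTT
+	 * scratch page, so a pfn comparison tells real shmem-backed pages
+	 * and scratch entries apart.
+	 */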
+ if (mode & I915_GEM_FALLOC_MARK_SCRATCH) {
+ /* invalidate page range */
+ for (i = start_page; i < end_page; ++i) {
+ if (scratch_pfn == page_to_pfn(pages[i]))
+ continue;
+
+ update_sg_table = true;
+ drm_clflush_pages((pages + i), 1);
+ if (obj->dirty)
+ set_page_dirty(pages[i]);
+ page_cache_release(pages[i]);
+ pages[i] = scratch;
+ }
+ } else if (mode & I915_GEM_FALLOC_UNMARK_SCRATCH) {
+ struct page *page;
+
+ /* allocate new pages */
+ for (i = start_page; i < end_page; ++i) {
+ if (page_to_pfn(pages[i]) != scratch_pfn)
+ continue;
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page)) {
+ i915_gem_purge(dev_priv, page_count);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ }
+ if (IS_ERR(page)) {
+ /* We've tried hard to allocate the memory by reaping
+ * our own buffer, now let the real VM do its job and
+ * go down in flames if truly OOM.
+ */
+ gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
+ gfp |= __GFP_IO | __GFP_WAIT;
+
+ i915_gem_shrink_all(dev_priv);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto err_pages;
+ }
+
+ gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+ gfp &= ~(__GFP_IO | __GFP_WAIT);
+ }
+ update_sg_table = true;
+ pages[i] = page;
+ }
+ }
+
+	if (!update_sg_table) {
+ ret = 0;
+ goto out;
+ }
+
+	/*
+	 * It is easier to replace the existing sg_table with a new one
+	 * than to modify it in place.
+	 */
+ sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!sg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+	ret = sg_alloc_table_from_pages(sg, pages, page_count, 0,
+			page_count << PAGE_SHIFT, GFP_KERNEL);
+	if (ret) {
+		kfree(sg);
+		goto out;
+	}
+
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+
+ obj->pages = sg;
+
+	ret = 0;
+	goto out;
+
+err_pages:
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+ page_cache_release(sg_page_iter_page(&sg_iter));
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+out:
+ if (pages)
+ drm_free_large(pages);
+
+ return ret;
+}
+
+/**
+ * i915_gem_fallocate_ioctl - mark ranges of a GEM object as scratch or usable
+ *
+ * A GEM object's size is fixed at creation and the driver relies on that
+ * invariant throughout. This ioctl therefore does not resize the object;
+ * instead it lets userspace mark ranges of the object's backing store as
+ * scratch, or back them with real pages again, changing the amount of
+ * memory the object effectively consumes.
+ */
+int i915_gem_fallocate_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ int ret;
+ uint32_t mode, start, length;
+ struct i915_vma *vma;
+ struct drm_i915_private *dev_priv;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+ struct drm_i915_gem_fallocate *args = data;
+
+ mode = args->mode;
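+	/* Exactly one of MARK_SCRATCH / UNMARK_SCRATCH must be set. */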
+ if (!((mode & I915_GEM_FALLOC_MARK_SCRATCH) ^
+ ((mode & I915_GEM_FALLOC_UNMARK_SCRATCH) >> 1)))
+ return -EINVAL;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL)
+ return -ENOENT;
+
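+	/* The range is handled at whole-page granularity only. */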
+	start = roundup(args->start, PAGE_SIZE);
+	length = roundup(args->length, PAGE_SIZE);
+	if (length == 0 || length > obj->base.size ||
+	    start > obj->base.size ||
+	    (start + length) > obj->base.size) {
+		drm_gem_object_unreference_unlocked(&obj->base);
+		return -EINVAL;
+	}
+
+ dev_priv = dev->dev_private;
+ vm = &dev_priv->gtt.base;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(&obj->base);
+		return ret;
+	}
+
+ if (!i915_gem_obj_bound(obj, vm)) {
+ ret = i915_gem_object_bind_to_vm(obj, vm, 0, true, false);
+ if (ret)
+			goto out;
+
+ if (!dev_priv->mm.aliasing_ppgtt)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+ }
+
+	vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma) {
+ ret = -EINVAL;
+ goto out;
+ }
+
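+	/*
+	 * Unbind while the backing pages are swapped; the VMA is rebuilt
+	 * against the new sg_table below.
+	 */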
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_obj_fallocate(obj, mode, start, length);
+ if (ret) {
+ DRM_ERROR("fallocate obj range failed\n");
+ goto out;
+ }
+
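+	/* Re-bind so the GTT mapping reflects the new backing store. */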
+ ret = i915_gem_object_bind_to_vm(obj, vm, 0, true, false);
+ if (ret)
+ DRM_ERROR("object couldn't be bound after falloc\n");
+
+out:
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
@@ -275,6 +275,7 @@ struct csc_coeff {
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_SET_PLANE_ZORDER 0x33
#define DRM_I915_GEM_USERPTR 0x34
+#define DRM_I915_GEM_FALLOCATE 0x35
#define DRM_I915_SET_PLANE_180_ROTATION 0x36
#define DRM_I915_ENABLE_PLANE_RESERVED_REG_BIT_2 0x37
#define DRM_I915_SET_CSC 0x39
@@ -339,6 +340,9 @@ struct csc_coeff {
#define DRM_IOCTL_I915_GEM_USERPTR \
DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, \
struct drm_i915_gem_userptr)
+#define DRM_IOCTL_I915_GEM_FALLOCATE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_FALLOCATE, \
+ struct drm_i915_gem_fallocate)
#define DRM_IOCTL_I915_SET_PLANE_ALPHA \
DRM_IOW(DRM_COMMAND_BASE + DRM_I915_SET_PLANE_ALPHA, \
struct drm_i915_set_plane_alpha)
@@ -523,6 +527,33 @@ struct drm_i915_gem_create {
__u32 pad;
};
+struct drm_i915_gem_fallocate {
+	/**
+	 * Start offset of the range, in bytes.
+	 *
+	 * If the value is not page aligned it is rounded up to the next
+	 * page boundary internally.
+	 */
+ __u64 start;
+	/**
+	 * Length of the range, in bytes.
+	 *
+	 * If the value is not page aligned it is rounded up to the next
+	 * page boundary internally.
+	 */
+ __u64 length;
+	/**
+	 * Operation to apply to the range; exactly one of the mode flags
+	 * below must be set.
+	 */
+ __u32 mode;
+#define I915_GEM_FALLOC_MARK_SCRATCH 0x01
+#define I915_GEM_FALLOC_UNMARK_SCRATCH 0x02
+	/**
+	 * Handle of the object whose range is being marked.
+	 *
+	 * Object handles are nonzero.
+	 */
+ __u32 handle;
+};
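+
+/*
+ * Illustrative userspace usage (a minimal sketch, assuming an already open
+ * DRM fd, an existing GEM handle "bo_handle" and libdrm's drmIoctl()
+ * wrapper; error handling omitted):
+ *
+ *	struct drm_i915_gem_fallocate falloc = {
+ *		.start  = 0,
+ *		.length = 1 << 20,
+ *		.mode   = I915_GEM_FALLOC_MARK_SCRATCH,
+ *		.handle = bo_handle,
+ *	};
+ *	drmIoctl(fd, DRM_IOCTL_I915_GEM_FALLOCATE, &falloc);
+ */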
+
struct drm_i915_gem_pread {
/** Handle for the object being read. */
__u32 handle;