@@ -2000,6 +2000,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, \
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_RESIZE, i915_gem_resize_ioctl, \
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SET_PLANE_180_ROTATION, \
i915_set_plane_180_rotation, DRM_AUTH | DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_ENABLE_PLANE_RESERVED_REG_BIT_2,
@@ -2217,6 +2217,8 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
int i915_gem_init_userptr(struct drm_device *dev);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+int i915_gem_resize_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
@@ -372,6 +372,117 @@ unlock:
return ret;
}
+/**
+ * i915_gem_resize_ioctl - grow the shmem backing store of a gem object
+ *
+ * Swaps the scratch pages beyond the currently backed range for real
+ * shmemfs pages, up to the requested size. The object must already be
+ * bound and have its page array allocated.
+ */
+int i915_gem_resize_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ int i, j, ret;
+ int page_range;
+ int total_page_count, resize_page_count;
+ struct drm_i915_private *dev_priv;
+ struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_resize *args = data;
+ struct address_space *mapping;
+ struct sg_page_iter sg_iter;
+ struct page *page;
+ gfp_t gfp;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ /* Not valid to be called on unbound objects. */
+ if (!i915_gem_obj_bound_any(obj)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!obj->pages) {
+ DRM_DEBUG_DRIVER("backing store not yet allocated for this obj\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (obj->base.size != args->size ||
+ obj->gem_resize.base_size != args->curr_size) {
+ DRM_DEBUG_DRIVER("invalid object size(s)\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ total_page_count = obj->base.size / PAGE_SIZE;
+ resize_page_count = args->resize / PAGE_SIZE;
+ if (resize_page_count < obj->gem_resize.stop ||
+ resize_page_count > total_page_count) {
+ DRM_DEBUG_DRIVER("resize target out of range\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ DRM_DEBUG_DRIVER("resize object from %d to %d pages, total pages %d\n",
+ obj->gem_resize.stop, resize_page_count, total_page_count);
+
+ dev_priv = obj->base.dev->dev_private;
+ page_range = resize_page_count - obj->gem_resize.stop;
+ mapping = file_inode(obj->base.filp)->i_mapping;
+ gfp = mapping_gfp_mask(mapping);
+ gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+ gfp &= ~(__GFP_IO | __GFP_WAIT);
+
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ /* Pages below the stop mark already have real shmemfs backing. */
+ if (i < obj->gem_resize.stop) {
+ ++i;
+ continue;
+ }
+ /* Stop once the requested range has been populated. */
+ if (i >= resize_page_count)
+ break;
+
+ page = sg_page_iter_page(&sg_iter);
+ WARN_ON(page != obj->gem_resize.scratch_page);
+
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page)) {
+ /* First fallback: purge our own purgeable objects, then
+ * retry the cheap allocation.
+ */
+ i915_gem_purge(dev_priv, page_range);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ }
+ if (IS_ERR(page)) {
+ /* We've tried hard to allocate the memory by reaping
+ * our own buffer, now let the real VM do its job and
+ * go down in flames if truly OOM.
+ */
+ gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
+ gfp |= __GFP_IO | __GFP_WAIT;
+
+ i915_gem_shrink_all(dev_priv);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (IS_ERR(page)) {
+ DRM_ERROR("out of memory\n");
+ ret = PTR_ERR(page);
+ goto err_pages;
+ }
+
+ gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+ gfp &= ~(__GFP_IO | __GFP_WAIT);
+ }
+ sg_set_page(sg_iter.sg, page, PAGE_SIZE, 0);
+
+ ++i;
+ }
+ obj->gem_resize.stop = resize_page_count;
+ ret = 0;
+ goto out;
+
+err_pages:
+ /* Unwind only what this call changed: release the shmemfs pages
+ * swapped in so far and point their sg entries back at the scratch
+ * page, leaving the object exactly as we found it.
+ */
+ j = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ if (j >= i)
+ break;
+ if (j >= obj->gem_resize.stop) {
+ page_cache_release(sg_page_iter_page(&sg_iter));
+ sg_set_page(sg_iter.sg, obj->gem_resize.scratch_page,
+ PAGE_SIZE, 0);
+ }
+ ++j;
+ }
+
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
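
Note: the function above consumes obj->gem_resize state (base_size, stop,
scratch_page) that is set up elsewhere in the series. As a rough sketch of
the assumed contract, derived only from how the fields are used above (the
real initialization is not part of this excerpt), the create path would do
something like:

    /* Hypothetical create-path setup, for illustration only. */
    obj->gem_resize.base_size = args->base_size;
    obj->gem_resize.stop = args->base_size / PAGE_SIZE; /* pages really backed */
    obj->gem_resize.scratch_page = scratch_page;        /* fills slots >= stop */

i.e. sg entries at index >= stop all point at one shared scratch page until
this ioctl swaps them for real shmemfs pages.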
@@ -275,6 +275,7 @@ struct csc_coeff {
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_SET_PLANE_ZORDER 0x33
#define DRM_I915_GEM_USERPTR 0x34
+#define DRM_I915_GEM_RESIZE 0x35
#define DRM_I915_SET_PLANE_180_ROTATION 0x36
#define DRM_I915_ENABLE_PLANE_RESERVED_REG_BIT_2 0x37
#define DRM_I915_SET_CSC 0x39
@@ -339,6 +340,9 @@ struct csc_coeff {
#define DRM_IOCTL_I915_GEM_USERPTR \
DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, \
struct drm_i915_gem_userptr)
+#define DRM_IOCTL_I915_GEM_RESIZE \
+ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_RESIZE, \
+ struct drm_i915_gem_resize)
#define DRM_IOCTL_I915_SET_PLANE_ALPHA \
DRM_IOW(DRM_COMMAND_BASE + DRM_I915_SET_PLANE_ALPHA, \
struct drm_i915_set_plane_alpha)
@@ -530,6 +534,28 @@ struct drm_i915_gem_create {
__u64 base_size;
};
+struct drm_i915_gem_resize {
+ /**
+ * Size of the backing store allocated at creation time, in bytes.
+ * Must match the base_size the object was created with.
+ */
+ __u64 curr_size;
+ /**
+ * New backing-store size, in bytes (rounded down to whole pages);
+ * must not shrink the backing store nor exceed the object size.
+ */
+ __u64 resize;
+ /**
+ * Total size of the object, in bytes; remains constant and must
+ * match the object's actual size.
+ */
+ __u64 size;
+ /** Currently unused; must be zero. */
+ __u32 flags;
+ /** Handle of the object to be resized. */
+ __u32 handle;
+};
+
struct drm_i915_gem_pread {
/** Handle for the object being read. */
__u32 handle;
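
With the uapi above in place, userspace grows an object's backing store by
filling in all three sizes. A minimal sketch, assuming libdrm and this
patch's i915_drm.h; fd, handle and the sizes are placeholders from a
hypothetical DRM_IOCTL_I915_GEM_CREATE call with size = 8 MiB and
base_size = 1 MiB:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include "i915_drm.h"

    static int gem_grow(int fd, uint32_t handle)
    {
        struct drm_i915_gem_resize arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;               /* object to resize */
        arg.size = 8 * 1024 * 1024;        /* total object size, constant */
        arg.curr_size = 1 * 1024 * 1024;   /* base_size used at create */
        arg.resize = 4 * 1024 * 1024;      /* new backing-store size */

        /* drmIoctl() retries on EINTR for us. */
        return drmIoctl(fd, DRM_IOCTL_I915_GEM_RESIZE, &arg);
    }

On success the object has 4 MiB of real shmemfs backing; later calls can
grow it further, up to size.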