@@ -2010,8 +2010,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
@@ -2025,6 +2025,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE2, i915_gem_create2_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
@@ -2157,6 +2157,8 @@ int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_create2_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
@@ -2191,10 +2193,10 @@ int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_device *dev);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -2319,6 +2321,8 @@ static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+int __must_check i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
+ int tiling_mode, int stride);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
@@ -2351,6 +2355,9 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+bool
+i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode);
+
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
@@ -53,6 +53,14 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
+#define PIN_OFFSET_VALID 0x1
+static struct i915_vma *
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint64_t offset,
+ unsigned alignment,
+ uint64_t flags);
+
static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
struct shrink_control *sc);
static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
@@ -380,8 +388,7 @@ i915_gem_dumb_create(struct drm_file *file,
/* have to work out size/pitch and return them */
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
- return i915_gem_create(file, dev,
- args->size, &args->handle);
+ return i915_gem_create(file, dev, args->size, &args->handle);
}
/**
@@ -392,9 +399,155 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_create *args = data;
- return i915_gem_create(file, dev,
- args->size, &args->handle);
+ return i915_gem_create(file, dev, args->size, &args->handle);
+}
+
+int
+i915_gem_create2_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_create2 *args = data;
+ struct drm_i915_gem_object *obj;
+ unsigned cache_level;
+ enum {
+ ASYNC_CLEAR = 0x1,
+ } flags = 0;
+ int ret;
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (args->flags & ~(0))
+ return -EINVAL;
+
+ if (!i915_tiling_ok(dev, args->stride, args->size, args->tiling_mode))
+ return -EINVAL;
+
+ switch (args->domain) {
+ case 0:
+ case I915_GEM_DOMAIN_CPU:
+ case I915_GEM_DOMAIN_GTT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (args->caching) {
+ case I915_CACHING_NONE:
+ cache_level = I915_CACHE_NONE;
+ break;
+ case I915_CACHING_CACHED:
+ cache_level = I915_CACHE_LLC;
+ break;
+ case I915_CACHING_DISPLAY:
+ cache_level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (args->madvise) {
+ case I915_MADV_DONTNEED:
+ case I915_MADV_WILLNEED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (args->size == 0 || args->size & 4095)
+ return -EINVAL;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = NULL;
+ switch (args->placement) {
+ case I915_CREATE_PLACEMENT_SYSTEM:
+ obj = i915_gem_alloc_object(dev, args->size);
+ break;
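+ /* Stolen memory is not scrubbed on allocation, so the object must
+ * be cleared (ASYNC_CLEAR below) before userspace gets a handle.
+ */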
+ case I915_CREATE_PLACEMENT_STOLEN:
+ obj = i915_gem_object_create_stolen(dev, args->size);
+ flags |= ASYNC_CLEAR;
+ break;
+ default:
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (obj == NULL) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = i915_gem_object_set_cache_level(obj, cache_level);
+ if (ret)
+ goto err;
+
+ ret = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
+ if (ret)
+ goto err;
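+
+ /* Optionally soft-pin the new object at a fixed offset inside the
+ * target context's per-process GTT.
+ */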
+ if (args->offset & I915_CREATE_OFFSET_VALID) {
+ struct intel_context *ctx;
+ struct i915_vma *vma;
+
+ ctx = i915_gem_context_get(file->driver_priv, args->context);
+ if (IS_ERR(ctx)) {
+ ret = PTR_ERR(ctx);
+ goto err;
+ }
+
+ vma = i915_gem_obj_to_vma(obj, ctx->vm);
+ if (vma && drm_mm_node_allocated(&vma->node)) {
+ if (vma->node.start != (args->offset &
+ ~I915_CREATE_OFFSET_VALID)) {
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ goto err;
+
+ vma = NULL;
+ }
+ }
+
+ if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
+ vma = i915_gem_object_bind_to_vm(obj, ctx->vm,
+ args->offset, 0, flags);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err;
+ }
+ }
+ }
+
+ if (flags & ASYNC_CLEAR) {
+ ret = i915_gem_exec_clear_object(obj);
+ if (ret)
+ goto err;
+ }
+
+ if (args->domain) {
+ if (args->domain == I915_GEM_DOMAIN_GTT) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret == -EINVAL) /* unbound */
+ ret = 0;
+ } else {
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ }
+ if (ret)
+ goto err;
+ }
+
+ ret = drm_gem_handle_create(file, &obj->base, &args->handle);
+ if (ret)
+ goto err;
+
+ obj->madv = args->madvise;
+ trace_i915_gem_object_create(obj);
+err:
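+ /* A successfully created handle holds its own reference to the
+ * object, so the creation reference is dropped on both the success
+ * and error paths.
+ */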
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
static inline int
@@ -3377,6 +3530,7 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
+ uint64_t offset,
unsigned alignment,
uint64_t flags)
{
@@ -3432,22 +3586,38 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
goto err_unpin;
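+
+ /* A caller-supplied offset (tagged with PIN_OFFSET_VALID) requests an
+ * exact placement via drm_mm_reserve_node(); otherwise fall back to
+ * searching the address space for free room as before.
+ */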
+ if (offset & PIN_OFFSET_VALID) {
+ offset &= ~PIN_OFFSET_VALID;
+ if (alignment && offset & (alignment - 1)) {
+ vma = ERR_PTR(-EINVAL);
+ goto err_free_vma;
+ }
+
+ vma->node.start = offset;
+ vma->node.size = size;
+ vma->node.color = obj->cache_level;
+ ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err_free_vma;
+ }
+ } else {
search_free:
- ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
- size, alignment,
- obj->cache_level,
- start, end,
- DRM_MM_SEARCH_DEFAULT,
- DRM_MM_CREATE_DEFAULT);
- if (ret) {
- ret = i915_gem_evict_something(dev, vm, size, alignment,
- obj->cache_level,
- start, end,
- flags);
- if (ret == 0)
- goto search_free;
- goto err_free_vma;
+ ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
+ size, alignment,
+ obj->cache_level,
+ start, end,
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
+ if (ret) {
+ ret = i915_gem_evict_something(dev, vm, size, alignment,
+ obj->cache_level,
+ start, end, flags);
+ if (ret == 0)
+ goto search_free;
+ goto err_free_vma;
+ }
}
if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
obj->cache_level))) {
@@ -4085,7 +4255,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
- vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
+ vma = i915_gem_object_bind_to_vm(obj, vm, 0, alignment, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
}
@@ -201,7 +201,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
/* Check pitch constriants for all chips & tiling formats */
-static bool
+bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
@@ -285,12 +285,68 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return true;
}
+int
+i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
+ int tiling_mode, int stride)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int ret;
+
+ if (tiling_mode == obj->tiling_mode && stride == obj->stride)
+ return 0;
+
+ /* We need to rebind the object if its current allocation
+ * no longer meets the alignment restrictions for its new
+ * tiling mode. Otherwise we can just leave it alone, but
+ * need to ensure that any fence register is updated before
+ * the next fenced (either through the GTT or by the BLT unit
+ * on older GPUs) access.
+ *
+ * After updating the tiling parameters, we then flag whether
+ * we need to update an associated fence register. Note this
+ * has to also include the unfenced register the GPU uses
+ * whilst executing a fenced command for an untiled object.
+ */
+
+ obj->map_and_fenceable =
+ !i915_gem_obj_ggtt_bound(obj) ||
+ ((i915_gem_obj_ggtt_offset(obj) + obj->base.size)
+ <= dev_priv->gtt.mappable_end &&
+ i915_gem_object_fence_ok(obj, tiling_mode));
+
+ /* Rebind if we need a change of alignment */
+ ret = 0;
+ if (!obj->map_and_fenceable) {
+ u32 unfenced_alignment =
+ i915_gem_get_gtt_alignment(dev_priv->dev,
+ obj->base.size, tiling_mode,
+ false);
+ if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
+ ret = i915_gem_object_ggtt_unbind(obj);
+ }
+
+ if (ret == 0) {
+ obj->fence_dirty =
+ obj->fenced_gpu_access ||
+ obj->fence_reg != I915_FENCE_REG_NONE;
+
+ obj->tiling_mode = tiling_mode;
+ obj->stride = stride;
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_release_mmap(obj);
+ }
+
+ return ret;
+}
+
/**
* Sets the tiling mode of an object, returning the required swizzling of
* bit 6 of addresses in the object.
*/
int
-i915_gem_set_tiling(struct drm_device *dev, void *data,
+i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
@@ -343,49 +399,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- if (args->tiling_mode != obj->tiling_mode ||
- args->stride != obj->stride) {
- /* We need to rebind the object if its current allocation
- * no longer meets the alignment restrictions for its new
- * tiling mode. Otherwise we can just leave it alone, but
- * need to ensure that any fence register is updated before
- * the next fenced (either through the GTT or by the BLT unit
- * on older GPUs) access.
- *
- * After updating the tiling parameters, we then flag whether
- * we need to update an associated fence register. Note this
- * has to also include the unfenced register the GPU uses
- * whilst executing a fenced command for an untiled object.
- */
-
- obj->map_and_fenceable =
- !i915_gem_obj_ggtt_bound(obj) ||
- (i915_gem_obj_ggtt_offset(obj) +
- obj->base.size <= dev_priv->gtt.mappable_end &&
- i915_gem_object_fence_ok(obj, args->tiling_mode));
-
- /* Rebind if we need a change of alignment */
- if (!obj->map_and_fenceable) {
- u32 unfenced_align =
- i915_gem_get_gtt_alignment(dev, obj->base.size,
- args->tiling_mode,
- false);
- if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
- ret = i915_gem_object_ggtt_unbind(obj);
- }
-
- if (ret == 0) {
- obj->fence_dirty =
- obj->fenced_gpu_access ||
- obj->fence_reg != I915_FENCE_REG_NONE;
-
- obj->tiling_mode = args->tiling_mode;
- obj->stride = args->stride;
-
- /* Force the fence to be reacquired for GTT access */
- i915_gem_release_mmap(obj);
- }
- }
+ ret = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
/* we have to maintain this existing ABI... */
args->stride = obj->stride;
args->tiling_mode = obj->tiling_mode;
@@ -411,7 +425,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
* Returns the current tiling mode and required bit 6 swizzling for the object.
*/
int
-i915_gem_get_tiling(struct drm_device *dev, void *data,
+i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
@@ -224,6 +224,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_GEM_USERPTR 0x33
+#define DRM_I915_GEM_CREATE2 0x34
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -254,6 +255,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_CREATE2 DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE2, struct drm_i915_gem_create2)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
@@ -437,6 +439,111 @@ struct drm_i915_gem_create {
__u32 pad;
};
+struct drm_i915_gem_create2 {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ __u64 size;
+
+ /**
+ * Requested offset for the object.
+ *
+ * Can be used for "soft-pinning" the object into the per-process
+ * GTT of the target context upon creation. Only possible if using
+ * contexts and per-process GTTs.
+ *
+ * The address must be page-aligned and have the I915_CREATE_OFFSET_VALID bit set.
+ */
+ __u64 offset;
+#define I915_CREATE_OFFSET_VALID (1<<0)
+
+ /**
+ * Target context of the object.
+ *
+ * The context selects the per-process GTT that the requested offset
+ * above refers to.
+ */
+ __u32 context;
+
+ /**
+ * Requested placement (which memory domain)
+ *
+ * You can request that the object be created from special memory
+ * rather than regular system pages. Such irregular objects may
+ * have certain restrictions (such as CPU access to a stolen
+ * object is verboten).
+ */
+ __u32 placement;
+#define I915_CREATE_PLACEMENT_SYSTEM 0
+#define I915_CREATE_PLACEMENT_STOLEN 1 /* Cannot use CPU mmaps or pread/pwrite */
+ /**
+ * Requested domain (which cache domain)
+ *
+ * You can request that the object be created from memory in a
+ * certain cache domain (such as RENDER, CPU or GTT). In some cases,
+ * this may then allocate from a pool of such pages to avoid any
+ * migration overhead, but it is always equivalent to performing
+ * an explicit set-domain(read=DOMAIN, write=DOMAIN) on the
+ * constructed object.
+ *
+ * Set to 0 to leave the initial domain unspecified, in which case it
+ * defaults to the domain set by the constructor.
+ *
+ * See DRM_IOCTL_I915_GEM_SET_DOMAIN
+ */
+ __u32 domain;
+
+ /**
+ * Requested cache level.
+ *
+ * See DRM_IOCTL_I915_GEM_SET_CACHING
+ */
+ __u32 caching;
+
+ /**
+ * Requested tiling mode.
+ *
+ * See DRM_IOCTL_I915_GEM_SET_TILING
+ */
+ __u32 tiling_mode;
+ /**
+ * Requested stride for tiling.
+ *
+ * See DRM_IOCTL_I915_GEM_SET_TILING
+ */
+ __u32 stride;
+
+ /**
+ * Requested madvise priority.
+ *
+ * See DRM_IOCTL_I915_GEM_MADVISE
+ */
+ __u32 madvise;
+
+ /**
+ * Additional miscellaneous flags
+ *
+ * Reserved for future use, must be zero.
+ */
+ __u32 flags;
+
+ /**
+ * Padding for 64-bit struct alignment.
+ *
+ * Reserved for future use, must be zero.
+ */
+ __u32 pad;
+
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+};
+
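
As an aside (not part of the patch): a minimal userspace sketch of exercising the new ioctl, assuming libdrm's drmIoctl() helper and the updated i915_drm.h above. The helper name example_gem_create2() and the size/tiling/caching values are placeholders chosen only to satisfy the checks in i915_gem_create2_ioctl().

#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>	/* include path depends on where the updated header lands */

/* Illustrative only: create a 1 MiB, X-tiled, LLC-cached object in
 * system memory and return its GEM handle (0 on failure). */
static __u32 example_gem_create2(int fd)
{
	struct drm_i915_gem_create2 arg;

	memset(&arg, 0, sizeof(arg));		/* offset, context, flags, pad = 0 */
	arg.size = 1024 * 1024;			/* must be a multiple of the page size */
	arg.placement = I915_CREATE_PLACEMENT_SYSTEM;
	arg.domain = I915_GEM_DOMAIN_GTT;	/* initial domain, as for set-domain */
	arg.caching = I915_CACHING_CACHED;
	arg.tiling_mode = I915_TILING_X;
	arg.stride = 4096;			/* must pass i915_tiling_ok() */
	arg.madvise = I915_MADV_WILLNEED;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE2, &arg))
		return 0;

	return arg.handle;
}

drmIoctl() retries on EINTR/EAGAIN and returns non-zero on failure, so a zero handle here signals an error.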
struct drm_i915_gem_pread {
/** Handle for the object being read. */
__u32 handle;