@@ -1094,6 +1094,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
int ret, mode;
bool need_relocs;
struct i915_scheduler_queue_entry qe;
+#ifdef CONFIG_DRM_I915_SCHEDULER
+ int i;
+#endif
if (!i915_gem_check_execbuffer(args))
return -EINVAL;
@@ -1250,6 +1253,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
+#ifdef CONFIG_DRM_I915_SCHEDULER
+ qe.saved_objects = kzalloc(
+ sizeof(*qe.saved_objects) * args->buffer_count,
+ GFP_KERNEL);
+ if (!qe.saved_objects) {
+ ret = -ENOMEM;
+ goto err;
+ }
+#endif
+
/* Look up object handles */
ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
@@ -1333,10 +1346,33 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
qe.params.args_DR4 = args->DR4;
qe.params.batch_obj = batch_obj;
qe.params.cliprects = cliprects;
- qe.params.ctx = ctx;
qe.params.mask = mask;
qe.params.mode = mode;
+#ifdef CONFIG_DRM_I915_SCHEDULER
+ /*
+ * Save away the list of objects used by this batch buffer for the
+ * purpose of tracking inter-buffer dependencies.
+ */
+ for (i = 0; i < args->buffer_count; i++) {
+ /*
+ * NB: 'drm_gem_object_lookup()' increments the object's
+ * reference count and so must be matched by a
+ * 'drm_gem_object_unreference' call.
+ */
+ qe.saved_objects[i].obj =
+ to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ }
+ qe.num_objs = i;
+
+ /* Lock and save the context object as well. */
+ i915_gem_context_reference(ctx);
+ qe.params.ctx = ctx;
+#else // CONFIG_DRM_I915_SCHEDULER
+ qe.params.ctx = ctx;
+#endif // CONFIG_DRM_I915_SCHEDULER
+
if (flags & I915_DISPATCH_SECURE)
qe.params.batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
else
@@ -1370,6 +1406,25 @@ err:
eb_destroy(eb);
+#ifdef CONFIG_DRM_I915_SCHEDULER
+ if (qe.saved_objects) {
+ /* Need to release the objects: */
+ for (i = 0; i < qe.num_objs; i++) {
+ if (!qe.saved_objects[i].obj)
+ continue;
+
+ drm_gem_object_unreference(
+ &qe.saved_objects[i].obj->base);
+ }
+
+ kfree(qe.saved_objects);
+
+ /* Context too */
+ if (qe.params.ctx)
+ i915_gem_context_unreference(qe.params.ctx);
+ }
+#endif // CONFIG_DRM_I915_SCHEDULER
+
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
@@ -62,7 +62,7 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
{
struct drm_i915_private *dev_priv = qe->params.dev->dev_private;
struct i915_scheduler *scheduler = dev_priv->scheduler;
- int ret;
+ int ret, i;
BUG_ON(!scheduler);
@@ -70,7 +70,23 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
ret = i915_gem_do_execbuffer_final(&qe->params);
- /* Free everything that is owned by the QE structure: */
+ /* Need to release the objects: */
+ for (i = 0; i < qe->num_objs; i++) {
+ if (!qe->saved_objects[i].obj)
+ continue;
+
+ drm_gem_object_unreference(&qe->saved_objects[i].obj->base);
+ }
+
+ kfree(qe->saved_objects);
+ qe->saved_objects = NULL;
+ qe->num_objs = 0;
+
+ /* Free the context object too: */
+ if (qe->params.ctx)
+ i915_gem_context_unreference(qe->params.ctx);
+
+ /* And anything else owned by the QE structure: */
kfree(qe->params.cliprects);
return ret;
@@ -45,8 +45,14 @@ struct i915_execbuffer_params {
uint32_t scheduler_index;
};
+struct i915_scheduler_obj_entry {
+ struct drm_i915_gem_object *obj;
+};
+
struct i915_scheduler_queue_entry {
struct i915_execbuffer_params params;
+ struct i915_scheduler_obj_entry *saved_objects;
+ int num_objs;
};
bool i915_scheduler_is_enabled(struct drm_device *dev);
From: John Harrison <John.C.Harrison@Intel.com> The scheduler needs to track interdependencies between batch buffers. These are calculated by analysing the object lists of the buffers and looking for commonality. The scheduler also needs to hold references to those buffer objects (and their context) long after the initial IOCTL call has returned to userspace, so that they remain valid until the batch is actually submitted. --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 57 +++++++++++++++++++++++++++- drivers/gpu/drm/i915/i915_scheduler.c | 20 +++++++++- drivers/gpu/drm/i915/i915_scheduler.h | 6 +++ 3 files changed, 80 insertions(+), 3 deletions(-)