@@ -994,6 +994,12 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
+
+ i915.enable_preemption =
+ intel_sanitize_enable_preemption(dev_priv,
+ i915.enable_preemption);
+ DRM_DEBUG_DRIVER("preemption enabled? %s\n",
+ yesno(i915.enable_preemption));
}
/**
@@ -2963,6 +2963,9 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
+bool intel_sanitize_enable_preemption(struct drm_i915_private *dev_priv,
+ int enable_preemption);
+
/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -4482,6 +4482,28 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
return true;
}
+/**
+ * intel_sanitize_enable_preemption() - sanitize i915.enable_preemption
+ * @dev_priv: i915 device private
+ * @enable_preemption: value of i915.enable_preemption module parameter.
+ *
+ * Preemption is currently supported only when GuC submission is in
+ * use.
+ *
+ * Return: true if preemption is supported and should be enabled.
+ */
+bool intel_sanitize_enable_preemption(struct drm_i915_private *dev_priv,
+ int enable_preemption)
+{
+ if (!i915.enable_guc_submission)
+ return false;
+
+ if (enable_preemption >= 0)
+ return enable_preemption;
+
+ return false;
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -489,6 +489,11 @@ static int guc_ring_doorbell(struct i915_guc_client *client)
return ret;
}
+static int i915_guc_preempt_noop(struct intel_engine_cs *engine)
+{
+ return 0;
+}
+
/**
* __i915_guc_submit() - Submit commands through GuC
* @rq: request associated with the commands
@@ -1043,6 +1048,9 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
*/
engine->irq_tasklet.func = i915_guc_irq_handler;
+ if (i915.enable_preemption)
+ engine->preempt = i915_guc_preempt_noop;
+
/* Replay the current set of previously submitted requests */
spin_lock_irqsave(&engine->timeline->lock, flags);
list_for_each_entry(rq, &engine->timeline->requests, link) {
@@ -63,6 +63,7 @@ struct i915_params i915 __read_mostly = {
.inject_load_failure = 0,
.enable_dpcd_backlight = false,
.enable_gvt = false,
+ .enable_preemption = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -243,3 +244,7 @@ MODULE_PARM_DESC(enable_dpcd_backlight,
module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
MODULE_PARM_DESC(enable_gvt,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
+
+module_param_named_unsafe(enable_preemption, i915.enable_preemption, int, 0400);
+MODULE_PARM_DESC(enable_preemption,
+ "Enable preemption (0=disabled [default], 1=enabled)");
@@ -49,6 +49,7 @@
func(int, use_mmio_flip); \
func(int, mmio_debug); \
func(int, edp_vswing); \
+ func(int, enable_preemption); \
func(unsigned int, inject_load_failure); \
/* leave bools at the end to not create holes */ \
func(bool, alpha_support); \
@@ -312,6 +312,9 @@ struct intel_engine_cs {
void (*schedule)(struct drm_i915_gem_request *request,
int priority);
+ /* Called to attempt preemption of the currently executing workload */
+ int (*preempt)(struct intel_engine_cs *engine);
+
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
We're going to support preemption on platforms where GuC submission is enabled. Signed-off-by: Michał Winiarski <michal.winiarski@intel.com> --- drivers/gpu/drm/i915/i915_drv.c | 6 ++++++ drivers/gpu/drm/i915/i915_drv.h | 3 +++ drivers/gpu/drm/i915/i915_gem.c | 22 ++++++++++++++++++++++ drivers/gpu/drm/i915/i915_guc_submission.c | 8 ++++++++ drivers/gpu/drm/i915/i915_params.c | 5 +++++ drivers/gpu/drm/i915/i915_params.h | 1 + drivers/gpu/drm/i915/intel_ringbuffer.h | 3 +++ 7 files changed, 48 insertions(+)