@@ -1207,6 +1207,13 @@ struct i915_perf_stream {
*/
intel_wakeref_t wakeref;
+ /**
+ * @initial_config_rq: First request run at the opening of the i915
+ * perf stream to configure the HW. Should be NULL after the perf
+ * stream has been opened successfully.
+ */
+ struct i915_request *initial_config_rq;
+
/**
* @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
* properties given when opening a stream, representing the contents
@@ -390,6 +390,19 @@ void i915_oa_config_release(struct kref *ref)
kfree(oa_config);
}
+static void i915_oa_config_dispose_buffers(struct drm_i915_private *i915)
+{
+ struct i915_oa_config *oa_config, *next;
+
+ mutex_lock(&i915->perf.metrics_lock);
+ list_for_each_entry_safe(oa_config, next, &i915->perf.metrics_buffers, vma_link) {
+ list_del(&oa_config->vma_link);
+ i915_gem_object_put(oa_config->obj);
+ oa_config->obj = NULL;
+ }
+ mutex_unlock(&i915->perf.metrics_lock);
+}
+
static u32 *write_cs_mi_lri(u32 *cs, const struct i915_oa_reg *reg_data, u32 n_regs)
{
u32 i;
@@ -1447,6 +1460,14 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
}
}
+static void free_noa_wait(struct drm_i915_private *i915)
+{
+ mutex_lock(&i915->drm.struct_mutex);
+ i915_vma_unpin_and_release(&i915->perf.oa.noa_wait,
+ I915_VMA_RELEASE_MAP);
+ mutex_unlock(&i915->drm.struct_mutex);
+}
+
static void
free_oa_buffer(struct drm_i915_private *i915)
{
@@ -1471,11 +1492,11 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
* the metric set on gen8+.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- dev_priv->perf.oa.exclusive_stream = NULL;
dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
- i915_vma_unpin_and_release(&dev_priv->perf.oa.noa_wait, 0);
+ dev_priv->perf.oa.exclusive_stream = NULL;
mutex_unlock(&dev_priv->drm.struct_mutex);
+ free_noa_wait(dev_priv);
free_oa_buffer(dev_priv);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1703,6 +1724,10 @@ static int alloc_noa_wait(struct drm_i915_private *i915)
return PTR_ERR(bo);
}
+ ret = i915_mutex_lock_interruptible(&i915->drm);
+ if (ret)
+ goto err_unref;
+
/*
* We pin in GGTT because we jump into this buffer now because
* multiple OA config BOs will have a jump to this address and it
@@ -1710,10 +1735,13 @@ static int alloc_noa_wait(struct drm_i915_private *i915)
*/
vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 4096, 0);
if (IS_ERR(vma)) {
+ mutex_unlock(&i915->drm.struct_mutex);
ret = PTR_ERR(vma);
goto err_unref;
}
+ mutex_unlock(&i915->drm.struct_mutex);
+
batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
if (IS_ERR(batch)) {
ret = PTR_ERR(batch);
@@ -1847,7 +1875,11 @@ static int alloc_noa_wait(struct drm_i915_private *i915)
return 0;
err_unpin:
- __i915_vma_unpin(vma);
+ mutex_lock(&i915->drm.struct_mutex);
+ i915_vma_unpin_and_release(&i915->perf.oa.noa_wait, 0);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return ret;
err_unref:
i915_gem_object_put(bo);
@@ -1855,50 +1887,71 @@ static int alloc_noa_wait(struct drm_i915_private *i915)
return ret;
}
-static void config_oa_regs(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg *regs,
- u32 n_regs)
+static int emit_oa_config(struct drm_i915_private *i915,
+ struct i915_perf_stream *stream)
{
- u32 i;
+ struct i915_oa_config *oa_config = stream->oa_config;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
- for (i = 0; i < n_regs; i++) {
- const struct i915_oa_reg *reg = regs + i;
+ lockdep_assert_held(&i915->drm.struct_mutex);
- I915_WRITE(reg->addr, reg->value);
+ rq = i915_request_create(i915->engine[RCS0]->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = i915_active_request_set(&i915->engine[RCS0]->last_oa_config,
+ rq);
+ if (err)
+ goto err_add_request;
+
+ vma = i915_vma_instance(oa_config->obj, &i915->ggtt.vm, NULL);
+ if (unlikely(IS_ERR(vma))) {
+ err = PTR_ERR(vma);
+ goto err_add_request;
}
-}
-static void delay_after_mux(void)
-{
- /*
- * It apparently takes a fairly long time for a new MUX
- * configuration to be be applied after these register writes.
- * This delay duration was derived empirically based on the
- * render_basic config but hopefully it covers the maximum
- * configuration latency.
- *
- * As a fallback, the checks in _append_oa_reports() to skip
- * invalid OA reports do also seem to work to discard reports
- * generated before this config has completed - albeit not
- * silently.
- *
- * Unfortunately this is essentially a magic number, since we
- * don't currently know of a reliable mechanism for predicting
- * how long the MUX config will take to apply and besides
- * seeing invalid reports we don't know of a reliable way to
- * explicitly check that the MUX config has landed.
- *
- * It's even possible we've miss characterized the underlying
- * problem - it just seems like the simplest explanation why
- * a delay at this location would mitigate any invalid reports.
- */
- usleep_range(15000, 20000);
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_add_request;
+
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto err_vma_unpin;
+
+ cs = intel_ring_begin(rq, INTEL_GEN(i915) >= 8 ? 4 : 2);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_vma_unpin;
+ }
+
+ if (INTEL_GEN(i915) >= 8) {
+ *cs++ = MI_BATCH_BUFFER_START_GEN8;
+ *cs++ = lower_32_bits(vma->node.start);
+ *cs++ = upper_32_bits(vma->node.start);
+ *cs++ = MI_NOOP;
+ } else {
+ *cs++ = MI_BATCH_BUFFER_START;
+ *cs++ = vma->node.start;
+ }
+
+ intel_ring_advance(rq, cs);
+
+ stream->initial_config_rq = i915_request_get(rq);
+
+err_vma_unpin:
+ i915_vma_unpin(vma);
+err_add_request:
+ i915_request_add(rq);
+
+ return err;
}
static int hsw_enable_metric_set(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
/*
* PRM:
@@ -1915,13 +1968,7 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
GEN6_CSUNIT_CLOCK_GATE_DISABLE));
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
- delay_after_mux();
-
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
- oa_config->b_counter_regs_len);
-
- return 0;
+ return emit_oa_config(dev_priv, stream);
}
static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
@@ -2268,13 +2315,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
if (ret)
return ret;
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
- delay_after_mux();
-
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
- oa_config->b_counter_regs_len);
-
- return 0;
+ return emit_oa_config(dev_priv, stream);
}
static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
@@ -2445,7 +2486,9 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
struct perf_open_properties *props)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct drm_i915_gem_object *obj;
int format_size;
+ long timeout;
int ret;
/* If the sysfs metrics/ directory wasn't registered for some
@@ -2529,13 +2572,6 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
}
}
- ret = i915_perf_get_oa_config(dev_priv, props->metrics_set,
- &stream->oa_config, NULL);
- if (ret) {
- DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
- goto err_config;
- }
-
ret = alloc_noa_wait(dev_priv);
if (ret) {
DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
@@ -2561,47 +2597,68 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (ret)
goto err_oa_buf_alloc;
+ ret = i915_perf_get_oa_config(dev_priv, props->metrics_set,
+ &stream->oa_config, &obj);
+ if (ret) {
+ DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
+ goto err_config;
+ }
+
+ /*
+ * We just need the buffer to be created, but not our own reference on
+ * it as the oa_config already has one.
+ */
+ i915_gem_object_put(obj);
+
+ stream->ops = &i915_oa_stream_ops;
+
ret = i915_mutex_lock_interruptible(&dev_priv->drm);
if (ret)
goto err_lock;
- stream->ops = &i915_oa_stream_ops;
dev_priv->perf.oa.exclusive_stream = stream;
ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
}
- DRM_DEBUG("opening stream oa config uuid=%s\n", stream->oa_config->uuid);
+ timeout = i915_request_wait(stream->initial_config_rq,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(stream->initial_config_rq);
+ stream->initial_config_rq = NULL;
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ ret = timeout < 0 ? timeout : 0;
+ if (ret)
+ goto err_enable;
+
+ DRM_DEBUG("opening stream oa config uuid=%s\n", stream->oa_config->uuid);
return 0;
err_enable:
+ mutex_lock(&dev_priv->drm.struct_mutex);
dev_priv->perf.oa.exclusive_stream = NULL;
dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
err_lock:
+ i915_oa_config_put(stream->oa_config);
+ i915_oa_config_dispose_buffers(dev_priv);
+
+err_config:
free_oa_buffer(dev_priv);
err_oa_buf_alloc:
- i915_oa_config_put(stream->oa_config);
-
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
- mutex_lock(&dev_priv->drm.struct_mutex);
- i915_vma_unpin_and_release(&dev_priv->perf.oa.noa_wait, 0);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ free_noa_wait(dev_priv);
err_noa_wait_alloc:
- i915_oa_config_put(stream->oa_config);
-
-err_config:
if (stream->ctx)
oa_put_render_ctx_id(stream);
@@ -2963,20 +3020,13 @@ static int i915_perf_release(struct inode *inode, struct file *file)
{
struct i915_perf_stream *stream = file->private_data;
struct drm_i915_private *dev_priv = stream->dev_priv;
- struct i915_oa_config *oa_config, *next;
mutex_lock(&dev_priv->perf.lock);
i915_perf_destroy_locked(stream);
/* Dispose of all oa config batch buffers. */
- mutex_lock(&dev_priv->perf.metrics_lock);
- list_for_each_entry_safe(oa_config, next, &dev_priv->perf.metrics_buffers, vma_link) {
- list_del(&oa_config->vma_link);
- i915_gem_object_put(oa_config->obj);
- oa_config->obj = NULL;
- }
- mutex_unlock(&dev_priv->perf.metrics_lock);
+ i915_oa_config_dispose_buffers(dev_priv);
mutex_unlock(&dev_priv->perf.lock);
We haven't run into issues with programming the global OA/NOA registers configuration from CPU so far, but HW engineers actually recommend doing this from the command streamer. On TGL in particular one of the clock domains in which some of that programming goes might not be powered when we poke things from the CPU. Since we have a command buffer prepared for the execbuffer side of things, we can reuse that approach here too. This also allows us to significantly reduce the amount of time we hold the main lock. v2: Drop the global lock as much as possible v3: Take global lock to pin global v4: Create i915 request in emit_oa_config() to avoid deadlocks (Lionel) Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com> --- drivers/gpu/drm/i915/i915_drv.h | 7 ++ drivers/gpu/drm/i915/i915_perf.c | 204 +++++++++++++++++++------------ 2 files changed, 134 insertions(+), 77 deletions(-)