@@ -1378,6 +1378,23 @@ free_noa_wait(struct i915_perf_stream *stream)
i915_vma_unpin_and_release(&stream->noa_wait, 0);
}
+static int
+wait_and_put_configure_request(struct i915_perf_stream *stream)
+{
+ struct i915_request *rq = stream->configure_request;
+ int ret = 0;
+
+ GEM_BUG_ON(!rq);
+ stream->configure_request = NULL;
+
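+ /*
+ * The request was emitted by emit_oa_config(); wait for it to complete
+ * so the OA unit is fully programmed before the stream is used. Any
+ * failure to wait is reported to userspace as a timeout.
+ */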
+ if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
+ ret = -ETIME;
+
+ i915_request_put(rq);
+
+ return ret;
+}
+
static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
struct i915_perf *perf = stream->perf;
@@ -1392,6 +1409,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
*/
WRITE_ONCE(perf->exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
+ GEM_BUG_ON(stream->configure_request);
free_oa_buffer(stream);
@@ -1956,7 +1974,8 @@ get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
static int emit_oa_config(struct i915_perf_stream *stream,
struct i915_oa_config *oa_config,
- struct intel_context *ce)
+ struct intel_context *ce,
+ bool store_on_stream)
{
struct i915_request *rq;
struct i915_vma *vma;
@@ -1989,6 +2008,12 @@ static int emit_oa_config(struct i915_perf_stream *stream,
err = rq->engine->emit_bb_start(rq,
vma->node.start, 0,
I915_DISPATCH_SECURE);
+
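+ /*
+ * When requested, keep a reference to this request so the stream
+ * open path can wait for the HW configuration to complete.
+ */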
+ if (err == 0 && store_on_stream) {
+ GEM_BUG_ON(stream->configure_request);
+ stream->configure_request = i915_request_get(rq);
+ }
+
err_add_request:
i915_request_add(rq);
err_vma_unpin:
@@ -2022,7 +2047,9 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
intel_uncore_rmw(uncore, GEN6_UCGCTL1,
0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
- return emit_oa_config(stream, stream->oa_config, oa_context(stream));
+ return emit_oa_config(stream, stream->oa_config,
+ oa_context(stream),
+ true /* store_on_stream */);
}
static void hsw_disable_metric_set(struct i915_perf_stream *stream)
@@ -2452,7 +2479,9 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
if (ret)
return ret;
- return emit_oa_config(stream, oa_config, oa_context(stream));
+ return emit_oa_config(stream, oa_config,
+ oa_context(stream),
+ true /* store_on_stream */);
}
static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
@@ -2506,7 +2535,9 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
return ret;
}
- return emit_oa_config(stream, oa_config, oa_context(stream));
+ return emit_oa_config(stream, oa_config,
+ oa_context(stream),
+ true /* store_on_stream */);
}
static void gen8_disable_metric_set(struct i915_perf_stream *stream)
@@ -2841,6 +2872,12 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
goto err_enable;
}
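+ /*
+ * Wait for the initial OA configuration request to complete before
+ * declaring the stream open, so that the data userspace reads from
+ * the OA buffer is captured with the configuration applied.
+ */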
+ ret = wait_and_put_configure_request(stream);
+ if (ret) {
+ DRM_DEBUG("Wait on OA config request timed out\n");
+ goto err_enable;
+ }
+
DRM_DEBUG("opening stream oa config uuid=%s\n",
stream->oa_config->uuid);
@@ -2855,6 +2892,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
err_enable:
WRITE_ONCE(perf->exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
+ GEM_BUG_ON(stream->configure_request);
free_oa_buffer(stream);
@@ -3163,7 +3201,8 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
* When set globally, we use a low priority kernel context,
* so it will effectively take effect when idle.
*/
- err = emit_oa_config(stream, config, oa_context(stream));
+ err = emit_oa_config(stream, config, oa_context(stream),
+ false /* store_on_stream */);
if (err == 0)
config = xchg(&stream->oa_config, config);
else
@@ -309,6 +309,14 @@ struct i915_perf_stream {
* reprogrammed.
*/
struct i915_vma *noa_wait;
+
+ /**
+ * @configure_request: Request to wait on for the HW to complete its
+ * initial configuration. Waiting matters for applications doing
+ * system-wide monitoring: all the data they can get through the OA
+ * buffer should be valid, i.e. captured with the configuration applied.
+ */
+ struct i915_request *configure_request;
};
/**