@@ -1184,7 +1184,7 @@ static int __wait_request(struct drm_i915_gem_request *req,
return -ENODEV;
/* Record current time in case interrupted by signal, or wedged */
- trace_i915_gem_request_wait_begin(i915_gem_request_get_ring(req), i915_gem_request_get_seqno(req));
+ trace_i915_gem_request_wait_begin(req);
before = ktime_get_raw_ns();
for (;;) {
struct timer_list timer;
@@ -1235,7 +1235,7 @@ static int __wait_request(struct drm_i915_gem_request *req,
}
}
now = ktime_get_raw_ns();
- trace_i915_gem_request_wait_end(i915_gem_request_get_ring(req), i915_gem_request_get_seqno(req));
+ trace_i915_gem_request_wait_end(req);
if (!irq_test_in_progress)
ring->irq_put(ring);
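
Both wait tracepoints now take the request directly and derive the engine and seqno for themselves. The conversion leans on two small accessors introduced by this series; the NULL-tolerant bodies below are a sketch of their intent rather than a verbatim quote:

	static inline u32
	i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
	{
		/* Tolerate NULL so callers and tracepoints need no guards */
		return req ? req->seqno : 0;
	}

	static inline struct intel_engine_cs *
	i915_gem_request_get_ring(struct drm_i915_gem_request *req)
	{
		return req ? req->ring : NULL;
	}
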
@@ -2419,7 +2419,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
spin_unlock(&file_priv->mm.lock);
}
- trace_i915_gem_request_add(ring, request->seqno);
+ trace_i915_gem_request_add(request);
ring->outstanding_lazy_request = NULL;
if (!dev_priv->ums.mm_suspended) {
@@ -2684,7 +2684,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
if (!i915_gem_request_completed(request, true))
break;
- trace_i915_gem_request_retire(ring, request->seqno);
+ trace_i915_gem_request_retire(request);
/* This is one of the few common intersection points
* between legacy ringbuffer submission and execlists:
@@ -2902,7 +2902,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- trace_i915_gem_ring_sync_to(from, to, seqno);
+ trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
ret = to->semaphore.sync_to(to, from, seqno);
if (!ret)
/* We use last_read_req because sync_to()
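
Only the trace call changes in this hunk; the semaphore call below it still takes the raw seqno. A sketch of why the two stay in agreement, assuming seqno was derived from the same request earlier in i915_gem_object_sync() (which the truncated comment above alludes to):

	/* Inside the converted event this effectively happens: */
	__entry->seqno = i915_gem_request_get_seqno(obj->last_read_req);
	/* ...which equals 'seqno' whenever last_read_req is the request
	 * the caller computed that seqno from in the first place. */
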
@@ -1170,7 +1170,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
return ret;
}
- trace_i915_gem_ring_dispatch(ring, i915_gem_request_get_seqno(intel_ring_get_request(ring)), flags);
+ trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
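
The remaining hunks all live in i915_trace.h. For orientation, the request members the converted events consume are roughly the following; this is a sketch, not the full definition, and the kref comes from the refcounting patch earlier in the series:

	struct drm_i915_gem_request {
		struct kref ref;		/* requests are now refcounted */
		u32 seqno;			/* breadcrumb the GPU writes back */
		struct intel_engine_cs *ring;	/* engine executing the request */
		/* ...many more members elided... */
	};
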
@@ -328,8 +328,8 @@ TRACE_EVENT(i915_gem_evict_vm,
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct intel_engine_cs *from,
struct intel_engine_cs *to,
- u32 seqno),
- TP_ARGS(from, to, seqno),
+ struct drm_i915_gem_request *req),
+ TP_ARGS(from, to, req),
TP_STRUCT__entry(
__field(u32, dev)
@@ -342,7 +342,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to->id;
- __entry->seqno = seqno;
+ __entry->seqno = i915_gem_request_get_seqno(req);
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -352,8 +352,8 @@ TRACE_EVENT(i915_gem_ring_sync_to,
);
TRACE_EVENT(i915_gem_ring_dispatch,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
- TP_ARGS(ring, seqno, flags),
+ TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
+ TP_ARGS(req, flags),
TP_STRUCT__entry(
__field(u32, dev)
@@ -363,11 +363,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
),
TP_fast_assign(
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->seqno = seqno;
+ __entry->seqno = i915_gem_request_get_seqno(req);
__entry->flags = flags;
- i915_trace_irq_get(ring, seqno);
+ i915_trace_irq_get(ring, __entry->seqno);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -398,8 +400,8 @@ TRACE_EVENT(i915_gem_ring_flush,
);
DECLARE_EVENT_CLASS(i915_gem_request,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno),
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, dev)
@@ -408,9 +410,11 @@ DECLARE_EVENT_CLASS(i915_gem_request,
),
TP_fast_assign(
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->seqno = seqno;
+ __entry->seqno = i915_gem_request_get_seqno(req);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -418,8 +422,8 @@ DECLARE_EVENT_CLASS(i915_gem_request,
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
);
TRACE_EVENT(i915_gem_request_complete,
@@ -443,13 +447,13 @@ TRACE_EVENT(i915_gem_request_complete,
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
);
TRACE_EVENT(i915_gem_request_wait_begin,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno),
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req),
TP_STRUCT__entry(
__field(u32, dev)
@@ -465,10 +469,13 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
+ struct intel_engine_cs *ring =
+ i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->seqno = seqno;
- __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
+ __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->blocking =
+ mutex_is_locked(&ring->dev->struct_mutex);
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -477,8 +484,8 @@ TRACE_EVENT(i915_gem_request_wait_begin,
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
);
DECLARE_EVENT_CLASS(i915_ring,
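
Since every converted event still records the same dev/ring/seqno fields, the userspace-visible trace format is unchanged and existing consumers keep working. Typical usage, assuming tracefs is mounted under debugfs as was standard at the time:

	# enable one of the converted events and watch it fire
	echo 1 > /sys/kernel/debug/tracing/events/i915/i915_gem_request_add/enable
	cat /sys/kernel/debug/tracing/trace_pipe
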