@@ -2342,6 +2342,8 @@ bool i915_gem_reset(struct drm_device *dev)
void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
+ LIST_HEAD(deferred_request_free);
+ struct drm_i915_gem_request *request;
uint32_t seqno;
if (list_empty(&ring->request_list))
@@ -2352,8 +2354,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
seqno = ring->get_seqno(ring, true);
while (!list_empty(&ring->request_list)) {
- struct drm_i915_gem_request *request;
-
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
@@ -2369,7 +2369,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
*/
ring->last_retired_head = request->tail;
- i915_gem_free_request(request);
+ list_move_tail(&request->list, &deferred_request_free);
}
/* Move any buffers on the active list that are no longer referenced
@@ -2395,6 +2395,13 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
ring->trace_irq_seqno = 0;
}
+ /* Finish processing active list before freeing request */
+ while (!list_empty(&deferred_request_free)) {
+ request = list_first_entry(&deferred_request_free,
+ struct drm_i915_gem_request,
+ list);
+ i915_gem_free_request(request);
+ }
WARN_ON(i915_verify_lists(ring->dev));
}
With context destruction, we always want to be able to tear down the
underlying address space. This is invoked on the last unreference to the
context, which could happen before we've moved all objects to the
inactive list. To enable a clean teardown of the address space, make
sure to process the request frees last. Without this change, we cannot
guarantee that we don't still have active objects in the VM.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_gem.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
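For context, the patch replaces a free-as-you-go loop with a two-phase pattern: retired requests are first moved onto a local list with list_move_tail(), and only freed after the active-list processing that may still dereference them has finished. Below is a minimal, simplified sketch of that pattern using the standard <linux/list.h> helpers; struct foo_request, foo_retire() and the field names are hypothetical illustrations, not the driver's actual types.

```c
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical request type; only the list linkage matters here. */
struct foo_request {
	struct list_head list;
	u32 seqno;
};

/*
 * Two-phase retire: unlink completed requests first, and defer the
 * frees until all other processing that might still look at them
 * (e.g. retiring objects on an active list) has finished.
 */
static void foo_retire(struct list_head *request_list)
{
	LIST_HEAD(deferred_free);
	struct foo_request *req, *next;

	/* Phase 1: move completed requests aside, but do not free yet. */
	while (!list_empty(request_list)) {
		req = list_first_entry(request_list, struct foo_request, list);
		list_move_tail(&req->list, &deferred_free);
	}

	/*
	 * ... other processing that may still reference the requests,
	 * such as moving no-longer-referenced buffers to the inactive
	 * list, goes here ...
	 */

	/* Phase 2: now it is safe to free them. */
	list_for_each_entry_safe(req, next, &deferred_free, list) {
		list_del(&req->list);
		kfree(req);
	}
}
```

The patch itself folds phase 1 into the existing retire loop (each retired request is moved to deferred_request_free instead of being freed), and its final while loop terminates because i915_gem_free_request() unlinks the request from the list, just as the pre-existing loop over ring->request_list relied on.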