--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1995,6 +1995,8 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
* an emission time with seqnos for tracking how far ahead of the GPU we are.
*/
struct drm_i915_gem_request {
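+ /** Ref count: the request is freed via i915_gem_request_free() when this drops to zero */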
+ struct kref ref;
+
/** On which ring this request was generated */
struct intel_engine_cs *ring;
@@ -2024,6 +2026,32 @@ struct drm_i915_gem_request {
struct list_head client_list;
};
+void i915_gem_request_free(struct kref *req_ref);
+
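+/* Take an extra reference on @req; drop it again with i915_gem_request_unreference() */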
+static inline void
+i915_gem_request_reference(struct drm_i915_gem_request *req)
+{
+ kref_get(&req->ref);
+}
+
+static inline void
+i915_gem_request_unreference(struct drm_i915_gem_request *req)
+{
+ kref_put(&req->ref, i915_gem_request_free);
+}
+
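+/*
+ * Swap the request tracked by @pdst for @src, taking a reference on @src
+ * before dropping the old one, so src == *pdst and NULL are both safe.
+ */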
+static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+ struct drm_i915_gem_request *src)
+{
+ if (src)
+ i915_gem_request_reference(src);
+
+ if (*pdst)
+ i915_gem_request_unreference(*pdst);
+
+ *pdst = src;
+}
+
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct drm_file *file;
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2569,21 +2569,30 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
- struct intel_context *ctx = request->ctx;
-
list_del(&request->list);
i915_gem_request_remove_from_client(request);
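+ /* Drop this code's reference; i915_gem_request_free() runs on the final unreference */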
+ i915_gem_request_unreference(request);
+}
+
+void i915_gem_request_free(struct kref *req_ref)
+{
+ struct drm_i915_gem_request *req = container_of(req_ref,
+ typeof(*req), ref);
+ struct intel_context *ctx = req->ctx;
+
if (ctx) {
if (i915.enable_execlists) {
- struct intel_engine_cs *ring = request->ring;
+ struct intel_engine_cs *ring = req->ring;
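+
+ /* Release the context pin held for this request (the default context stays pinned) */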
if (ctx != ring->default_context)
intel_lr_context_unpin(ring, ctx);
}
+
i915_gem_context_unreference(ctx);
}
- kfree(request);
+
+ kfree(req);
}
struct drm_i915_gem_request *
@@ -2671,8 +2680,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
}
/* These may not have been flushed before the reset, do so now */
- kfree(ring->preallocated_lazy_request);
- ring->preallocated_lazy_request = NULL;
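+ /* Drop the reference rather than kfree'ing a potentially still-referenced request */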
+ i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
ring->outstanding_lazy_seqno = 0;
}
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -905,6 +905,8 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
}
}
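+ /* New requests start with a single reference, dropped in i915_gem_free_request() */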
+ kref_init(&request->ref);
+
ret = i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
if (ret) {
intel_lr_context_unpin(ring, ctx);
@@ -1365,7 +1367,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
- ring->preallocated_lazy_request = NULL;
+ i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
ring->outstanding_lazy_seqno = 0;
if (ring->cleanup)
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1870,7 +1870,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
- ring->preallocated_lazy_request = NULL;
+ i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
ring->outstanding_lazy_seqno = 0;
if (ring->cleanup)
@@ -2043,6 +2043,8 @@ intel_ring_alloc_seqno(struct intel_engine_cs *ring)
if (request == NULL)
return -ENOMEM;
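+ /* As in the execlists path: the request's life starts with one reference */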
+ kref_init(&request->ref);
+
ret = i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
if (ret) {
kfree(request);