From: John Harrison <John.C.Harrison@Intel.com>

If the scheduler pre-empts a batch buffer that is queued in the ring, or
even already executing on the ring, then that buffer must be returned to
the "queued in software" state. Part of this re-queueing is to clean up
the request structure.
---
 drivers/gpu/drm/i915/i915_drv.h |  1 +
 drivers/gpu/drm/i915/i915_gem.c | 16 ++++++++++++++++
 2 files changed, 17 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2319,6 +2319,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	__i915_add_request(ring, NULL, NULL, seqno, true)
 #define i915_add_request_wo_flush(ring) \
 	__i915_add_request(ring, NULL, NULL, NULL, false)
+int i915_gem_cancel_request(struct intel_engine_cs *ring, u32 seqno);
 int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
 				 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2655,6 +2655,22 @@ void i915_gem_reset(struct drm_device *dev)
 	i915_gem_restore_fences(dev);
 }
 
+int
+i915_gem_cancel_request(struct intel_engine_cs *ring, u32 seqno)
+{
+	struct drm_i915_gem_request *req, *next;
+	int found = 0;
+
+	list_for_each_entry_safe(req, next, &ring->request_list, list) {
+		if (req->seqno == seqno) {
+			found += 1;
+			i915_gem_free_request(req);
+		}
+	}
+
+	return found;
+}
+
 /**
  * This function clears the request list as sequence numbers are passed.
  */
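
For context, here is a minimal sketch (not part of the patch) of how a
scheduler's pre-emption path might use the new export. Only
i915_gem_cancel_request() is introduced by this patch;
scheduler_requeue_batch(), the debug message, and the re-queue step are
assumptions for illustration:

	/*
	 * Hypothetical caller: a pre-emption path that pulls a batch back
	 * out of the hardware queue and returns it to the "queued in
	 * software" state.
	 */
	static void scheduler_requeue_batch(struct intel_engine_cs *ring,
					    u32 seqno)
	{
		/* Drop any request structure still tracking this seqno so
		 * the batch can be resubmitted later with a fresh request. */
		if (i915_gem_cancel_request(ring, seqno) == 0)
			DRM_DEBUG_DRIVER("no request for seqno %u\n", seqno);

		/* ...push the batch back onto the software queue here... */
	}

Returning a count rather than void lets the caller see whether the seqno
was still being tracked, and the list_for_each_entry_safe() walk is needed
because i915_gem_free_request() unlinks and frees the entry under the
iterator.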