--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2882,6 +2882,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
 int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
+int i915_gem_init_hw_late(struct drm_device *dev);
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5073,6 +5073,20 @@ i915_gem_init_hw(struct drm_device *dev)
 		goto out;
 	}
 
+	ret = i915_gem_init_hw_late(dev);
+
+out:
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	return ret;
+}
+
+int
+i915_gem_init_hw_late(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int ret, i, j;
+
 	/* Now it is safe to go back round and do everything else: */
 	for_each_ring(ring, dev_priv, i) {
 		struct drm_i915_gem_request *req;
@@ -5110,7 +5124,6 @@ i915_gem_init_hw(struct drm_device *dev)
 	}
 
 out:
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	return ret;
 }
 
This function has recently been updated by several patches, including:

  drm/i915: Add explicit request management to i915_gem_init_hw()
  drm/i915: Moved the for_each_ring loop outside of i915_gem_context_enable()

Now we need to move the entire loop into a separate function, replacing
the inline loop with a call. This will allow a future patch to add a
call from another location (for now, there are no other callers).

The split marks the distinction between early initialisation, which
uses MMIO register access to set up non-context registers, and late
initialisation, which uses batchbuffers containing LRI instructions to
set up context-specific registers.

Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h |  1 +
 drivers/gpu/drm/i915/i915_gem.c | 15 ++++++++++++++-
 2 files changed, 15 insertions(+), 1 deletion(-)
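
For illustration, the two programming styles that the split separates
look roughly as follows. This is only a sketch based on the driver's
API at this point in the series (compare i915_gem_l3_remap(), declared
just above the new prototype); the particular register, and the req,
ring and remap_value names, are examples chosen for the sketch, not
code taken from this patch.

	/* Early initialisation style: a non-context register set up
	 * by a direct MMIO write; no ring or request is involved. */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | ECOCHK_SNB_BIT);

	/* Late initialisation style: a context-specific register set
	 * up by an LRI (MI_LOAD_REGISTER_IMM) emitted into the ring
	 * under a request, so the value is associated with a context.
	 * Each LRI takes three dwords: opcode, register offset, value. */
	ret = intel_ring_begin(req, 3);
	if (ret)
		return ret;
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN7_L3LOG_BASE);
	intel_ring_emit(ring, remap_value);
	intel_ring_advance(ring);

The early style can run as soon as the device is awake, which is why
it stays in i915_gem_init_hw(); the late style needs rings, requests
and contexts to be functional, which is what i915_gem_init_hw_late()
now encapsulates.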