@@ -210,24 +210,17 @@ static int init_ring_common(struct intel_ring_buffer *ring)
* 965+ support PIPE_CONTROL commands, which provide finer grained control
* over cache flushing.
*/
-struct pipe_control {
- struct drm_i915_gem_object *obj;
- volatile u32 *cpu_page;
-};
-
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc;
+ struct intel_pipe_control *pc;
struct drm_i915_gem_object *obj;
int ret;
- if (ring->private)
- return 0;
+ pc = &ring->pipe_control;
- pc = kmalloc(sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ if (pc->cpu_page)
+ return 0;
obj = i915_gem_alloc_object(ring->dev, 4096);
if (obj == NULL) {
@@ -240,41 +233,39 @@ init_pipe_control(struct intel_ring_buffer *ring)
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret)
-		goto err_unref;
+		goto err;
 
 	pc->cpu_page = kmap(obj->pages[0]);
-	if (pc->cpu_page == NULL)
-		goto err_unpin;
+	if (pc->cpu_page == NULL) {
+		/* pin succeeded, so ret is still 0 here — report the failure */
+		ret = -ENOMEM;
+		goto err_unpin;
+	}
 
 	pc->obj = obj;
-	ring->private = pc;
 	return 0;
 
 err_unpin:
 	i915_gem_object_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(&obj->base);
 err:
-	kfree(pc);
+	drm_gem_object_unreference(&obj->base);
 	return ret;
 }
 
 static void
 cleanup_pipe_control(struct intel_ring_buffer *ring)
 {
-	struct pipe_control *pc = ring->private;
-	struct drm_i915_gem_object *obj;
+	struct intel_pipe_control *pc = &ring->pipe_control;
 
-	if (!ring->private)
-		return;
-
-	obj = pc->obj;
-	kunmap(obj->pages[0]);
-	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(&obj->base);
-
-	kfree(pc);
-	ring->private = NULL;
+	if (pc->cpu_page)
+		kunmap(pc->obj->pages[0]);
+	if (pc->obj) {
+		i915_gem_object_unpin(pc->obj);
+		drm_gem_object_unreference(&pc->obj->base);
+	}
+
+	/* Clear state so cleanup is idempotent and the ring can be re-inited. */
+	pc->obj = NULL;
+	pc->cpu_page = NULL;
 }
 
 static int init_render_ring(struct intel_ring_buffer *ring)
@@ -300,14 +286,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
return ret;
}
-static void render_ring_cleanup(struct intel_ring_buffer *ring)
-{
- if (!ring->private)
- return;
-
- cleanup_pipe_control(ring);
-}
-
static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
{
@@ -397,7 +375,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
{
struct drm_device *dev = ring->dev;
u32 seqno = i915_gem_get_seqno(dev);
- struct pipe_control *pc = ring->private;
+ struct intel_pipe_control *pc = &ring->pipe_control;
u32 scratch_addr = pc->obj->gtt_offset + 128;
int ret;
@@ -472,8 +450,7 @@ ring_get_seqno(struct intel_ring_buffer *ring)
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc = ring->private;
- return pc->cpu_page[0];
+ return ring->pipe_control.cpu_page[0];
}
static void
@@ -886,6 +863,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
ring->obj = NULL;
cleanup_status_page(ring);
+ cleanup_pipe_control(ring);
}
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
@@ -999,7 +977,6 @@ static const struct intel_ring_buffer render_ring = {
.irq_get = render_ring_get_irq,
.irq_put = render_ring_put_irq,
.dispatch_execbuffer = render_ring_dispatch_execbuffer,
- .cleanup = render_ring_cleanup,
};
/* ring buffer for bit-stream decoder */
@@ -14,6 +14,11 @@ struct intel_hw_status_page {
struct drm_i915_gem_object *obj;
};
+struct intel_pipe_control {
+ struct drm_i915_gem_object *obj;
+ volatile u32 *cpu_page;
+};
+
#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
@@ -54,6 +59,7 @@ struct intel_ring_buffer {
int size;
int effective_size;
struct intel_hw_status_page status_page;
+ struct intel_pipe_control pipe_control;
spinlock_t irq_lock;
u32 irq_refcount;
@@ -77,7 +83,6 @@ struct intel_ring_buffer {
u32 (*get_seqno)(struct intel_ring_buffer *ring);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length);
- void (*cleanup)(struct intel_ring_buffer *ring);
/**
* List of objects currently involved in rendering from the
@@ -113,8 +118,6 @@ struct intel_ring_buffer {
wait_queue_head_t irq_queue;
drm_local_map_t map;
-
- void *private;
};
static inline u32
Pipecontrol is also required to implement gfdt flushing on gen6+. And if we ever switch to pipecontrol based cache management in the kernel, it is also required on gen4. I don't see the point in saving that little bit of storage. In the process make cleanup_pipe_control more robust and call it unconditionally. Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> --- drivers/gpu/drm/i915/intel_ringbuffer.c | 55 +++++++++---------------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 9 +++-- 2 files changed, 22 insertions(+), 42 deletions(-)