@@ -137,7 +137,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0;
}
-static void validate_cmds_sorted(struct intel_ring_buffer *ring)
+static void validate_cmds_sorted(struct intel_engine *ring)
{
int i;
@@ -179,7 +179,7 @@ static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
}
}
-static void validate_regs_sorted(struct intel_ring_buffer *ring)
+static void validate_regs_sorted(struct intel_engine *ring)
{
check_sorted(ring->id, ring->reg_table, ring->reg_count);
check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
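These register and command tables are searched at parse time (see find_cmd_in_table() below), which is why initialization validates their ordering. A paraphrase of the invariant check_sorted() enforces, using hypothetical names:

static void check_sorted_sketch(int ring_id, const u32 *table, int count)
{
	int i;

	/* Tables must ascend so parse-time lookups can stay cheap. */
	for (i = 1; i < count; i++)
		if (table[i] < table[i - 1])
			DRM_ERROR("ring %d: table unsorted at index %d\n",
				  ring_id, i);
}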
@@ -190,10 +190,10 @@ static void validate_regs_sorted(struct intel_ring_buffer *ring)
- * @ring: the ringbuffer to initialize
+ * @ring: the engine to initialize
*
* Optionally initializes fields related to batch buffer command parsing in the
- * struct intel_ring_buffer based on whether the platform requires software
+ * struct intel_engine based on whether the platform requires software
* command parsing.
*/
-void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
+void i915_cmd_parser_init_ring(struct intel_engine *ring)
{
if (!IS_GEN7(ring->dev))
return;
@@ -249,7 +249,7 @@ find_cmd_in_table(const struct drm_i915_cmd_table *table,
* ring's default length encoding and returns default_desc.
*/
static const struct drm_i915_cmd_descriptor*
-find_cmd(struct intel_ring_buffer *ring,
+find_cmd(struct intel_engine *ring,
u32 cmd_header,
struct drm_i915_cmd_descriptor *default_desc)
{
@@ -329,7 +329,7 @@ finish:
*
* Return: true if the ring requires software command parsing
*/
-bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
+bool i915_needs_cmd_parser(struct intel_engine *ring)
{
/* No command tables indicates a platform without parsing */
if (!ring->cmd_tables)
@@ -352,7 +352,7 @@ bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
*
* Return: non-zero if the parser finds violations or otherwise fails
*/
-int i915_parse_cmds(struct intel_ring_buffer *ring,
+int i915_parse_cmds(struct intel_engine *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
bool is_master)
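Taken together, the three parser entry points follow a check-then-parse pattern at batch submission. A minimal sketch of the expected call sequence, with a hypothetical wrapper name and illustrative error handling:

static int submit_batch_sketch(struct intel_engine *ring,
			       struct drm_i915_gem_object *batch_obj,
			       u32 batch_start_offset, bool is_master)
{
	/* Platforms without command tables skip parsing entirely. */
	if (i915_needs_cmd_parser(ring)) {
		int ret = i915_parse_cmds(ring, batch_obj,
					  batch_start_offset, is_master);
		if (ret)
			return ret;	/* reject the violating batch */
	}

	/* ... dispatch the batch ... */
	return 0;
}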
@@ -562,7 +562,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
struct drm_i915_gem_request *gem_request;
int ret, count, i;
@@ -594,7 +594,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
}
static void i915_ring_seqno_info(struct seq_file *m,
- struct intel_ring_buffer *ring)
+ struct intel_engine *ring)
{
if (ring->get_seqno) {
seq_printf(m, "Current sequence (%s): %u\n",
@@ -607,7 +607,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -630,7 +630,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -800,7 +800,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
const u32 *hws;
int i;
@@ -1654,7 +1654,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
struct i915_hw_context *ctx;
int ret, i;
@@ -1800,7 +1800,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int unused, i;
@@ -1825,7 +1825,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
struct drm_file *file;
int i;
@@ -119,7 +119,7 @@ static void i915_write_hws_pga(struct drm_device *dev)
static void i915_free_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_engine *ring = LP_RING(dev_priv);
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
@@ -139,7 +139,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_engine *ring = LP_RING(dev_priv);
/*
* We should never lose context on the ring with modesetting
@@ -234,7 +234,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_engine *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("%s\n", __func__);
@@ -782,7 +782,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_engine *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
@@ -1070,7 +1070,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
@@ -595,7 +595,7 @@ struct i915_hw_context {
bool is_initialized;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
- struct intel_ring_buffer *last_ring;
+ struct intel_engine *last_ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
struct i915_address_space *vm;
@@ -1283,7 +1283,7 @@ typedef struct drm_i915_private {
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
- struct intel_ring_buffer ring[I915_NUM_RINGS];
+ struct intel_engine ring[I915_NUM_RINGS];
uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah;
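dev_priv->ring[] is now an array of struct intel_engine, and later hunks iterate it with for_each_active_ring(). A sketch of that idiom, assuming only the fields this patch already shows (id, name):

static void list_engines_sketch(struct drm_i915_private *dev_priv)
{
	struct intel_engine *ring;
	int i;

	for_each_active_ring(ring, dev_priv, i)
		DRM_DEBUG_DRIVER("engine %d: %s\n", ring->id, ring->name);
}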
@@ -1600,7 +1600,7 @@ struct drm_i915_gem_object {
void *dma_buf_vmapping;
int vmapping_count;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_read_seqno;
@@ -1639,7 +1639,7 @@ struct drm_i915_gem_object {
*/
struct drm_i915_gem_request {
/** On which ring this request was generated */
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
/** GEM sequence number associated with this request. */
uint32_t seqno;
@@ -2091,9 +2091,9 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to);
+ struct intel_engine *to);
void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_ring_buffer *ring);
+ struct intel_engine *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -2135,7 +2135,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
}
struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_ring_buffer *ring);
+i915_gem_find_active_request(struct intel_engine *ring);
bool i915_gem_retire_requests(struct drm_device *dev);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
@@ -2161,18 +2161,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
+int i915_gem_l3_remap(struct intel_engine *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_ring_buffer *ring,
+int __i915_add_request(struct intel_engine *ring,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
-int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
+int __must_check i915_wait_seqno(struct intel_engine *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
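The add/wait pairing these prototypes define is unchanged by the rename; only the engine type differs. A sketch under that assumption, with a hypothetical wrapper:

static int add_and_wait_sketch(struct intel_engine *ring)
{
	u32 seqno;
	int ret;

	/* i915_add_request() expands to __i915_add_request(ring, NULL, NULL, &seqno). */
	ret = i915_add_request(ring, &seqno);
	if (ret)
		return ret;

	return i915_wait_seqno(ring, seqno);
}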
@@ -2183,7 +2183,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_ring_buffer *pipelined);
+ struct intel_engine *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -2287,7 +2287,7 @@ void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_ring_buffer *ring,
+int i915_switch_context(struct intel_engine *ring,
struct drm_file *file, struct i915_hw_context *to);
struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
@@ -2400,9 +2400,9 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
/* i915_cmd_parser.c */
-void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
-bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
-int i915_parse_cmds(struct intel_ring_buffer *ring,
+void i915_cmd_parser_init_ring(struct intel_engine *ring);
+bool i915_needs_cmd_parser(struct intel_engine *ring);
+int i915_parse_cmds(struct intel_engine *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
bool is_master);
@@ -61,7 +61,7 @@ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+static void i915_gem_retire_requests_ring(struct intel_engine *ring);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
@@ -970,7 +970,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
* equal.
*/
static int
-i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+i915_gem_check_olr(struct intel_engine *ring, u32 seqno)
{
int ret;
@@ -989,7 +989,7 @@ static void fake_irq(unsigned long data)
}
static bool missed_irq(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+ struct intel_engine *ring)
{
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
@@ -1020,7 +1020,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
* Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
-static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+static int __wait_seqno(struct intel_engine *ring, u32 seqno,
unsigned reset_counter,
bool interruptible,
struct timespec *timeout,
@@ -1127,7 +1127,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
* request and object lists appropriately for that event.
*/
int
-i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+i915_wait_seqno(struct intel_engine *ring, uint32_t seqno)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1152,7 +1152,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_engine *ring)
{
i915_gem_retire_requests_ring(ring);
@@ -1177,7 +1177,7 @@ static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
- struct intel_ring_buffer *ring = obj->ring;
+ struct intel_engine *ring = obj->ring;
u32 seqno;
int ret;
@@ -1202,7 +1202,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = obj->ring;
+ struct intel_engine *ring = obj->ring;
unsigned reset_counter;
u32 seqno;
int ret;
@@ -2013,7 +2013,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_engine *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2051,7 +2051,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_ring_buffer *ring)
+ struct intel_engine *ring)
{
list_move_tail(&vma->mm_list, &vma->vm->active_list);
return i915_gem_object_move_to_active(vma->obj, ring);
@@ -2093,7 +2093,7 @@ static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int ret, i, j;
/* Carefully retire all requests without writing to the rings */
@@ -2159,7 +2159,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
return 0;
}
-int __i915_add_request(struct intel_ring_buffer *ring,
+int __i915_add_request(struct intel_engine *ring,
struct drm_file *file,
struct drm_i915_gem_object *obj,
u32 *out_seqno)
@@ -2318,7 +2318,7 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
}
struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_ring_buffer *ring)
+i915_gem_find_active_request(struct intel_engine *ring)
{
struct drm_i915_gem_request *request;
u32 completed_seqno;
@@ -2336,7 +2336,7 @@ i915_gem_find_active_request(struct intel_ring_buffer *ring)
}
static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+ struct intel_engine *ring)
{
struct drm_i915_gem_request *request;
bool ring_hung;
@@ -2355,7 +2355,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
}
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+ struct intel_engine *ring)
{
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
@@ -2409,7 +2409,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int i;
/*
@@ -2434,7 +2434,7 @@ void i915_gem_reset(struct drm_device *dev)
* This function clears the request list as sequence numbers are passed.
*/
static void
-i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+i915_gem_retire_requests_ring(struct intel_engine *ring)
{
uint32_t seqno;
@@ -2497,7 +2497,7 @@ bool
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
bool idle = true;
int i;
@@ -2591,7 +2591,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
- struct intel_ring_buffer *ring = NULL;
+ struct intel_engine *ring = NULL;
struct timespec timeout_stack, *timeout = NULL;
unsigned reset_counter;
u32 seqno = 0;
@@ -2662,9 +2662,9 @@ out:
*/
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to)
+ struct intel_engine *to)
{
- struct intel_ring_buffer *from = obj->ring;
+ struct intel_engine *from = obj->ring;
u32 seqno;
int ret, idx;
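i915_gem_object_sync() orders an object's pending rendering on obj->ring (the "from" engine) ahead of work on the target engine. A hedged sketch of a caller, with a hypothetical name:

static int use_on_engine_sketch(struct drm_i915_gem_object *obj,
				struct intel_engine *to)
{
	int ret;

	ret = i915_gem_object_sync(obj, to);
	if (ret)
		return ret;

	/* Rendering from obj->ring is now ordered before work on 'to'. */
	return 0;
}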
@@ -2785,7 +2785,7 @@ int i915_vma_unbind(struct i915_vma *vma)
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int ret, i;
/* Flush everything onto the inactive list. */
@@ -3641,7 +3641,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
- struct intel_ring_buffer *pipelined)
+ struct intel_engine *pipelined)
{
u32 old_read_domains, old_write_domain;
int ret;
@@ -3793,7 +3793,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
- struct intel_ring_buffer *ring = NULL;
+ struct intel_engine *ring = NULL;
unsigned reset_counter;
u32 seqno = 0;
int ret;
@@ -4273,7 +4273,7 @@ err:
return ret;
}
-int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
+int i915_gem_l3_remap(struct intel_engine *ring, int slice)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -4497,7 +4497,7 @@ void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int i;
for_each_active_ring(ring, dev_priv, i)
@@ -4572,7 +4572,7 @@ i915_gem_lastclose(struct drm_device *dev)
}
static void
-init_ring_lists(struct intel_ring_buffer *ring)
+init_ring_lists(struct intel_engine *ring)
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
@@ -96,7 +96,7 @@
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
-static int do_switch(struct intel_ring_buffer *ring,
+static int do_switch(struct intel_engine *ring,
struct i915_hw_context *to);
static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
@@ -352,7 +352,7 @@ err_destroy:
void i915_gem_context_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int i;
if (!HAS_HW_CONTEXTS(dev))
@@ -391,7 +391,7 @@ void i915_gem_context_reset(struct drm_device *dev)
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
struct i915_hw_context *default_context;
int unused;
@@ -429,7 +429,7 @@ void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int unused;
if (!HAS_HW_CONTEXTS(dev))
@@ -470,7 +470,7 @@ void i915_gem_context_fini(struct drm_device *dev)
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int ret, i;
if (!HAS_HW_CONTEXTS(dev_priv->dev))
@@ -572,7 +572,7 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
}
static inline int
-mi_set_context(struct intel_ring_buffer *ring,
+mi_set_context(struct intel_engine *ring,
struct i915_hw_context *new_context,
u32 hw_flags)
{
@@ -622,7 +622,7 @@ mi_set_context(struct intel_ring_buffer *ring,
return ret;
}
-static int do_switch(struct intel_ring_buffer *ring,
+static int do_switch(struct intel_engine *ring,
struct i915_hw_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -753,7 +753,7 @@ unpin_out:
* it will have a refcount > 1. This allows us to destroy the context abstract
* object while letting the normal object tracking destroy the backing BO.
*/
-int i915_switch_context(struct intel_ring_buffer *ring,
+int i915_switch_context(struct intel_engine *ring,
struct drm_file *file,
struct i915_hw_context *to)
{
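In the execbuffer path, a context obtained from i915_gem_validate_context() is installed on the engine before dispatch. A sketch of that ordering, with a hypothetical wrapper and illustrative error handling:

static int switch_then_dispatch_sketch(struct intel_engine *ring,
				       struct drm_file *file,
				       struct i915_hw_context *ctx)
{
	int ret;

	ret = i915_switch_context(ring, file, ctx);
	if (ret)
		return ret;

	/* ... dispatch on 'ring' with 'ctx' current ... */
	return 0;
}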
@@ -538,7 +538,7 @@ need_reloc_mappable(struct i915_vma *vma)
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
- struct intel_ring_buffer *ring,
+ struct intel_engine *ring,
bool *need_reloc)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -593,7 +593,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
}
static int
-i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+i915_gem_execbuffer_reserve(struct intel_engine *ring,
struct list_head *vmas,
bool *need_relocs)
{
@@ -708,7 +708,7 @@ static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
- struct intel_ring_buffer *ring,
+ struct intel_engine *ring,
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec)
{
@@ -824,7 +824,7 @@ err:
}
static int
-i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+i915_gem_execbuffer_move_to_gpu(struct intel_engine *ring,
struct list_head *vmas)
{
struct i915_vma *vma;
@@ -909,7 +909,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static struct i915_hw_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
- struct intel_ring_buffer *ring, const u32 ctx_id)
+ struct intel_engine *ring, const u32 ctx_id)
{
struct i915_hw_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
@@ -932,7 +932,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
- struct intel_ring_buffer *ring)
+ struct intel_engine *ring)
{
struct i915_vma *vma;
@@ -964,7 +964,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
- struct intel_ring_buffer *ring,
+ struct intel_engine *ring,
struct drm_i915_gem_object *obj)
{
/* Unconditionally force add_request to emit a full flush. */
@@ -976,7 +976,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_engine *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
@@ -1009,7 +1009,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
struct i915_hw_context *ctx;
struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
@@ -187,7 +187,7 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
}
/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
+static int gen8_write_pdp(struct intel_engine *ring, unsigned entry,
uint64_t val, bool synchronous)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -217,7 +217,7 @@ static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
}
static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine *ring,
bool synchronous)
{
int i, ret;
@@ -687,7 +687,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;
@@ -731,7 +731,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;
@@ -782,7 +782,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
}
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;
@@ -803,7 +803,7 @@ static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int j, ret;
for_each_active_ring(ring, dev_priv, j) {
@@ -833,7 +833,7 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
uint32_t ecochk, ecobits;
int i;
@@ -872,7 +872,7 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
uint32_t ecochk, gab_ctl, ecobits;
int i;
@@ -1240,7 +1240,7 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
void i915_check_and_clear_faults(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int i;
if (INTEL_INFO(dev)->gen < 6)
@@ -260,7 +260,7 @@ struct i915_hw_ppgtt {
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct intel_ring_buffer *ring,
+ struct intel_engine *ring,
bool synchronous);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
@@ -752,7 +752,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
}
static void i915_record_ring_state(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+ struct intel_engine *ring,
struct drm_i915_error_ring *ering)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -862,7 +862,7 @@ static void i915_record_ring_state(struct drm_device *dev,
}
-static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+static void i915_gem_record_active_context(struct intel_engine *ring,
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
{
@@ -892,7 +892,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
int i, count;
for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
+ struct intel_engine *ring = &dev_priv->ring[i];
if (ring->dev == NULL)
continue;
@@ -1073,7 +1073,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
}
static void notify_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_engine *ring)
{
if (ring->obj == NULL)
return;
@@ -2103,7 +2103,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int i;
/*
@@ -2526,14 +2526,14 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
}
static u32
-ring_last_seqno(struct intel_ring_buffer *ring)
+ring_last_seqno(struct intel_engine *ring)
{
return list_entry(ring->request_list.prev,
struct drm_i915_gem_request, list)->seqno;
}
static bool
-ring_idle(struct intel_ring_buffer *ring, u32 seqno)
+ring_idle(struct intel_engine *ring, u32 seqno)
{
return (list_empty(&ring->request_list) ||
i915_seqno_passed(seqno, ring_last_seqno(ring)));
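ring_idle() can compare seqnos directly because i915_seqno_passed() is wraparound-safe. Paraphrased from memory of the driver's header in this era (not part of this patch):

static inline bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* Signed difference tolerates the u32 counter wrapping. */
	return (int32_t)(seq1 - seq2) >= 0;
}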
@@ -2556,11 +2556,11 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
}
}
-static struct intel_ring_buffer *
-semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr)
+static struct intel_engine *
+semaphore_wait_to_signaller_ring(struct intel_engine *ring, u32 ipehr)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct intel_ring_buffer *signaller;
+ struct intel_engine *signaller;
int i;
if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
@@ -2589,8 +2589,8 @@ semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr)
return NULL;
}
-static struct intel_ring_buffer *
-semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
+static struct intel_engine *
+semaphore_waits_for(struct intel_engine *ring, u32 *seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 cmd, ipehr, head;
@@ -2632,10 +2632,10 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
return semaphore_wait_to_signaller_ring(ring, ipehr);
}
-static int semaphore_passed(struct intel_ring_buffer *ring)
+static int semaphore_passed(struct intel_engine *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct intel_ring_buffer *signaller;
+ struct intel_engine *signaller;
u32 seqno, ctl;
ring->hangcheck.deadlock = true;
@@ -2654,7 +2654,7 @@ static int semaphore_passed(struct intel_ring_buffer *ring)
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int i;
for_each_active_ring(ring, dev_priv, i)
@@ -2662,7 +2662,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
}
static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
+ring_stuck(struct intel_engine *ring, u32 acthd)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2718,7 +2718,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
int i;
int busy_count = 0, rings_hung = 0;
bool stuck[I915_NUM_RINGS] = { 0 };
@@ -251,8 +251,8 @@ TRACE_EVENT(i915_gem_evict_vm,
);
TRACE_EVENT(i915_gem_ring_sync_to,
- TP_PROTO(struct intel_ring_buffer *from,
- struct intel_ring_buffer *to,
+ TP_PROTO(struct intel_engine *from,
+ struct intel_engine *to,
u32 seqno),
TP_ARGS(from, to, seqno),
@@ -277,7 +277,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
);
TRACE_EVENT(i915_gem_ring_dispatch,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+ TP_PROTO(struct intel_engine *ring, u32 seqno, u32 flags),
TP_ARGS(ring, seqno, flags),
TP_STRUCT__entry(
@@ -300,7 +300,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
);
TRACE_EVENT(i915_gem_ring_flush,
- TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
+ TP_PROTO(struct intel_engine *ring, u32 invalidate, u32 flush),
TP_ARGS(ring, invalidate, flush),
TP_STRUCT__entry(
@@ -323,7 +323,7 @@ TRACE_EVENT(i915_gem_ring_flush,
);
DECLARE_EVENT_CLASS(i915_gem_request,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_STRUCT__entry(
@@ -343,12 +343,12 @@ DECLARE_EVENT_CLASS(i915_gem_request,
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
TRACE_EVENT(i915_gem_request_complete,
- TP_PROTO(struct intel_ring_buffer *ring),
+ TP_PROTO(struct intel_engine *ring),
TP_ARGS(ring),
TP_STRUCT__entry(
@@ -368,12 +368,12 @@ TRACE_EVENT(i915_gem_request_complete,
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
TRACE_EVENT(i915_gem_request_wait_begin,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_STRUCT__entry(
@@ -402,12 +402,12 @@ TRACE_EVENT(i915_gem_request_wait_begin,
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_PROTO(struct intel_engine *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
DECLARE_EVENT_CLASS(i915_ring,
- TP_PROTO(struct intel_ring_buffer *ring),
+ TP_PROTO(struct intel_engine *ring),
TP_ARGS(ring),
TP_STRUCT__entry(
@@ -424,12 +424,12 @@ DECLARE_EVENT_CLASS(i915_ring,
);
DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
- TP_PROTO(struct intel_ring_buffer *ring),
+ TP_PROTO(struct intel_engine *ring),
TP_ARGS(ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_end,
- TP_PROTO(struct intel_ring_buffer *ring),
+ TP_PROTO(struct intel_engine *ring),
TP_ARGS(ring)
);
@@ -1954,7 +1954,7 @@ static int intel_align_height(struct drm_device *dev, int height, bool tiled)
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+ struct intel_engine *pipelined)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 alignment;
@@ -8452,7 +8452,7 @@ out:
}
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring)
+ struct intel_engine *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
@@ -8610,7 +8610,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
@@ -8655,7 +8655,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
@@ -8697,7 +8697,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
@@ -8745,7 +8745,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
uint32_t pf, pipesrc;
int ret;
@@ -8790,7 +8790,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
uint32_t plane_bit = 0;
int len, ret;
@@ -667,7 +667,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring);
+ struct intel_engine *ring);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
@@ -699,7 +699,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined);
+ struct intel_engine *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
@@ -213,7 +213,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
int ret;
BUG_ON(overlay->last_flip_req);
@@ -236,7 +236,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
int ret;
BUG_ON(overlay->active);
@@ -263,7 +263,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
u32 tmp;
int ret;
@@ -320,7 +320,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
int ret;
@@ -363,7 +363,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
int ret;
if (overlay->last_flip_req == 0)
@@ -389,7 +389,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
int ret;
/* Only wait if there is actually an old frame to release to
@@ -3251,7 +3251,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
uint32_t rc6_mask = 0, rp_state_cap;
int unused;
@@ -3323,7 +3323,7 @@ static void gen8_enable_rps(struct drm_device *dev)
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
u32 rp_state_cap;
u32 gt_perf_status;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
@@ -3597,7 +3597,7 @@ out:
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
u32 gtfifodbg, val, rc6_mode = 0;
int i;
@@ -3752,7 +3752,7 @@ static int ironlake_setup_rc6(struct drm_device *dev)
static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
bool was_interruptible;
int ret;
@@ -4264,7 +4264,7 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
- struct intel_ring_buffer *ring;
+ struct intel_engine *ring;
bool ret = false;
int i;
@@ -33,7 +33,7 @@
#include "i915_trace.h"
#include "intel_drv.h"
-static inline int ring_space(struct intel_ring_buffer *ring)
+static inline int ring_space(struct intel_engine *ring)
{
int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
@@ -41,7 +41,7 @@ static inline int ring_space(struct intel_ring_buffer *ring)
return space;
}
-void __intel_ring_advance(struct intel_ring_buffer *ring)
+void __intel_ring_advance(struct intel_engine *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
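For the ring_space() arithmetic above, a worked example may help. Assuming a 4 KiB ring and an I915_RING_FREE_SPACE of 64 bytes (the constant's value is an assumption here), with head at 512 and tail at 3584:

	space = 512 - (3584 + 64);	/* -3136: tail is ahead of head */
	space += 4096;			/* wraps to 960 bytes free */

The reserved slack keeps tail from ever catching head exactly, which would be indistinguishable from an empty ring.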
@@ -52,7 +52,7 @@ void __intel_ring_advance(struct intel_ring_buffer *ring)
}
static int
-gen2_render_ring_flush(struct intel_ring_buffer *ring,
+gen2_render_ring_flush(struct intel_engine *ring,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -78,7 +78,7 @@ gen2_render_ring_flush(struct intel_ring_buffer *ring,
}
static int
-gen4_render_ring_flush(struct intel_ring_buffer *ring,
+gen4_render_ring_flush(struct intel_engine *ring,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -173,7 +173,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
* really our business. That leaves only stall at scoreboard.
*/
static int
-intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+intel_emit_post_sync_nonzero_flush(struct intel_engine *ring)
{
u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
@@ -208,7 +208,7 @@ intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
}
static int
-gen6_render_ring_flush(struct intel_ring_buffer *ring,
+gen6_render_ring_flush(struct intel_engine *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
@@ -260,7 +260,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
}
static int
-gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+gen7_render_ring_cs_stall_wa(struct intel_engine *ring)
{
int ret;
@@ -278,7 +278,7 @@ gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
return 0;
}
-static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
+static int gen7_ring_fbc_flush(struct intel_engine *ring, u32 value)
{
int ret;
@@ -302,7 +302,7 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
}
static int
-gen7_render_ring_flush(struct intel_ring_buffer *ring,
+gen7_render_ring_flush(struct intel_engine *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
@@ -363,7 +363,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
}
static int
-gen8_render_ring_flush(struct intel_ring_buffer *ring,
+gen8_render_ring_flush(struct intel_engine *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
@@ -403,14 +403,14 @@ gen8_render_ring_flush(struct intel_ring_buffer *ring,
}
-static void ring_write_tail(struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_engine *ring,
u32 value)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
I915_WRITE_TAIL(ring, value);
}
-u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_engine *ring)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
@@ -419,7 +419,7 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
return I915_READ(acthd_reg);
}
-static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
+static void ring_setup_phys_status_page(struct intel_engine *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 addr;
@@ -430,7 +430,7 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
I915_WRITE(HWS_PGA, addr);
}
-static int init_ring_common(struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -519,7 +519,7 @@ out:
}
static int
-init_pipe_control(struct intel_ring_buffer *ring)
+init_pipe_control(struct intel_engine *ring)
{
int ret;
@@ -560,7 +560,7 @@ err:
return ret;
}
-static int init_render_ring(struct intel_ring_buffer *ring)
+static int init_render_ring(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -620,7 +620,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
return ret;
}
-static void render_ring_cleanup(struct intel_ring_buffer *ring)
+static void render_ring_cleanup(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
@@ -637,7 +637,7 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
}
static void
-update_mboxes(struct intel_ring_buffer *ring,
+update_mboxes(struct intel_engine *ring,
u32 mmio_offset)
{
/* NB: In order to be able to do semaphore MBOX updates for varying number
@@ -662,11 +662,11 @@ update_mboxes(struct intel_ring_buffer *ring,
* This acts like a signal in the canonical semaphore.
*/
static int
-gen6_add_request(struct intel_ring_buffer *ring)
+gen6_add_request(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *useless;
+ struct intel_engine *useless;
int i, ret, num_dwords = 4;
if (i915_semaphore_is_enabled(dev))
@@ -709,8 +709,8 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
* @seqno - seqno which the waiter will block on
*/
static int
-gen6_ring_sync(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
+gen6_ring_sync(struct intel_engine *waiter,
+ struct intel_engine *signaller,
u32 seqno)
{
int ret;
@@ -760,7 +760,7 @@ do { \
} while (0)
static int
-pc_render_add_request(struct intel_ring_buffer *ring)
+pc_render_add_request(struct intel_engine *ring)
{
u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
@@ -808,7 +808,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
}
static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+gen6_ring_get_seqno(struct intel_engine *ring, bool lazy_coherency)
{
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
@@ -819,31 +819,31 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
}
static u32
-ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+ring_get_seqno(struct intel_engine *ring, bool lazy_coherency)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
static void
-ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+ring_set_seqno(struct intel_engine *ring, u32 seqno)
{
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+pc_render_get_seqno(struct intel_engine *ring, bool lazy_coherency)
{
return ring->scratch.cpu_page[0];
}
static void
-pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+pc_render_set_seqno(struct intel_engine *ring, u32 seqno)
{
ring->scratch.cpu_page[0] = seqno;
}
static bool
-gen5_ring_get_irq(struct intel_ring_buffer *ring)
+gen5_ring_get_irq(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
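The seqno accessors above read and write the hardware status page. Paraphrased from memory of intel_ringbuffer.h in this era (not part of this patch), the read side looks roughly like:

static inline u32 intel_read_status_page(struct intel_engine *ring, int reg)
{
	/* Force a fresh load on every iteration of a polling loop. */
	barrier();
	return ring->status_page.page_addr[reg];
}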
@@ -861,7 +861,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
}
static void
-gen5_ring_put_irq(struct intel_ring_buffer *ring)
+gen5_ring_put_irq(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -874,7 +874,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
}
static bool
-i9xx_ring_get_irq(struct intel_ring_buffer *ring)
+i9xx_ring_get_irq(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -895,7 +895,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
}
static void
-i9xx_ring_put_irq(struct intel_ring_buffer *ring)
+i9xx_ring_put_irq(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -911,7 +911,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
}
static bool
-i8xx_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -932,7 +932,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
}
static void
-i8xx_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -947,7 +947,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
-void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = ring->dev->dev_private;
@@ -1005,7 +1005,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
}
static int
-bsd_ring_flush(struct intel_ring_buffer *ring,
+bsd_ring_flush(struct intel_engine *ring,
u32 invalidate_domains,
u32 flush_domains)
{
@@ -1022,7 +1022,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
}
static int
-i9xx_add_request(struct intel_ring_buffer *ring)
+i9xx_add_request(struct intel_engine *ring)
{
int ret;
@@ -1040,7 +1040,7 @@ i9xx_add_request(struct intel_ring_buffer *ring)
}
static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring)
+gen6_ring_get_irq(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1065,7 +1065,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
}
static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring)
+gen6_ring_put_irq(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1083,7 +1083,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
}
static bool
-hsw_vebox_get_irq(struct intel_ring_buffer *ring)
+hsw_vebox_get_irq(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1103,7 +1103,7 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
}
static void
-hsw_vebox_put_irq(struct intel_ring_buffer *ring)
+hsw_vebox_put_irq(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1121,7 +1121,7 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
}
static bool
-gen8_ring_get_irq(struct intel_ring_buffer *ring)
+gen8_ring_get_irq(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1147,7 +1147,7 @@ gen8_ring_get_irq(struct intel_ring_buffer *ring)
}
static void
-gen8_ring_put_irq(struct intel_ring_buffer *ring)
+gen8_ring_put_irq(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1167,7 +1167,7 @@ gen8_ring_put_irq(struct intel_ring_buffer *ring)
}
static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i965_dispatch_execbuffer(struct intel_engine *ring,
u32 offset, u32 length,
unsigned flags)
{
@@ -1190,7 +1190,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
static int
-i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i830_dispatch_execbuffer(struct intel_engine *ring,
u32 offset, u32 len,
unsigned flags)
{
@@ -1241,7 +1241,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
}
static int
-i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i915_dispatch_execbuffer(struct intel_engine *ring,
u32 offset, u32 len,
unsigned flags)
{
@@ -1258,7 +1258,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
return 0;
}
-static void cleanup_status_page(struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_engine *ring)
{
struct drm_i915_gem_object *obj;
@@ -1272,7 +1272,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
ring->status_page.obj = NULL;
}
-static int init_status_page(struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *obj;
@@ -1315,7 +1315,7 @@ err:
return ret;
}
-static int init_phys_status_page(struct intel_ring_buffer *ring)
+static int init_phys_status_page(struct intel_engine *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1332,14 +1332,14 @@ static int init_phys_status_page(struct intel_ring_buffer *ring)
return 0;
}
-static void destroy_ring_buffer(struct intel_ring_buffer *ring)
+static void destroy_ring_buffer(struct intel_engine *ring)
{
i915_gem_object_ggtt_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
ring->obj = NULL;
}
-static int alloc_ring_buffer(struct intel_ring_buffer *ring)
+static int alloc_ring_buffer(struct intel_engine *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *obj = NULL;
@@ -1372,7 +1372,7 @@ static int alloc_ring_buffer(struct intel_ring_buffer *ring)
}
static int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ struct intel_engine *ring)
{
struct drm_i915_gem_object *obj;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1437,7 +1437,7 @@ err_hws:
return ret;
}
-void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_engine *ring)
{
struct drm_i915_private *dev_priv;
int ret;
@@ -1466,7 +1466,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
cleanup_status_page(ring);
}
-static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+static int intel_ring_wait_request(struct intel_engine *ring, int n)
{
struct drm_i915_gem_request *request;
u32 seqno = 0, tail;
@@ -1519,7 +1519,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
return 0;
}
-static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_engine *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1567,7 +1567,7 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
return -EBUSY;
}
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_engine *ring)
{
uint32_t __iomem *virt;
int rem = ring->size - ring->tail;
@@ -1589,7 +1589,7 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
return 0;
}
-int intel_ring_idle(struct intel_ring_buffer *ring)
+int intel_ring_idle(struct intel_engine *ring)
{
u32 seqno;
int ret;
@@ -1613,7 +1613,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
}
static int
-intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+intel_ring_alloc_seqno(struct intel_engine *ring)
{
if (ring->outstanding_lazy_seqno)
return 0;
@@ -1631,7 +1631,7 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
-static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+static int __intel_ring_prepare(struct intel_engine *ring,
int bytes)
{
int ret;
@@ -1651,7 +1651,7 @@ static int __intel_ring_prepare(struct intel_ring_buffer *ring,
return 0;
}
-int intel_ring_begin(struct intel_ring_buffer *ring,
+int intel_ring_begin(struct intel_engine *ring,
int num_dwords)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
@@ -1676,7 +1676,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
}
/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+int intel_ring_cacheline_align(struct intel_engine *ring)
{
int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
int ret;
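A worked example of the cacheline math above: with ring->tail at byte 52 within a 64-byte cacheline,

	num_dwords = (64 - (52 & 63)) / sizeof(uint32_t) = 12 / 4 = 3

so three padding no-ops (MI_NOOP in the real function) advance the tail to the next boundary.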
@@ -1696,7 +1696,7 @@ int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
return 0;
}
-void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+void intel_ring_init_seqno(struct intel_engine *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1713,7 +1713,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
ring->hangcheck.seqno = seqno;
}
-static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_engine *ring,
u32 value)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
@@ -1746,7 +1746,7 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
-static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_bsd_ring_flush(struct intel_engine *ring,
u32 invalidate, u32 flush)
{
uint32_t cmd;
@@ -1782,7 +1782,7 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
}
static int
-gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+gen8_ring_dispatch_execbuffer(struct intel_engine *ring,
u32 offset, u32 len,
unsigned flags)
{
@@ -1806,7 +1806,7 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
}
static int
-hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+hsw_ring_dispatch_execbuffer(struct intel_engine *ring,
u32 offset, u32 len,
unsigned flags)
{
@@ -1827,7 +1827,7 @@ hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
}
static int
-gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+gen6_ring_dispatch_execbuffer(struct intel_engine *ring,
u32 offset, u32 len,
unsigned flags)
{
@@ -1849,7 +1849,7 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
/* Blitter support (SandyBridge+) */
-static int gen6_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_ring_flush(struct intel_engine *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
@@ -1892,7 +1892,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
@@ -1989,7 +1989,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ struct intel_engine *ring = &dev_priv->ring[RCS];
int ret;
if (INTEL_INFO(dev)->gen >= 6) {
@@ -2053,7 +2053,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
+ struct intel_engine *ring = &dev_priv->ring[VCS];
ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6) {
@@ -2111,7 +2111,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ struct intel_engine *ring = &dev_priv->ring[BCS];
ring->write_tail = ring_write_tail;
ring->flush = gen6_ring_flush;
@@ -2147,7 +2147,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
+ struct intel_engine *ring = &dev_priv->ring[VECS];
ring->write_tail = ring_write_tail;
ring->flush = gen6_ring_flush;
@@ -2182,7 +2182,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
}
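All four engine-init routines follow the same shape: select per-engine callbacks (by generation where it matters), then hand the engine to the common setup path. A sketch of that shape, with the function name, engine id, and common-setup call all written as stand-ins:

/* Hypothetical engine init, illustrating the shared pattern only;
 * FOOCS and intel_init_ring_buffer() stand in for the real names. */
int intel_init_foo_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_engine *ring = &dev_priv->ring[FOOCS];

	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	/* ...add_request, get_seqno, irq and dispatch hooks... */

	return intel_init_ring_buffer(dev, ring);
}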
int
-intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+intel_ring_flush_all_caches(struct intel_engine *ring)
{
int ret;
@@ -2200,7 +2200,7 @@ intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
}
int
-intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+intel_ring_invalidate_all_caches(struct intel_engine *ring)
{
uint32_t flush_domains;
int ret;
@@ -53,7 +53,9 @@ struct intel_ring_hangcheck {
enum intel_ring_hangcheck_action action;
};
-struct intel_ring_buffer {
+struct i915_hw_context;
+
+struct intel_engine {
const char *name;
enum intel_ring_id {
RCS = 0x0,
@@ -88,35 +90,35 @@ struct intel_ring_buffer {
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
- bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
- void (*irq_put)(struct intel_ring_buffer *ring);
+ bool __must_check (*irq_get)(struct intel_engine *ring);
+ void (*irq_put)(struct intel_engine *ring);
- int (*init)(struct intel_ring_buffer *ring);
+ int (*init)(struct intel_engine *ring);
- void (*write_tail)(struct intel_ring_buffer *ring,
+ void (*write_tail)(struct intel_engine *ring,
u32 value);
- int __must_check (*flush)(struct intel_ring_buffer *ring,
+ int __must_check (*flush)(struct intel_engine *ring,
u32 invalidate_domains,
u32 flush_domains);
- int (*add_request)(struct intel_ring_buffer *ring);
+ int (*add_request)(struct intel_engine *ring);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
* seen value is good enough. Note that the seqno will always be
* monotonic, even if not coherent.
*/
- u32 (*get_seqno)(struct intel_ring_buffer *ring,
+ u32 (*get_seqno)(struct intel_engine *ring,
bool lazy_coherency);
- void (*set_seqno)(struct intel_ring_buffer *ring,
+ void (*set_seqno)(struct intel_engine *ring,
u32 seqno);
- int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+ int (*dispatch_execbuffer)(struct intel_engine *ring,
u32 offset, u32 length,
unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
- void (*cleanup)(struct intel_ring_buffer *ring);
- int (*sync_to)(struct intel_ring_buffer *ring,
- struct intel_ring_buffer *to,
+ void (*cleanup)(struct intel_engine *ring);
+ int (*sync_to)(struct intel_engine *ring,
+ struct intel_engine *to,
u32 seqno);
/* our mbox written by others */
@@ -201,20 +203,20 @@ struct intel_ring_buffer {
};
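With the rename, the struct reads as what it is: a per-engine descriptor whose function pointers let callers drive hardware-specific behaviour without knowing which engine they hold. A minimal sketch of such a caller, where emit_batch is a hypothetical helper:

/* Sketch: engine-agnostic submission through the callbacks above.
 * emit_batch is hypothetical; error handling is trimmed. */
static int emit_batch(struct intel_engine *ring, u32 offset, u32 len)
{
	int ret;

	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
	if (ret)
		return ret;

	return ring->dispatch_execbuffer(ring, offset, len, 0);
}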
static inline bool
-intel_ring_initialized(struct intel_ring_buffer *ring)
+intel_ring_initialized(struct intel_engine *ring)
{
return ring->obj != NULL;
}
static inline unsigned
-intel_ring_flag(struct intel_ring_buffer *ring)
+intel_ring_flag(struct intel_engine *ring)
{
return 1 << ring->id;
}
static inline u32
-intel_ring_sync_index(struct intel_ring_buffer *ring,
- struct intel_ring_buffer *other)
+intel_ring_sync_index(struct intel_engine *ring,
+ struct intel_engine *other)
{
int idx;
@@ -232,7 +234,7 @@ intel_ring_sync_index(struct intel_ring_buffer *ring,
}
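The elided body maps "the engine being synced to" onto a slot in sync_seqno[I915_NUM_RINGS-1]: each engine numbers the other engines relative to itself, in ring order. Spelled out for three engines:

/* With engines ordered RCS, VCS, BCS the mapping works out as
 *   RCS -> 0 = VCS, 1 = BCS
 *   VCS -> 0 = BCS, 1 = RCS
 *   BCS -> 0 = RCS, 1 = VCS
 * i.e. idx = (other - ring) - 1, wrapped modulo I915_NUM_RINGS. */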
static inline u32
-intel_read_status_page(struct intel_ring_buffer *ring,
+intel_read_status_page(struct intel_engine *ring,
int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
@@ -241,7 +243,7 @@ intel_read_status_page(struct intel_ring_buffer *ring,
}
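The status page is written by the GPU behind the compiler's back, so the load must not be cached or elided. Per the comment above, the elided body pairs a compiler barrier with a plain read; a sketch, assuming the usual barrier() idiom:

/* Forbid the compiler from reusing a previously loaded value,
 * then read the dword the GPU last wrote. */
barrier();
return ring->status_page.page_addr[reg];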
static inline void
-intel_write_status_page(struct intel_ring_buffer *ring,
+intel_write_status_page(struct intel_engine *ring,
int reg, u32 value)
{
ring->status_page.page_addr[reg] = value;
@@ -266,26 +268,26 @@ intel_write_status_page(struct intel_ring_buffer *ring,
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
-void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+void intel_cleanup_ring_buffer(struct intel_engine *ring);
-int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
-static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+int __must_check intel_ring_begin(struct intel_engine *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_engine *ring);
+static inline void intel_ring_emit(struct intel_engine *ring,
u32 data)
{
iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
-static inline void intel_ring_advance(struct intel_ring_buffer *ring)
+static inline void intel_ring_advance(struct intel_engine *ring)
{
ring->tail &= ring->size - 1;
}
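intel_ring_emit() writes one dword at the tail and bumps it; intel_ring_advance() then folds the tail back into the buffer, which is why tail &= size - 1 requires the ring size to be a power of two. The usual caller pattern, sketched:

/* Reserve space for two dwords, write them, fold the tail back.
 * (The hardware tail register is updated later, at submission.) */
ret = intel_ring_begin(ring, 2);
if (ret)
	return ret;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);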
-void __intel_ring_advance(struct intel_ring_buffer *ring);
+void __intel_ring_advance(struct intel_engine *ring);
-int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
-int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_engine *ring);
+void intel_ring_init_seqno(struct intel_engine *ring, u32 seqno);
+int intel_ring_flush_all_caches(struct intel_engine *ring);
+int intel_ring_invalidate_all_caches(struct intel_engine *ring);
void intel_init_rings_early(struct drm_device *dev);
int intel_init_render_ring_buffer(struct drm_device *dev);
@@ -293,21 +295,21 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
-u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+u32 intel_ring_get_active_head(struct intel_engine *ring);
+void intel_ring_setup_status_page(struct intel_engine *ring);
-static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
+static inline u32 intel_ring_get_tail(struct intel_engine *ring)
{
return ring->tail;
}
-static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+static inline u32 intel_ring_get_seqno(struct intel_engine *ring)
{
BUG_ON(ring->outstanding_lazy_seqno == 0);
return ring->outstanding_lazy_seqno;
}
-static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+static inline void i915_trace_irq_get(struct intel_engine *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
ring->trace_irq_seqno = seqno;