Message ID | 1399637360-4277-7-git-send-email-oscar.mateo@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Fri, May 09, 2014 at 01:08:36PM +0100, oscar.mateo@intel.com wrote: > From: Oscar Mateo <oscar.mateo@intel.com> > > In the upcoming patches, we plan to break the correlation between > engines (a.k.a. rings) and ringbuffers, so it makes sense to > refactor the code and make the change obvious. > > No functional changes. > > Signed-off-by: Oscar Mateo <oscar.mateo@intel.com> If we rename stuff I'd vote for something close to Bspec language, like CS. So maybe intel_cs_engine? /me sucks at this naming game -Daniel > --- > drivers/gpu/drm/i915/i915_cmd_parser.c | 16 +-- > drivers/gpu/drm/i915/i915_debugfs.c | 16 +-- > drivers/gpu/drm/i915/i915_dma.c | 10 +- > drivers/gpu/drm/i915/i915_drv.h | 32 +++--- > drivers/gpu/drm/i915/i915_gem.c | 58 +++++------ > drivers/gpu/drm/i915/i915_gem_context.c | 14 +-- > drivers/gpu/drm/i915/i915_gem_execbuffer.c | 18 ++-- > drivers/gpu/drm/i915/i915_gem_gtt.c | 18 ++-- > drivers/gpu/drm/i915/i915_gem_gtt.h | 2 +- > drivers/gpu/drm/i915/i915_gpu_error.c | 6 +- > drivers/gpu/drm/i915/i915_irq.c | 28 ++--- > drivers/gpu/drm/i915/i915_trace.h | 26 ++--- > drivers/gpu/drm/i915/intel_display.c | 14 +-- > drivers/gpu/drm/i915/intel_drv.h | 4 +- > drivers/gpu/drm/i915/intel_overlay.c | 12 +-- > drivers/gpu/drm/i915/intel_pm.c | 10 +- > drivers/gpu/drm/i915/intel_ringbuffer.c | 158 ++++++++++++++--------------- > drivers/gpu/drm/i915/intel_ringbuffer.h | 76 +++++++------- > 18 files changed, 259 insertions(+), 259 deletions(-) > > diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c > index 69d34e4..3234d36 100644 > --- a/drivers/gpu/drm/i915/i915_cmd_parser.c > +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c > @@ -498,7 +498,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header) > return 0; > } > > -static bool validate_cmds_sorted(struct intel_ring_buffer *ring) > +static bool validate_cmds_sorted(struct intel_engine *ring) > { > int i; > bool ret = true; > @@ -550,7 +550,7 @@ static bool 
check_sorted(int ring_id, const u32 *reg_table, int reg_count) > return ret; > } > > -static bool validate_regs_sorted(struct intel_ring_buffer *ring) > +static bool validate_regs_sorted(struct intel_engine *ring) > { > return check_sorted(ring->id, ring->reg_table, ring->reg_count) && > check_sorted(ring->id, ring->master_reg_table, > @@ -562,10 +562,10 @@ static bool validate_regs_sorted(struct intel_ring_buffer *ring) > * @ring: the ringbuffer to initialize > * > * Optionally initializes fields related to batch buffer command parsing in the > - * struct intel_ring_buffer based on whether the platform requires software > + * struct intel_engine based on whether the platform requires software > * command parsing. > */ > -void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring) > +void i915_cmd_parser_init_ring(struct intel_engine *ring) > { > if (!IS_GEN7(ring->dev)) > return; > @@ -664,7 +664,7 @@ find_cmd_in_table(const struct drm_i915_cmd_table *table, > * ring's default length encoding and returns default_desc. 
> */ > static const struct drm_i915_cmd_descriptor* > -find_cmd(struct intel_ring_buffer *ring, > +find_cmd(struct intel_engine *ring, > u32 cmd_header, > struct drm_i915_cmd_descriptor *default_desc) > { > @@ -744,7 +744,7 @@ finish: > * > * Return: true if the ring requires software command parsing > */ > -bool i915_needs_cmd_parser(struct intel_ring_buffer *ring) > +bool i915_needs_cmd_parser(struct intel_engine *ring) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > > @@ -763,7 +763,7 @@ bool i915_needs_cmd_parser(struct intel_ring_buffer *ring) > return (i915.enable_cmd_parser == 1); > } > > -static bool check_cmd(const struct intel_ring_buffer *ring, > +static bool check_cmd(const struct intel_engine *ring, > const struct drm_i915_cmd_descriptor *desc, > const u32 *cmd, > const bool is_master, > @@ -865,7 +865,7 @@ static bool check_cmd(const struct intel_ring_buffer *ring, > * > * Return: non-zero if the parser finds violations or otherwise fails > */ > -int i915_parse_cmds(struct intel_ring_buffer *ring, > +int i915_parse_cmds(struct intel_engine *ring, > struct drm_i915_gem_object *batch_obj, > u32 batch_start_offset, > bool is_master) > diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c > index 103e62c..0052460 100644 > --- a/drivers/gpu/drm/i915/i915_debugfs.c > +++ b/drivers/gpu/drm/i915/i915_debugfs.c > @@ -562,7 +562,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data) > struct drm_info_node *node = (struct drm_info_node *) m->private; > struct drm_device *dev = node->minor->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > struct drm_i915_gem_request *gem_request; > int ret, count, i; > > @@ -594,7 +594,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data) > } > > static void i915_ring_seqno_info(struct seq_file *m, > - struct intel_ring_buffer *ring) > + struct intel_engine *ring) > { 
> if (ring->get_seqno) { > seq_printf(m, "Current sequence (%s): %u\n", > @@ -607,7 +607,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) > struct drm_info_node *node = (struct drm_info_node *) m->private; > struct drm_device *dev = node->minor->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int ret, i; > > ret = mutex_lock_interruptible(&dev->struct_mutex); > @@ -630,7 +630,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) > struct drm_info_node *node = (struct drm_info_node *) m->private; > struct drm_device *dev = node->minor->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int ret, i, pipe; > > ret = mutex_lock_interruptible(&dev->struct_mutex); > @@ -800,7 +800,7 @@ static int i915_hws_info(struct seq_file *m, void *data) > struct drm_info_node *node = (struct drm_info_node *) m->private; > struct drm_device *dev = node->minor->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > const u32 *hws; > int i; > > @@ -1677,7 +1677,7 @@ static int i915_context_status(struct seq_file *m, void *unused) > struct drm_info_node *node = (struct drm_info_node *) m->private; > struct drm_device *dev = node->minor->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > struct i915_hw_context *ctx; > int ret, i; > > @@ -1826,7 +1826,7 @@ static int per_file_ctx(int id, void *ptr, void *data) > static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; > int unused, i; > > @@ -1850,7 +1850,7 @@ static void gen8_ppgtt_info(struct 
seq_file *m, struct drm_device *dev) > static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > struct drm_file *file; > int i; > > diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c > index d02c8de..5263d63 100644 > --- a/drivers/gpu/drm/i915/i915_dma.c > +++ b/drivers/gpu/drm/i915/i915_dma.c > @@ -119,7 +119,7 @@ static void i915_write_hws_pga(struct drm_device *dev) > static void i915_free_hws(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = LP_RING(dev_priv); > + struct intel_engine *ring = LP_RING(dev_priv); > > if (dev_priv->status_page_dmah) { > drm_pci_free(dev, dev_priv->status_page_dmah); > @@ -139,7 +139,7 @@ void i915_kernel_lost_context(struct drm_device * dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > struct drm_i915_master_private *master_priv; > - struct intel_ring_buffer *ring = LP_RING(dev_priv); > + struct intel_engine *ring = LP_RING(dev_priv); > > /* > * We should never lose context on the ring with modesetting > @@ -234,7 +234,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) > static int i915_dma_resume(struct drm_device * dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = LP_RING(dev_priv); > + struct intel_engine *ring = LP_RING(dev_priv); > > DRM_DEBUG_DRIVER("%s\n", __func__); > > @@ -782,7 +782,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) > struct drm_i915_private *dev_priv = dev->dev_private; > struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; > int ret = 0; > - struct intel_ring_buffer *ring = LP_RING(dev_priv); > + struct intel_engine *ring = LP_RING(dev_priv); > > DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, > 
READ_BREADCRUMB(dev_priv)); > @@ -1073,7 +1073,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, > { > struct drm_i915_private *dev_priv = dev->dev_private; > drm_i915_hws_addr_t *hws = data; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > > if (drm_core_check_feature(dev, DRIVER_MODESET)) > return -ENODEV; > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h > index b1725c6..3b7a36f9 100644 > --- a/drivers/gpu/drm/i915/i915_drv.h > +++ b/drivers/gpu/drm/i915/i915_drv.h > @@ -594,7 +594,7 @@ struct i915_hw_context { > bool is_initialized; > uint8_t remap_slice; > struct drm_i915_file_private *file_priv; > - struct intel_ring_buffer *last_ring; > + struct intel_engine *last_ring; > struct drm_i915_gem_object *obj; > struct i915_ctx_hang_stats hang_stats; > struct i915_address_space *vm; > @@ -1354,7 +1354,7 @@ struct drm_i915_private { > wait_queue_head_t gmbus_wait_queue; > > struct pci_dev *bridge_dev; > - struct intel_ring_buffer ring[I915_NUM_RINGS]; > + struct intel_engine ring[I915_NUM_RINGS]; > uint32_t last_seqno, next_seqno; > > drm_dma_handle_t *status_page_dmah; > @@ -1675,7 +1675,7 @@ struct drm_i915_gem_object { > void *dma_buf_vmapping; > int vmapping_count; > > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > > /** Breadcrumb of last rendering to the buffer. */ > uint32_t last_read_seqno; > @@ -1714,7 +1714,7 @@ struct drm_i915_gem_object { > */ > struct drm_i915_gem_request { > /** On Which ring this request was generated */ > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > > /** GEM sequence number associated with this request. 
*/ > uint32_t seqno; > @@ -1755,7 +1755,7 @@ struct drm_i915_file_private { > > struct i915_hw_context *private_default_ctx; > atomic_t rps_wait_boost; > - struct intel_ring_buffer *bsd_ring; > + struct intel_engine *bsd_ring; > }; > > /* > @@ -2182,9 +2182,9 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) > > int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); > int i915_gem_object_sync(struct drm_i915_gem_object *obj, > - struct intel_ring_buffer *to); > + struct intel_engine *to); > void i915_vma_move_to_active(struct i915_vma *vma, > - struct intel_ring_buffer *ring); > + struct intel_engine *ring); > int i915_gem_dumb_create(struct drm_file *file_priv, > struct drm_device *dev, > struct drm_mode_create_dumb *args); > @@ -2226,7 +2226,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) > } > > struct drm_i915_gem_request * > -i915_gem_find_active_request(struct intel_ring_buffer *ring); > +i915_gem_find_active_request(struct intel_engine *ring); > > bool i915_gem_retire_requests(struct drm_device *dev); > int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, > @@ -2264,18 +2264,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); > int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); > int __must_check i915_gem_init(struct drm_device *dev); > int __must_check i915_gem_init_hw(struct drm_device *dev); > -int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice); > +int i915_gem_l3_remap(struct intel_engine *ring, int slice); > void i915_gem_init_swizzling(struct drm_device *dev); > void i915_gem_cleanup_ringbuffer(struct drm_device *dev); > int __must_check i915_gpu_idle(struct drm_device *dev); > int __must_check i915_gem_suspend(struct drm_device *dev); > -int __i915_add_request(struct intel_ring_buffer *ring, > +int __i915_add_request(struct intel_engine *ring, > struct drm_file *file, > struct drm_i915_gem_object 
*batch_obj, > u32 *seqno); > #define i915_add_request(ring, seqno) \ > __i915_add_request(ring, NULL, NULL, seqno) > -int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, > +int __must_check i915_wait_seqno(struct intel_engine *ring, > uint32_t seqno); > int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); > int __must_check > @@ -2286,7 +2286,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); > int __must_check > i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, > u32 alignment, > - struct intel_ring_buffer *pipelined); > + struct intel_engine *pipelined); > void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); > int i915_gem_attach_phys_object(struct drm_device *dev, > struct drm_i915_gem_object *obj, > @@ -2388,7 +2388,7 @@ void i915_gem_context_reset(struct drm_device *dev); > int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); > int i915_gem_context_enable(struct drm_i915_private *dev_priv); > void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); > -int i915_switch_context(struct intel_ring_buffer *ring, > +int i915_switch_context(struct intel_engine *ring, > struct i915_hw_context *to); > struct i915_hw_context * > i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); > @@ -2497,9 +2497,9 @@ const char *i915_cache_level_str(int type); > > /* i915_cmd_parser.c */ > int i915_cmd_parser_get_version(void); > -void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring); > -bool i915_needs_cmd_parser(struct intel_ring_buffer *ring); > -int i915_parse_cmds(struct intel_ring_buffer *ring, > +void i915_cmd_parser_init_ring(struct intel_engine *ring); > +bool i915_needs_cmd_parser(struct intel_engine *ring); > +int i915_parse_cmds(struct intel_engine *ring, > struct drm_i915_gem_object *batch_obj, > u32 batch_start_offset, > bool is_master); > diff --git a/drivers/gpu/drm/i915/i915_gem.c 
b/drivers/gpu/drm/i915/i915_gem.c > index 6ef53bd..a3b697b 100644 > --- a/drivers/gpu/drm/i915/i915_gem.c > +++ b/drivers/gpu/drm/i915/i915_gem.c > @@ -64,7 +64,7 @@ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker, > static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target); > static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); > static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); > -static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); > +static void i915_gem_retire_requests_ring(struct intel_engine *ring); > > static bool cpu_cache_is_coherent(struct drm_device *dev, > enum i915_cache_level level) > @@ -977,7 +977,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error, > * equal. > */ > static int > -i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) > +i915_gem_check_olr(struct intel_engine *ring, u32 seqno) > { > int ret; > > @@ -996,7 +996,7 @@ static void fake_irq(unsigned long data) > } > > static bool missed_irq(struct drm_i915_private *dev_priv, > - struct intel_ring_buffer *ring) > + struct intel_engine *ring) > { > return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); > } > @@ -1027,7 +1027,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv) > * Returns 0 if the seqno was found within the alloted time. Else returns the > * errno with remaining time filled in timeout argument. > */ > -static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, > +static int __wait_seqno(struct intel_engine *ring, u32 seqno, > unsigned reset_counter, > bool interruptible, > struct timespec *timeout, > @@ -1134,7 +1134,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, > * request and object lists appropriately for that event. 
> */ > int > -i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) > +i915_wait_seqno(struct intel_engine *ring, uint32_t seqno) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -1159,7 +1159,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) > > static int > i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj, > - struct intel_ring_buffer *ring) > + struct intel_engine *ring) > { > if (!obj->active) > return 0; > @@ -1184,7 +1184,7 @@ static __must_check int > i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, > bool readonly) > { > - struct intel_ring_buffer *ring = obj->ring; > + struct intel_engine *ring = obj->ring; > u32 seqno; > int ret; > > @@ -1209,7 +1209,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, > { > struct drm_device *dev = obj->base.dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = obj->ring; > + struct intel_engine *ring = obj->ring; > unsigned reset_counter; > u32 seqno; > int ret; > @@ -2011,7 +2011,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) > > static void > i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, > - struct intel_ring_buffer *ring) > + struct intel_engine *ring) > { > struct drm_device *dev = obj->base.dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -2049,7 +2049,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, > } > > void i915_vma_move_to_active(struct i915_vma *vma, > - struct intel_ring_buffer *ring) > + struct intel_engine *ring) > { > list_move_tail(&vma->mm_list, &vma->vm->active_list); > return i915_gem_object_move_to_active(vma->obj, ring); > @@ -2090,7 +2090,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) > static void > i915_gem_object_retire(struct drm_i915_gem_object *obj) > { > - struct intel_ring_buffer *ring = obj->ring; > + 
struct intel_engine *ring = obj->ring; > > if (ring == NULL) > return; > @@ -2104,7 +2104,7 @@ static int > i915_gem_init_seqno(struct drm_device *dev, u32 seqno) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int ret, i, j; > > /* Carefully retire all requests without writing to the rings */ > @@ -2170,7 +2170,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) > return 0; > } > > -int __i915_add_request(struct intel_ring_buffer *ring, > +int __i915_add_request(struct intel_engine *ring, > struct drm_file *file, > struct drm_i915_gem_object *obj, > u32 *out_seqno) > @@ -2330,7 +2330,7 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request) > } > > struct drm_i915_gem_request * > -i915_gem_find_active_request(struct intel_ring_buffer *ring) > +i915_gem_find_active_request(struct intel_engine *ring) > { > struct drm_i915_gem_request *request; > u32 completed_seqno; > @@ -2348,7 +2348,7 @@ i915_gem_find_active_request(struct intel_ring_buffer *ring) > } > > static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, > - struct intel_ring_buffer *ring) > + struct intel_engine *ring) > { > struct drm_i915_gem_request *request; > bool ring_hung; > @@ -2367,7 +2367,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, > } > > static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, > - struct intel_ring_buffer *ring) > + struct intel_engine *ring) > { > while (!list_empty(&ring->active_list)) { > struct drm_i915_gem_object *obj; > @@ -2426,7 +2426,7 @@ void i915_gem_restore_fences(struct drm_device *dev) > void i915_gem_reset(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int i; > > /* > @@ -2449,7 +2449,7 @@ void i915_gem_reset(struct drm_device *dev) > * This function clears the request list as 
sequence numbers are passed. > */ > static void > -i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) > +i915_gem_retire_requests_ring(struct intel_engine *ring) > { > uint32_t seqno; > > @@ -2512,7 +2512,7 @@ bool > i915_gem_retire_requests(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > bool idle = true; > int i; > > @@ -2606,7 +2606,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) > struct drm_i915_private *dev_priv = dev->dev_private; > struct drm_i915_gem_wait *args = data; > struct drm_i915_gem_object *obj; > - struct intel_ring_buffer *ring = NULL; > + struct intel_engine *ring = NULL; > struct timespec timeout_stack, *timeout = NULL; > unsigned reset_counter; > u32 seqno = 0; > @@ -2677,9 +2677,9 @@ out: > */ > int > i915_gem_object_sync(struct drm_i915_gem_object *obj, > - struct intel_ring_buffer *to) > + struct intel_engine *to) > { > - struct intel_ring_buffer *from = obj->ring; > + struct intel_engine *from = obj->ring; > u32 seqno; > int ret, idx; > > @@ -2800,7 +2800,7 @@ int i915_vma_unbind(struct i915_vma *vma) > int i915_gpu_idle(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int ret, i; > > /* Flush everything onto the inactive list. 
*/ > @@ -3659,7 +3659,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj) > int > i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, > u32 alignment, > - struct intel_ring_buffer *pipelined) > + struct intel_engine *pipelined) > { > u32 old_read_domains, old_write_domain; > int ret; > @@ -3812,7 +3812,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) > struct drm_i915_file_private *file_priv = file->driver_priv; > unsigned long recent_enough = jiffies - msecs_to_jiffies(20); > struct drm_i915_gem_request *request; > - struct intel_ring_buffer *ring = NULL; > + struct intel_engine *ring = NULL; > unsigned reset_counter; > u32 seqno = 0; > int ret; > @@ -4258,7 +4258,7 @@ static void > i915_gem_stop_ringbuffers(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int i; > > for_each_active_ring(ring, dev_priv, i) > @@ -4307,7 +4307,7 @@ err: > return ret; > } > > -int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice) > +int i915_gem_l3_remap(struct intel_engine *ring, int slice) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -4532,7 +4532,7 @@ void > i915_gem_cleanup_ringbuffer(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int i; > > for_each_active_ring(ring, dev_priv, i) > @@ -4608,7 +4608,7 @@ i915_gem_lastclose(struct drm_device *dev) > } > > static void > -init_ring_lists(struct intel_ring_buffer *ring) > +init_ring_lists(struct intel_engine *ring) > { > INIT_LIST_HEAD(&ring->active_list); > INIT_LIST_HEAD(&ring->request_list); > diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c > index 014fb8f..4d37e20 100644 > --- a/drivers/gpu/drm/i915/i915_gem_context.c > +++ 
b/drivers/gpu/drm/i915/i915_gem_context.c > @@ -359,7 +359,7 @@ err_destroy: > void i915_gem_context_reset(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int i; > > /* Prevent the hardware from restoring the last context (which hung) on > @@ -392,7 +392,7 @@ int i915_gem_context_init(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > struct i915_hw_context *ctx; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int unused; > > /* Init should only be called once per module load. Eventually the > @@ -428,7 +428,7 @@ void i915_gem_context_fini(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int unused; > > if (dctx->obj) { > @@ -467,7 +467,7 @@ void i915_gem_context_fini(struct drm_device *dev) > > int i915_gem_context_enable(struct drm_i915_private *dev_priv) > { > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int ret, i; > > /* This is the only place the aliasing PPGTT gets enabled, which means > @@ -546,7 +546,7 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) > } > > static inline int > -mi_set_context(struct intel_ring_buffer *ring, > +mi_set_context(struct intel_engine *ring, > struct i915_hw_context *new_context, > u32 hw_flags) > { > @@ -596,7 +596,7 @@ mi_set_context(struct intel_ring_buffer *ring, > return ret; > } > > -static int do_switch(struct intel_ring_buffer *ring, > +static int do_switch(struct intel_engine *ring, > struct i915_hw_context *to) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > @@ -726,7 +726,7 @@ unpin_out: > * it will have a refcount > 1.
This allows us to destroy the context abstract > * object while letting the normal object tracking destroy the backing BO. > */ > -int i915_switch_context(struct intel_ring_buffer *ring, > +int i915_switch_context(struct intel_engine *ring, > struct i915_hw_context *to) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c > index 47fe8ec..95e797e 100644 > --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c > +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c > @@ -541,7 +541,7 @@ need_reloc_mappable(struct i915_vma *vma) > > static int > i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, > - struct intel_ring_buffer *ring, > + struct intel_engine *ring, > bool *need_reloc) > { > struct drm_i915_gem_object *obj = vma->obj; > @@ -596,7 +596,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, > } > > static int > -i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, > +i915_gem_execbuffer_reserve(struct intel_engine *ring, > struct list_head *vmas, > bool *need_relocs) > { > @@ -711,7 +711,7 @@ static int > i915_gem_execbuffer_relocate_slow(struct drm_device *dev, > struct drm_i915_gem_execbuffer2 *args, > struct drm_file *file, > - struct intel_ring_buffer *ring, > + struct intel_engine *ring, > struct eb_vmas *eb, > struct drm_i915_gem_exec_object2 *exec) > { > @@ -827,7 +827,7 @@ err: > } > > static int > -i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, > +i915_gem_execbuffer_move_to_gpu(struct intel_engine *ring, > struct list_head *vmas) > { > struct i915_vma *vma; > @@ -912,7 +912,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, > > static struct i915_hw_context * > i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, > - struct intel_ring_buffer *ring, const u32 ctx_id) > + struct intel_engine *ring, const u32 ctx_id) > { > struct i915_hw_context *ctx = NULL; > struct 
i915_ctx_hang_stats *hs; > @@ -935,7 +935,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, > > static void > i915_gem_execbuffer_move_to_active(struct list_head *vmas, > - struct intel_ring_buffer *ring) > + struct intel_engine *ring) > { > struct i915_vma *vma; > > @@ -970,7 +970,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, > static void > i915_gem_execbuffer_retire_commands(struct drm_device *dev, > struct drm_file *file, > - struct intel_ring_buffer *ring, > + struct intel_engine *ring, > struct drm_i915_gem_object *obj) > { > /* Unconditionally force add_request to emit a full flush. */ > @@ -982,7 +982,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, > > static int > i915_reset_gen7_sol_offsets(struct drm_device *dev, > - struct intel_ring_buffer *ring) > + struct intel_engine *ring) > { > struct drm_i915_private *dev_priv = dev->dev_private; > int ret, i; > @@ -1048,7 +1048,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, > struct eb_vmas *eb; > struct drm_i915_gem_object *batch_obj; > struct drm_clip_rect *cliprects = NULL; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > struct i915_hw_context *ctx; > struct i915_address_space *vm; > const u32 ctx_id = i915_execbuffer2_get_context_id(*args); > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c > index 1dff805..31b58ee 100644 > --- a/drivers/gpu/drm/i915/i915_gem_gtt.c > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c > @@ -207,7 +207,7 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, > } > > /* Broadwell Page Directory Pointer Descriptors */ > -static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry, > +static int gen8_write_pdp(struct intel_engine *ring, unsigned entry, > uint64_t val, bool synchronous) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > @@ -237,7 +237,7 @@ static int gen8_write_pdp(struct intel_ring_buffer *ring, 
unsigned entry, > } > > static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, > - struct intel_ring_buffer *ring, > + struct intel_engine *ring, > bool synchronous) > { > int i, ret; > @@ -716,7 +716,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) > } > > static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, > - struct intel_ring_buffer *ring, > + struct intel_engine *ring, > bool synchronous) > { > struct drm_device *dev = ppgtt->base.dev; > @@ -760,7 +760,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, > } > > static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, > - struct intel_ring_buffer *ring, > + struct intel_engine *ring, > bool synchronous) > { > struct drm_device *dev = ppgtt->base.dev; > @@ -811,7 +811,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, > } > > static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, > - struct intel_ring_buffer *ring, > + struct intel_engine *ring, > bool synchronous) > { > struct drm_device *dev = ppgtt->base.dev; > @@ -832,7 +832,7 @@ static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) > { > struct drm_device *dev = ppgtt->base.dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int j, ret; > > for_each_active_ring(ring, dev_priv, j) { > @@ -862,7 +862,7 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) > { > struct drm_device *dev = ppgtt->base.dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > uint32_t ecochk, ecobits; > int i; > > @@ -901,7 +901,7 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) > { > struct drm_device *dev = ppgtt->base.dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > uint32_t ecochk, gab_ctl, ecobits; > int i; > > @@ -1269,7 +1269,7 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool 
interruptible) > void i915_check_and_clear_faults(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int i; > > if (INTEL_INFO(dev)->gen < 6) > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h > index cfca023..0775662 100644 > --- a/drivers/gpu/drm/i915/i915_gem_gtt.h > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h > @@ -261,7 +261,7 @@ struct i915_hw_ppgtt { > > int (*enable)(struct i915_hw_ppgtt *ppgtt); > int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, > - struct intel_ring_buffer *ring, > + struct intel_engine *ring, > bool synchronous); > void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); > }; > diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c > index 8f37238..0853db3 100644 > --- a/drivers/gpu/drm/i915/i915_gpu_error.c > +++ b/drivers/gpu/drm/i915/i915_gpu_error.c > @@ -745,7 +745,7 @@ static void i915_gem_record_fences(struct drm_device *dev, > } > > static void i915_record_ring_state(struct drm_device *dev, > - struct intel_ring_buffer *ring, > + struct intel_engine *ring, > struct drm_i915_error_ring *ering) > { > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -857,7 +857,7 @@ static void i915_record_ring_state(struct drm_device *dev, > } > > > -static void i915_gem_record_active_context(struct intel_ring_buffer *ring, > +static void i915_gem_record_active_context(struct intel_engine *ring, > struct drm_i915_error_state *error, > struct drm_i915_error_ring *ering) > { > @@ -884,7 +884,7 @@ static void i915_gem_record_rings(struct drm_device *dev, > int i, count; > > for (i = 0; i < I915_NUM_RINGS; i++) { > - struct intel_ring_buffer *ring = &dev_priv->ring[i]; > + struct intel_engine *ring = &dev_priv->ring[i]; > > if (!intel_ring_initialized(ring)) > continue; > diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c > index 
4a8e8cb..58c8812 100644 > --- a/drivers/gpu/drm/i915/i915_irq.c > +++ b/drivers/gpu/drm/i915/i915_irq.c > @@ -1077,7 +1077,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev) > } > > static void notify_ring(struct drm_device *dev, > - struct intel_ring_buffer *ring) > + struct intel_engine *ring) > { > if (ring->obj == NULL) > return; > @@ -2111,7 +2111,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) > static void i915_error_wake_up(struct drm_i915_private *dev_priv, > bool reset_completed) > { > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int i; > > /* > @@ -2544,14 +2544,14 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe) > } > > static u32 > -ring_last_seqno(struct intel_ring_buffer *ring) > +ring_last_seqno(struct intel_engine *ring) > { > return list_entry(ring->request_list.prev, > struct drm_i915_gem_request, list)->seqno; > } > > static bool > -ring_idle(struct intel_ring_buffer *ring, u32 seqno) > +ring_idle(struct intel_engine *ring, u32 seqno) > { > return (list_empty(&ring->request_list) || > i915_seqno_passed(seqno, ring_last_seqno(ring))); > @@ -2574,11 +2574,11 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) > } > } > > -static struct intel_ring_buffer * > -semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr) > +static struct intel_engine * > +semaphore_wait_to_signaller_ring(struct intel_engine *ring, u32 ipehr) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > - struct intel_ring_buffer *signaller; > + struct intel_engine *signaller; > int i; > > if (INTEL_INFO(dev_priv->dev)->gen >= 8) { > @@ -2606,8 +2606,8 @@ semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr) > return NULL; > } > > -static struct intel_ring_buffer * > -semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) > +static struct intel_engine * > +semaphore_waits_for(struct intel_engine *ring, u32 *seqno) > { > struct 
drm_i915_private *dev_priv = ring->dev->dev_private; > u32 cmd, ipehr, head; > @@ -2649,10 +2649,10 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) > return semaphore_wait_to_signaller_ring(ring, ipehr); > } > > -static int semaphore_passed(struct intel_ring_buffer *ring) > +static int semaphore_passed(struct intel_engine *ring) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > - struct intel_ring_buffer *signaller; > + struct intel_engine *signaller; > u32 seqno, ctl; > > ring->hangcheck.deadlock = true; > @@ -2671,7 +2671,7 @@ static int semaphore_passed(struct intel_ring_buffer *ring) > > static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) > { > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int i; > > for_each_active_ring(ring, dev_priv, i) > @@ -2679,7 +2679,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) > } > > static enum intel_ring_hangcheck_action > -ring_stuck(struct intel_ring_buffer *ring, u64 acthd) > +ring_stuck(struct intel_engine *ring, u64 acthd) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -2735,7 +2735,7 @@ static void i915_hangcheck_elapsed(unsigned long data) > { > struct drm_device *dev = (struct drm_device *)data; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > int i; > int busy_count = 0, rings_hung = 0; > bool stuck[I915_NUM_RINGS] = { 0 }; > diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h > index b29d7b1..a4f9e62 100644 > --- a/drivers/gpu/drm/i915/i915_trace.h > +++ b/drivers/gpu/drm/i915/i915_trace.h > @@ -326,8 +326,8 @@ TRACE_EVENT(i915_gem_evict_vm, > ); > > TRACE_EVENT(i915_gem_ring_sync_to, > - TP_PROTO(struct intel_ring_buffer *from, > - struct intel_ring_buffer *to, > + TP_PROTO(struct intel_engine *from, > + struct intel_engine *to, > u32 seqno), > 
TP_ARGS(from, to, seqno), > > @@ -352,7 +352,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, > ); > > TRACE_EVENT(i915_gem_ring_dispatch, > - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags), > + TP_PROTO(struct intel_engine *ring, u32 seqno, u32 flags), > TP_ARGS(ring, seqno, flags), > > TP_STRUCT__entry( > @@ -375,7 +375,7 @@ TRACE_EVENT(i915_gem_ring_dispatch, > ); > > TRACE_EVENT(i915_gem_ring_flush, > - TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush), > + TP_PROTO(struct intel_engine *ring, u32 invalidate, u32 flush), > TP_ARGS(ring, invalidate, flush), > > TP_STRUCT__entry( > @@ -398,7 +398,7 @@ TRACE_EVENT(i915_gem_ring_flush, > ); > > DECLARE_EVENT_CLASS(i915_gem_request, > - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), > + TP_PROTO(struct intel_engine *ring, u32 seqno), > TP_ARGS(ring, seqno), > > TP_STRUCT__entry( > @@ -418,12 +418,12 @@ DECLARE_EVENT_CLASS(i915_gem_request, > ); > > DEFINE_EVENT(i915_gem_request, i915_gem_request_add, > - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), > + TP_PROTO(struct intel_engine *ring, u32 seqno), > TP_ARGS(ring, seqno) > ); > > TRACE_EVENT(i915_gem_request_complete, > - TP_PROTO(struct intel_ring_buffer *ring), > + TP_PROTO(struct intel_engine *ring), > TP_ARGS(ring), > > TP_STRUCT__entry( > @@ -443,12 +443,12 @@ TRACE_EVENT(i915_gem_request_complete, > ); > > DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, > - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), > + TP_PROTO(struct intel_engine *ring, u32 seqno), > TP_ARGS(ring, seqno) > ); > > TRACE_EVENT(i915_gem_request_wait_begin, > - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), > + TP_PROTO(struct intel_engine *ring, u32 seqno), > TP_ARGS(ring, seqno), > > TP_STRUCT__entry( > @@ -477,12 +477,12 @@ TRACE_EVENT(i915_gem_request_wait_begin, > ); > > DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, > - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), > + TP_PROTO(struct intel_engine *ring, u32 
seqno), > TP_ARGS(ring, seqno) > ); > > DECLARE_EVENT_CLASS(i915_ring, > - TP_PROTO(struct intel_ring_buffer *ring), > + TP_PROTO(struct intel_engine *ring), > TP_ARGS(ring), > > TP_STRUCT__entry( > @@ -499,12 +499,12 @@ DECLARE_EVENT_CLASS(i915_ring, > ); > > DEFINE_EVENT(i915_ring, i915_ring_wait_begin, > - TP_PROTO(struct intel_ring_buffer *ring), > + TP_PROTO(struct intel_engine *ring), > TP_ARGS(ring) > ); > > DEFINE_EVENT(i915_ring, i915_ring_wait_end, > - TP_PROTO(struct intel_ring_buffer *ring), > + TP_PROTO(struct intel_engine *ring), > TP_ARGS(ring) > ); > > diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c > index c65e7f7..f821147 100644 > --- a/drivers/gpu/drm/i915/intel_display.c > +++ b/drivers/gpu/drm/i915/intel_display.c > @@ -1944,7 +1944,7 @@ static int intel_align_height(struct drm_device *dev, int height, bool tiled) > int > intel_pin_and_fence_fb_obj(struct drm_device *dev, > struct drm_i915_gem_object *obj, > - struct intel_ring_buffer *pipelined) > + struct intel_engine *pipelined) > { > struct drm_i915_private *dev_priv = dev->dev_private; > u32 alignment; > @@ -8424,7 +8424,7 @@ out: > } > > void intel_mark_fb_busy(struct drm_i915_gem_object *obj, > - struct intel_ring_buffer *ring) > + struct intel_engine *ring) > { > struct drm_device *dev = obj->base.dev; > struct drm_crtc *crtc; > @@ -8582,7 +8582,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, > struct drm_i915_private *dev_priv = dev->dev_private; > struct intel_crtc *intel_crtc = to_intel_crtc(crtc); > u32 flip_mask; > - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > int ret; > > ret = intel_pin_and_fence_fb_obj(dev, obj, ring); > @@ -8627,7 +8627,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, > struct drm_i915_private *dev_priv = dev->dev_private; > struct intel_crtc *intel_crtc = to_intel_crtc(crtc); > u32 flip_mask; > - struct intel_ring_buffer *ring 
= &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > int ret; > > ret = intel_pin_and_fence_fb_obj(dev, obj, ring); > @@ -8669,7 +8669,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, > struct drm_i915_private *dev_priv = dev->dev_private; > struct intel_crtc *intel_crtc = to_intel_crtc(crtc); > uint32_t pf, pipesrc; > - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > int ret; > > ret = intel_pin_and_fence_fb_obj(dev, obj, ring); > @@ -8717,7 +8717,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, > { > struct drm_i915_private *dev_priv = dev->dev_private; > struct intel_crtc *intel_crtc = to_intel_crtc(crtc); > - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > uint32_t pf, pipesrc; > int ret; > > @@ -8762,7 +8762,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, > { > struct drm_i915_private *dev_priv = dev->dev_private; > struct intel_crtc *intel_crtc = to_intel_crtc(crtc); > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > uint32_t plane_bit = 0; > int len, ret; > > diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h > index d8b540b..23b5abf 100644 > --- a/drivers/gpu/drm/i915/intel_drv.h > +++ b/drivers/gpu/drm/i915/intel_drv.h > @@ -694,7 +694,7 @@ int intel_pch_rawclk(struct drm_device *dev); > int valleyview_cur_cdclk(struct drm_i915_private *dev_priv); > void intel_mark_busy(struct drm_device *dev); > void intel_mark_fb_busy(struct drm_i915_gem_object *obj, > - struct intel_ring_buffer *ring); > + struct intel_engine *ring); > void intel_mark_idle(struct drm_device *dev); > void intel_crtc_restore_mode(struct drm_crtc *crtc); > void intel_crtc_update_dpms(struct drm_crtc *crtc); > @@ -726,7 +726,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, > struct intel_load_detect_pipe *old); > int 
intel_pin_and_fence_fb_obj(struct drm_device *dev, > struct drm_i915_gem_object *obj, > - struct intel_ring_buffer *pipelined); > + struct intel_engine *pipelined); > void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); > struct drm_framebuffer * > __intel_framebuffer_create(struct drm_device *dev, > diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c > index d8adc91..965eec1 100644 > --- a/drivers/gpu/drm/i915/intel_overlay.c > +++ b/drivers/gpu/drm/i915/intel_overlay.c > @@ -213,7 +213,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, > { > struct drm_device *dev = overlay->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > int ret; > > BUG_ON(overlay->last_flip_req); > @@ -236,7 +236,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) > { > struct drm_device *dev = overlay->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > int ret; > > BUG_ON(overlay->active); > @@ -263,7 +263,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, > { > struct drm_device *dev = overlay->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > u32 flip_addr = overlay->flip_addr; > u32 tmp; > int ret; > @@ -320,7 +320,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) > { > struct drm_device *dev = overlay->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > u32 flip_addr = overlay->flip_addr; > int ret; > > @@ -363,7 +363,7 @@ static int intel_overlay_recover_from_interrupt(struct 
intel_overlay *overlay) > { > struct drm_device *dev = overlay->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > int ret; > > if (overlay->last_flip_req == 0) > @@ -389,7 +389,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) > { > struct drm_device *dev = overlay->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > int ret; > > /* Only wait if there is actually an old frame to release to > diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c > index acfded3..17f636e 100644 > --- a/drivers/gpu/drm/i915/intel_pm.c > +++ b/drivers/gpu/drm/i915/intel_pm.c > @@ -3379,7 +3379,7 @@ static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_c > static void gen8_enable_rps(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > uint32_t rc6_mask = 0, rp_state_cap; > int unused; > > @@ -3454,7 +3454,7 @@ static void gen8_enable_rps(struct drm_device *dev) > static void gen6_enable_rps(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > u32 rp_state_cap; > u32 gt_perf_status; > u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; > @@ -3783,7 +3783,7 @@ static void valleyview_cleanup_gt_powersave(struct drm_device *dev) > static void valleyview_enable_rps(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > u32 gtfifodbg, val, rc6_mode = 0; > int i; > > @@ -3914,7 +3914,7 @@ static int ironlake_setup_rc6(struct drm_device *dev) > static void ironlake_enable_rc6(struct 
drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > bool was_interruptible; > int ret; > > @@ -4426,7 +4426,7 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower); > bool i915_gpu_busy(void) > { > struct drm_i915_private *dev_priv; > - struct intel_ring_buffer *ring; > + struct intel_engine *ring; > bool ret = false; > int i; > > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c > index 5d61923..4c3cc44 100644 > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c > @@ -40,7 +40,7 @@ > */ > #define CACHELINE_BYTES 64 > > -static inline int ring_space(struct intel_ring_buffer *ring) > +static inline int ring_space(struct intel_engine *ring) > { > int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); > if (space < 0) > @@ -48,13 +48,13 @@ static inline int ring_space(struct intel_ring_buffer *ring) > return space; > } > > -static bool intel_ring_stopped(struct intel_ring_buffer *ring) > +static bool intel_ring_stopped(struct intel_engine *ring) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); > } > > -void __intel_ring_advance(struct intel_ring_buffer *ring) > +void __intel_ring_advance(struct intel_engine *ring) > { > ring->tail &= ring->size - 1; > if (intel_ring_stopped(ring)) > @@ -63,7 +63,7 @@ void __intel_ring_advance(struct intel_ring_buffer *ring) > } > > static int > -gen2_render_ring_flush(struct intel_ring_buffer *ring, > +gen2_render_ring_flush(struct intel_engine *ring, > u32 invalidate_domains, > u32 flush_domains) > { > @@ -89,7 +89,7 @@ gen2_render_ring_flush(struct intel_ring_buffer *ring, > } > > static int > -gen4_render_ring_flush(struct intel_ring_buffer *ring, > +gen4_render_ring_flush(struct intel_engine *ring, > u32 
invalidate_domains, > u32 flush_domains) > { > @@ -184,7 +184,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring, > * really our business. That leaves only stall at scoreboard. > */ > static int > -intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) > +intel_emit_post_sync_nonzero_flush(struct intel_engine *ring) > { > u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; > int ret; > @@ -219,7 +219,7 @@ intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) > } > > static int > -gen6_render_ring_flush(struct intel_ring_buffer *ring, > +gen6_render_ring_flush(struct intel_engine *ring, > u32 invalidate_domains, u32 flush_domains) > { > u32 flags = 0; > @@ -271,7 +271,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, > } > > static int > -gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) > +gen7_render_ring_cs_stall_wa(struct intel_engine *ring) > { > int ret; > > @@ -289,7 +289,7 @@ gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) > return 0; > } > > -static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value) > +static int gen7_ring_fbc_flush(struct intel_engine *ring, u32 value) > { > int ret; > > @@ -313,7 +313,7 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value) > } > > static int > -gen7_render_ring_flush(struct intel_ring_buffer *ring, > +gen7_render_ring_flush(struct intel_engine *ring, > u32 invalidate_domains, u32 flush_domains) > { > u32 flags = 0; > @@ -374,7 +374,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring, > } > > static int > -gen8_render_ring_flush(struct intel_ring_buffer *ring, > +gen8_render_ring_flush(struct intel_engine *ring, > u32 invalidate_domains, u32 flush_domains) > { > u32 flags = 0; > @@ -414,14 +414,14 @@ gen8_render_ring_flush(struct intel_ring_buffer *ring, > > } > > -static void ring_write_tail(struct intel_ring_buffer *ring, > +static void ring_write_tail(struct intel_engine *ring, > u32 value) > { 
> struct drm_i915_private *dev_priv = ring->dev->dev_private; > I915_WRITE_TAIL(ring, value); > } > > -u64 intel_ring_get_active_head(struct intel_ring_buffer *ring) > +u64 intel_ring_get_active_head(struct intel_engine *ring) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > u64 acthd; > @@ -437,7 +437,7 @@ u64 intel_ring_get_active_head(struct intel_ring_buffer *ring) > return acthd; > } > > -static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) > +static void ring_setup_phys_status_page(struct intel_engine *ring) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > u32 addr; > @@ -448,7 +448,7 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) > I915_WRITE(HWS_PGA, addr); > } > > -static bool stop_ring(struct intel_ring_buffer *ring) > +static bool stop_ring(struct intel_engine *ring) > { > struct drm_i915_private *dev_priv = to_i915(ring->dev); > > @@ -472,7 +472,7 @@ static bool stop_ring(struct intel_ring_buffer *ring) > return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; > } > > -static int init_ring_common(struct intel_ring_buffer *ring) > +static int init_ring_common(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -550,7 +550,7 @@ out: > } > > static int > -init_pipe_control(struct intel_ring_buffer *ring) > +init_pipe_control(struct intel_engine *ring) > { > int ret; > > @@ -591,7 +591,7 @@ err: > return ret; > } > > -static int init_render_ring(struct intel_ring_buffer *ring) > +static int init_render_ring(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -647,7 +647,7 @@ static int init_render_ring(struct intel_ring_buffer *ring) > return ret; > } > > -static void render_ring_cleanup(struct intel_ring_buffer *ring) > +static void render_ring_cleanup(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > > @@ 
-663,12 +663,12 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring) > ring->scratch.obj = NULL; > } > > -static int gen6_signal(struct intel_ring_buffer *signaller, > +static int gen6_signal(struct intel_engine *signaller, > unsigned int num_dwords) > { > struct drm_device *dev = signaller->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *useless; > + struct intel_engine *useless; > int i, ret; > > /* NB: In order to be able to do semaphore MBOX updates for varying > @@ -713,7 +713,7 @@ static int gen6_signal(struct intel_ring_buffer *signaller, > * This acts like a signal in the canonical semaphore. > */ > static int > -gen6_add_request(struct intel_ring_buffer *ring) > +gen6_add_request(struct intel_engine *ring) > { > int ret; > > @@ -745,8 +745,8 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, > * @seqno - seqno which the waiter will block on > */ > static int > -gen6_ring_sync(struct intel_ring_buffer *waiter, > - struct intel_ring_buffer *signaller, > +gen6_ring_sync(struct intel_engine *waiter, > + struct intel_engine *signaller, > u32 seqno) > { > u32 dw1 = MI_SEMAPHORE_MBOX | > @@ -794,7 +794,7 @@ do { \ > } while (0) > > static int > -pc_render_add_request(struct intel_ring_buffer *ring) > +pc_render_add_request(struct intel_engine *ring) > { > u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; > int ret; > @@ -842,7 +842,7 @@ pc_render_add_request(struct intel_ring_buffer *ring) > } > > static u32 > -gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) > +gen6_ring_get_seqno(struct intel_engine *ring, bool lazy_coherency) > { > /* Workaround to force correct ordering between irq and seqno writes on > * ivb (and maybe also on snb) by reading from a CS register (like > @@ -856,31 +856,31 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) > } > > static u32 > -ring_get_seqno(struct intel_ring_buffer *ring, bool 
lazy_coherency) > +ring_get_seqno(struct intel_engine *ring, bool lazy_coherency) > { > return intel_read_status_page(ring, I915_GEM_HWS_INDEX); > } > > static void > -ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno) > +ring_set_seqno(struct intel_engine *ring, u32 seqno) > { > intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); > } > > static u32 > -pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) > +pc_render_get_seqno(struct intel_engine *ring, bool lazy_coherency) > { > return ring->scratch.cpu_page[0]; > } > > static void > -pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) > +pc_render_set_seqno(struct intel_engine *ring, u32 seqno) > { > ring->scratch.cpu_page[0] = seqno; > } > > static bool > -gen5_ring_get_irq(struct intel_ring_buffer *ring) > +gen5_ring_get_irq(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -898,7 +898,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring) > } > > static void > -gen5_ring_put_irq(struct intel_ring_buffer *ring) > +gen5_ring_put_irq(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -911,7 +911,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring) > } > > static bool > -i9xx_ring_get_irq(struct intel_ring_buffer *ring) > +i9xx_ring_get_irq(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -932,7 +932,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring) > } > > static void > -i9xx_ring_put_irq(struct intel_ring_buffer *ring) > +i9xx_ring_put_irq(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -948,7 +948,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring) > } > > static bool > -i8xx_ring_get_irq(struct intel_ring_buffer *ring) > 
+i8xx_ring_get_irq(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -969,7 +969,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring) > } > > static void > -i8xx_ring_put_irq(struct intel_ring_buffer *ring) > +i8xx_ring_put_irq(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -984,7 +984,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring) > spin_unlock_irqrestore(&dev_priv->irq_lock, flags); > } > > -void intel_ring_setup_status_page(struct intel_ring_buffer *ring) > +void intel_ring_setup_status_page(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = ring->dev->dev_private; > @@ -1047,7 +1047,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) > } > > static int > -bsd_ring_flush(struct intel_ring_buffer *ring, > +bsd_ring_flush(struct intel_engine *ring, > u32 invalidate_domains, > u32 flush_domains) > { > @@ -1064,7 +1064,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring, > } > > static int > -i9xx_add_request(struct intel_ring_buffer *ring) > +i9xx_add_request(struct intel_engine *ring) > { > int ret; > > @@ -1082,7 +1082,7 @@ i9xx_add_request(struct intel_ring_buffer *ring) > } > > static bool > -gen6_ring_get_irq(struct intel_ring_buffer *ring) > +gen6_ring_get_irq(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -1107,7 +1107,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) > } > > static void > -gen6_ring_put_irq(struct intel_ring_buffer *ring) > +gen6_ring_put_irq(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -1125,7 +1125,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) > } > > static bool > -hsw_vebox_get_irq(struct intel_ring_buffer *ring) > 
+hsw_vebox_get_irq(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -1145,7 +1145,7 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring) > } > > static void > -hsw_vebox_put_irq(struct intel_ring_buffer *ring) > +hsw_vebox_put_irq(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -1163,7 +1163,7 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring) > } > > static bool > -gen8_ring_get_irq(struct intel_ring_buffer *ring) > +gen8_ring_get_irq(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -1189,7 +1189,7 @@ gen8_ring_get_irq(struct intel_ring_buffer *ring) > } > > static void > -gen8_ring_put_irq(struct intel_ring_buffer *ring) > +gen8_ring_put_irq(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -1209,7 +1209,7 @@ gen8_ring_put_irq(struct intel_ring_buffer *ring) > } > > static int > -i965_dispatch_execbuffer(struct intel_ring_buffer *ring, > +i965_dispatch_execbuffer(struct intel_engine *ring, > u64 offset, u32 length, > unsigned flags) > { > @@ -1232,7 +1232,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, > /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ > #define I830_BATCH_LIMIT (256*1024) > static int > -i830_dispatch_execbuffer(struct intel_ring_buffer *ring, > +i830_dispatch_execbuffer(struct intel_engine *ring, > u64 offset, u32 len, > unsigned flags) > { > @@ -1283,7 +1283,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, > } > > static int > -i915_dispatch_execbuffer(struct intel_ring_buffer *ring, > +i915_dispatch_execbuffer(struct intel_engine *ring, > u64 offset, u32 len, > unsigned flags) > { > @@ -1300,7 +1300,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring, > 
return 0; > } > > -static void cleanup_status_page(struct intel_ring_buffer *ring) > +static void cleanup_status_page(struct intel_engine *ring) > { > struct drm_i915_gem_object *obj; > > @@ -1314,7 +1314,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring) > ring->status_page.obj = NULL; > } > > -static int init_status_page(struct intel_ring_buffer *ring) > +static int init_status_page(struct intel_engine *ring) > { > struct drm_i915_gem_object *obj; > > @@ -1351,7 +1351,7 @@ err_unref: > return 0; > } > > -static int init_phys_status_page(struct intel_ring_buffer *ring) > +static int init_phys_status_page(struct intel_engine *ring) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > > @@ -1368,7 +1368,7 @@ static int init_phys_status_page(struct intel_ring_buffer *ring) > return 0; > } > > -void intel_destroy_ring_buffer(struct intel_ring_buffer *ring) > +void intel_destroy_ring_buffer(struct intel_engine *ring) > { > if (!ring->obj) > return; > @@ -1379,7 +1379,7 @@ void intel_destroy_ring_buffer(struct intel_ring_buffer *ring) > ring->obj = NULL; > } > > -int intel_allocate_ring_buffer(struct intel_ring_buffer *ring) > +int intel_allocate_ring_buffer(struct intel_engine *ring) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = to_i915(dev); > @@ -1424,7 +1424,7 @@ err_unref: > } > > static int intel_init_ring_buffer(struct drm_device *dev, > - struct intel_ring_buffer *ring) > + struct intel_engine *ring) > { > int ret; > > @@ -1465,7 +1465,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, > return ring->init(ring); > } > > -void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) > +void intel_cleanup_ring_buffer(struct intel_engine *ring) > { > struct drm_i915_private *dev_priv = to_i915(ring->dev); > > @@ -1485,7 +1485,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) > cleanup_status_page(ring); > } > > -static int intel_ring_wait_request(struct 
intel_ring_buffer *ring, int n) > +static int intel_ring_wait_request(struct intel_engine *ring, int n) > { > struct drm_i915_gem_request *request; > u32 seqno = 0, tail; > @@ -1538,7 +1538,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) > return 0; > } > > -static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) > +static int ring_wait_for_space(struct intel_engine *ring, int n) > { > struct drm_device *dev = ring->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > @@ -1586,7 +1586,7 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) > return -EBUSY; > } > > -static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) > +static int intel_wrap_ring_buffer(struct intel_engine *ring) > { > uint32_t __iomem *virt; > int rem = ring->size - ring->tail; > @@ -1608,7 +1608,7 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) > return 0; > } > > -int intel_ring_idle(struct intel_ring_buffer *ring) > +int intel_ring_idle(struct intel_engine *ring) > { > u32 seqno; > int ret; > @@ -1632,7 +1632,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring) > } > > static int > -intel_ring_alloc_seqno(struct intel_ring_buffer *ring) > +intel_ring_alloc_seqno(struct intel_engine *ring) > { > if (ring->outstanding_lazy_seqno) > return 0; > @@ -1650,7 +1650,7 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring) > return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); > } > > -static int __intel_ring_prepare(struct intel_ring_buffer *ring, > +static int __intel_ring_prepare(struct intel_engine *ring, > int bytes) > { > int ret; > @@ -1670,7 +1670,7 @@ static int __intel_ring_prepare(struct intel_ring_buffer *ring, > return 0; > } > > -int intel_ring_begin(struct intel_ring_buffer *ring, > +int intel_ring_begin(struct intel_engine *ring, > int num_dwords) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > @@ -1695,7 +1695,7 @@ int 
intel_ring_begin(struct intel_ring_buffer *ring, > } > > /* Align the ring tail to a cacheline boundary */ > -int intel_ring_cacheline_align(struct intel_ring_buffer *ring) > +int intel_ring_cacheline_align(struct intel_engine *ring) > { > int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); > int ret; > @@ -1716,7 +1716,7 @@ int intel_ring_cacheline_align(struct intel_ring_buffer *ring) > return 0; > } > > -void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) > +void intel_ring_init_seqno(struct intel_engine *ring, u32 seqno) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > > @@ -1733,7 +1733,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) > ring->hangcheck.seqno = seqno; > } > > -static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, > +static void gen6_bsd_ring_write_tail(struct intel_engine *ring, > u32 value) > { > struct drm_i915_private *dev_priv = ring->dev->dev_private; > @@ -1766,7 +1766,7 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, > _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); > } > > -static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring, > +static int gen6_bsd_ring_flush(struct intel_engine *ring, > u32 invalidate, u32 flush) > { > uint32_t cmd; > @@ -1802,7 +1802,7 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring, > } > > static int > -gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, > +gen8_ring_dispatch_execbuffer(struct intel_engine *ring, > u64 offset, u32 len, > unsigned flags) > { > @@ -1826,7 +1826,7 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, > } > > static int > -hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, > +hsw_ring_dispatch_execbuffer(struct intel_engine *ring, > u64 offset, u32 len, > unsigned flags) > { > @@ -1847,7 +1847,7 @@ hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, > } > > static int > 
-gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, > +gen6_ring_dispatch_execbuffer(struct intel_engine *ring, > u64 offset, u32 len, > unsigned flags) > { > @@ -1869,7 +1869,7 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, > > /* Blitter support (SandyBridge+) */ > > -static int gen6_ring_flush(struct intel_ring_buffer *ring, > +static int gen6_ring_flush(struct intel_engine *ring, > u32 invalidate, u32 flush) > { > struct drm_device *dev = ring->dev; > @@ -1912,7 +1912,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, > int intel_init_render_ring_buffer(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > > if (INTEL_INFO(dev)->gen >= 6) { > ring->add_request = gen6_add_request; > @@ -2018,7 +2018,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) > int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; > + struct intel_engine *ring = &dev_priv->ring[RCS]; > int ret; > > if (INTEL_INFO(dev)->gen >= 6) { > @@ -2081,7 +2081,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) > int intel_init_bsd_ring_buffer(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; > + struct intel_engine *ring = &dev_priv->ring[VCS]; > > ring->write_tail = ring_write_tail; > if (INTEL_INFO(dev)->gen >= 6) { > @@ -2152,7 +2152,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) > int intel_init_bsd2_ring_buffer(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[VCS2]; > + struct intel_engine *ring = &dev_priv->ring[VCS2]; > > if 
((INTEL_INFO(dev)->gen != 8)) { > DRM_ERROR("No dual-BSD ring on non-BDW machine\n"); > @@ -2196,7 +2196,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) > int intel_init_blt_ring_buffer(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; > + struct intel_engine *ring = &dev_priv->ring[BCS]; > > ring->write_tail = ring_write_tail; > ring->flush = gen6_ring_flush; > @@ -2241,7 +2241,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) > int intel_init_vebox_ring_buffer(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > - struct intel_ring_buffer *ring = &dev_priv->ring[VECS]; > + struct intel_engine *ring = &dev_priv->ring[VECS]; > > ring->write_tail = ring_write_tail; > ring->flush = gen6_ring_flush; > @@ -2279,7 +2279,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) > } > > int > -intel_ring_flush_all_caches(struct intel_ring_buffer *ring) > +intel_ring_flush_all_caches(struct intel_engine *ring) > { > int ret; > > @@ -2297,7 +2297,7 @@ intel_ring_flush_all_caches(struct intel_ring_buffer *ring) > } > > int > -intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) > +intel_ring_invalidate_all_caches(struct intel_engine *ring) > { > uint32_t flush_domains; > int ret; > @@ -2317,7 +2317,7 @@ intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) > } > > void > -intel_stop_ring_buffer(struct intel_ring_buffer *ring) > +intel_stop_ring_buffer(struct intel_engine *ring) > { > int ret; > > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h > index 680e451..50cc525 100644 > --- a/drivers/gpu/drm/i915/intel_ringbuffer.h > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h > @@ -54,7 +54,7 @@ struct intel_ring_hangcheck { > bool deadlock; > }; > > -struct intel_ring_buffer { > +struct intel_engine { > const char *name; > enum intel_ring_id { > RCS = 0x0, > @@ 
-90,33 +90,33 @@ struct intel_ring_buffer { > unsigned irq_refcount; /* protected by dev_priv->irq_lock */ > u32 irq_enable_mask; /* bitmask to enable ring interrupt */ > u32 trace_irq_seqno; > - bool __must_check (*irq_get)(struct intel_ring_buffer *ring); > - void (*irq_put)(struct intel_ring_buffer *ring); > + bool __must_check (*irq_get)(struct intel_engine *ring); > + void (*irq_put)(struct intel_engine *ring); > > - int (*init)(struct intel_ring_buffer *ring); > + int (*init)(struct intel_engine *ring); > > - void (*write_tail)(struct intel_ring_buffer *ring, > + void (*write_tail)(struct intel_engine *ring, > u32 value); > - int __must_check (*flush)(struct intel_ring_buffer *ring, > + int __must_check (*flush)(struct intel_engine *ring, > u32 invalidate_domains, > u32 flush_domains); > - int (*add_request)(struct intel_ring_buffer *ring); > + int (*add_request)(struct intel_engine *ring); > /* Some chipsets are not quite as coherent as advertised and need > * an expensive kick to force a true read of the up-to-date seqno. > * However, the up-to-date seqno is not always required and the last > * seen value is good enough. Note that the seqno will always be > * monotonic, even if not coherent. 
> */ > - u32 (*get_seqno)(struct intel_ring_buffer *ring, > + u32 (*get_seqno)(struct intel_engine *ring, > bool lazy_coherency); > - void (*set_seqno)(struct intel_ring_buffer *ring, > + void (*set_seqno)(struct intel_engine *ring, > u32 seqno); > - int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, > + int (*dispatch_execbuffer)(struct intel_engine *ring, > u64 offset, u32 length, > unsigned flags); > #define I915_DISPATCH_SECURE 0x1 > #define I915_DISPATCH_PINNED 0x2 > - void (*cleanup)(struct intel_ring_buffer *ring); > + void (*cleanup)(struct intel_engine *ring); > > struct { > u32 sync_seqno[I915_NUM_RINGS-1]; > @@ -129,10 +129,10 @@ struct intel_ring_buffer { > } mbox; > > /* AKA wait() */ > - int (*sync_to)(struct intel_ring_buffer *ring, > - struct intel_ring_buffer *to, > + int (*sync_to)(struct intel_engine *ring, > + struct intel_engine *to, > u32 seqno); > - int (*signal)(struct intel_ring_buffer *signaller, > + int (*signal)(struct intel_engine *signaller, > /* num_dwords needed by caller */ > unsigned int num_dwords); > } semaphore; > @@ -210,20 +210,20 @@ struct intel_ring_buffer { > }; > > static inline bool > -intel_ring_initialized(struct intel_ring_buffer *ring) > +intel_ring_initialized(struct intel_engine *ring) > { > return ring->obj != NULL; > } > > static inline unsigned > -intel_ring_flag(struct intel_ring_buffer *ring) > +intel_ring_flag(struct intel_engine *ring) > { > return 1 << ring->id; > } > > static inline u32 > -intel_ring_sync_index(struct intel_ring_buffer *ring, > - struct intel_ring_buffer *other) > +intel_ring_sync_index(struct intel_engine *ring, > + struct intel_engine *other) > { > int idx; > > @@ -241,7 +241,7 @@ intel_ring_sync_index(struct intel_ring_buffer *ring, > } > > static inline u32 > -intel_read_status_page(struct intel_ring_buffer *ring, > +intel_read_status_page(struct intel_engine *ring, > int reg) > { > /* Ensure that the compiler doesn't optimize away the load. 
*/ > @@ -250,7 +250,7 @@ intel_read_status_page(struct intel_ring_buffer *ring, > } > > static inline void > -intel_write_status_page(struct intel_ring_buffer *ring, > +intel_write_status_page(struct intel_engine *ring, > int reg, u32 value) > { > ring->status_page.page_addr[reg] = value; > @@ -275,27 +275,27 @@ intel_write_status_page(struct intel_ring_buffer *ring, > #define I915_GEM_HWS_SCRATCH_INDEX 0x30 > #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) > > -void intel_stop_ring_buffer(struct intel_ring_buffer *ring); > -void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); > +void intel_stop_ring_buffer(struct intel_engine *ring); > +void intel_cleanup_ring_buffer(struct intel_engine *ring); > > -int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); > -int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring); > -static inline void intel_ring_emit(struct intel_ring_buffer *ring, > +int __must_check intel_ring_begin(struct intel_engine *ring, int n); > +int __must_check intel_ring_cacheline_align(struct intel_engine *ring); > +static inline void intel_ring_emit(struct intel_engine *ring, > u32 data) > { > iowrite32(data, ring->virtual_start + ring->tail); > ring->tail += 4; > } > -static inline void intel_ring_advance(struct intel_ring_buffer *ring) > +static inline void intel_ring_advance(struct intel_engine *ring) > { > ring->tail &= ring->size - 1; > } > -void __intel_ring_advance(struct intel_ring_buffer *ring); > +void __intel_ring_advance(struct intel_engine *ring); > > -int __must_check intel_ring_idle(struct intel_ring_buffer *ring); > -void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno); > -int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); > -int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); > +int __must_check intel_ring_idle(struct intel_engine *ring); > +void intel_ring_init_seqno(struct intel_engine 
*ring, u32 seqno); > +int intel_ring_flush_all_caches(struct intel_engine *ring); > +int intel_ring_invalidate_all_caches(struct intel_engine *ring); > > void intel_init_rings_early(struct drm_device *dev); > int intel_init_render_ring_buffer(struct drm_device *dev); > @@ -304,24 +304,24 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev); > int intel_init_blt_ring_buffer(struct drm_device *dev); > int intel_init_vebox_ring_buffer(struct drm_device *dev); > > -u64 intel_ring_get_active_head(struct intel_ring_buffer *ring); > -void intel_ring_setup_status_page(struct intel_ring_buffer *ring); > +u64 intel_ring_get_active_head(struct intel_engine *ring); > +void intel_ring_setup_status_page(struct intel_engine *ring); > > -void intel_destroy_ring_buffer(struct intel_ring_buffer *ring); > -int intel_allocate_ring_buffer(struct intel_ring_buffer *ring); > +void intel_destroy_ring_buffer(struct intel_engine *ring); > +int intel_allocate_ring_buffer(struct intel_engine *ring); > > -static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring) > +static inline u32 intel_ring_get_tail(struct intel_engine *ring) > { > return ring->tail; > } > > -static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring) > +static inline u32 intel_ring_get_seqno(struct intel_engine *ring) > { > BUG_ON(ring->outstanding_lazy_seqno == 0); > return ring->outstanding_lazy_seqno; > } > > -static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) > +static inline void i915_trace_irq_get(struct intel_engine *ring, u32 seqno) > { > if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) > ring->trace_irq_seqno = seqno; > -- > 1.9.0 > > _______________________________________________ > Intel-gfx mailing list > Intel-gfx@lists.freedesktop.org > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
On Tue, May 13, 2014 at 03:28:27PM +0200, Daniel Vetter wrote: > On Fri, May 09, 2014 at 01:08:36PM +0100, oscar.mateo@intel.com wrote: > > From: Oscar Mateo <oscar.mateo@intel.com> > > > > In the upcoming patches, we plan to break the correlation between > > engines (a.k.a. rings) and ringbuffers, so it makes sense to > > refactor the code and make the change obvious. > > > > No functional changes. > > > > Signed-off-by: Oscar Mateo <oscar.mateo@intel.com> > > If we rename stuff I'd vote for something close to Bspec language, like > CS. So maybe intel_cs_engine? Also, can we have such patches (and the like of "drm/i915: for_each_ring") pushed early when everyone is happy with them, they cause constant rebasing pain. Thanks,
> -----Original Message----- > From: Lespiau, Damien > Sent: Wednesday, May 14, 2014 2:26 PM > To: Daniel Vetter > Cc: Mateo Lozano, Oscar; intel-gfx@lists.freedesktop.org > Subject: Re: [Intel-gfx] [PATCH 06/50] drm/i915: > s/intel_ring_buffer/intel_engine > > On Tue, May 13, 2014 at 03:28:27PM +0200, Daniel Vetter wrote: > > On Fri, May 09, 2014 at 01:08:36PM +0100, oscar.mateo@intel.com wrote: > > > From: Oscar Mateo <oscar.mateo@intel.com> > > > > > > In the upcoming patches, we plan to break the correlation between > > > engines (a.k.a. rings) and ringbuffers, so it makes sense to > > > refactor the code and make the change obvious. > > > > > > No functional changes. > > > > > > Signed-off-by: Oscar Mateo <oscar.mateo@intel.com> > > > > If we rename stuff I'd vote for something close to Bspec language, > > like CS. So maybe intel_cs_engine? Bikeshedding much, are we? :) If we want to get closer to bspecish, intel_engine_cs would be better. > Also, can we have such patches (and the like of "drm/i915: > for_each_ring") pushed early when everyone is happy with them, they cause > constant rebasing pain. I second that motion!
On Thu, May 15, 2014 at 02:17:23PM +0000, Mateo Lozano, Oscar wrote: > > -----Original Message----- > > From: Lespiau, Damien > > Sent: Wednesday, May 14, 2014 2:26 PM > > To: Daniel Vetter > > Cc: Mateo Lozano, Oscar; intel-gfx@lists.freedesktop.org > > Subject: Re: [Intel-gfx] [PATCH 06/50] drm/i915: > > s/intel_ring_buffer/intel_engine > > > > On Tue, May 13, 2014 at 03:28:27PM +0200, Daniel Vetter wrote: > > > On Fri, May 09, 2014 at 01:08:36PM +0100, oscar.mateo@intel.com wrote: > > > > From: Oscar Mateo <oscar.mateo@intel.com> > > > > > > > > In the upcoming patches, we plan to break the correlation between > > > > engines (a.k.a. rings) and ringbuffers, so it makes sense to > > > > refactor the code and make the change obvious. > > > > > > > > No functional changes. > > > > > > > > Signed-off-by: Oscar Mateo <oscar.mateo@intel.com> > > > > > > If we rename stuff I'd vote for something close to Bspec language, > > > like CS. So maybe intel_cs_engine? > > Bikeshedding much, are we? :) > If we want to get closer to bspecish, intel_engine_cs would be better. I'm ok with that too ;-) > > Also, can we have such patches (and the like of "drm/i915: > > for_each_ring") pushed early when everyone is happy with them, they cause > > constant rebasing pain. > > I second that motion! Fully agreed - as soon as we have a rough sketch of where we want to go to I'll pull in the rename. Aside I highly suggest to do the rename with coccinelle and regenerate it on rebases - that's much less error-prone than doing it by hand. -Daniel
Hi Daniel, > -----Original Message----- > From: Daniel Vetter [mailto:daniel.vetter@ffwll.ch] On Behalf Of Daniel Vetter > Sent: Thursday, May 15, 2014 9:52 PM > To: Mateo Lozano, Oscar > Cc: Lespiau, Damien; Daniel Vetter; intel-gfx@lists.freedesktop.org > Subject: Re: [Intel-gfx] [PATCH 06/50] drm/i915: > s/intel_ring_buffer/intel_engine > > On Thu, May 15, 2014 at 02:17:23PM +0000, Mateo Lozano, Oscar wrote: > > > -----Original Message----- > > > From: Lespiau, Damien > > > Sent: Wednesday, May 14, 2014 2:26 PM > > > To: Daniel Vetter > > > Cc: Mateo Lozano, Oscar; intel-gfx@lists.freedesktop.org > > > Subject: Re: [Intel-gfx] [PATCH 06/50] drm/i915: > > > s/intel_ring_buffer/intel_engine > > > > > > On Tue, May 13, 2014 at 03:28:27PM +0200, Daniel Vetter wrote: > > > > On Fri, May 09, 2014 at 01:08:36PM +0100, oscar.mateo@intel.com > wrote: > > > > > From: Oscar Mateo <oscar.mateo@intel.com> > > > > > > > > > > In the upcoming patches, we plan to break the correlation > > > > > between engines (a.k.a. rings) and ringbuffers, so it makes > > > > > sense to refactor the code and make the change obvious. > > > > > > > > > > No functional changes. > > > > > > > > > > Signed-off-by: Oscar Mateo <oscar.mateo@intel.com> > > > > > > > > If we rename stuff I'd vote for something close to Bspec language, > > > > like CS. So maybe intel_cs_engine? > > > > Bikeshedding much, are we? :) > > If we want to get closer to bspecish, intel_engine_cs would be better. > > I'm ok with that too ;-) > > > > Also, can we have such patches (and the like of "drm/i915: > > > for_each_ring") pushed early when everyone is happy with them, they > > > cause constant rebasing pain. > > > > I second that motion! > > Fully agreed - as soon as we have a rough sketch of where we want to go to I'll > pull in the rename. Aside I highly suggest to do the rename with coccinelle and > regerate it on rebases - that's much less error-prone than doing it by hand. 
> -Daniel I propose the following code refactoring at a minimum. Even if I abstract away all the "i915_gem_context.c" and "intel_ringbuffer.c" functionality, and part of "i915_gem_execbuffer.c", to keep changes to legacy code to a minimum, I still think the following changes are good for the overall code: 1) s/intel_ring_buffer/intel_engine_cs Straight renaming: if the actual ring buffers can live either inside the engine/ring (legacy ringbuffer submission) or inside the context (execlists), it doesn't make sense that the engine/ring is called "intel_ring_buffer". 2) Split the ringbuffers and the rings New struct: +struct intel_ringbuffer { + struct drm_i915_gem_object *obj; + void __iomem *virtual_start; + + u32 head; + u32 tail; + int space; + int size; + int effective_size; + + /** We track the position of the requests in the ring buffer, and + * when each is retired we increment last_retired_head as the GPU + * must have finished processing the request and so we know we + * can advance the ringbuffer up to that position. + * + * last_retired_head is set to -1 after the value is consumed so + * we can detect new retirements. + */ + u32 last_retired_head; +}; And "struct intel_engine_cs" now groups all these elements into "buffer": - void __iomem *virtual_start; - struct drm_i915_gem_object *obj; - u32 head; - u32 tail; - int space; - int size; - int effective_size; - u32 last_retired_head; + struct intel_ringbuffer buffer; 3) Introduce one context backing object per engine - struct drm_i915_gem_object *obj; + struct { + struct drm_i915_gem_object *obj; + } engine[I915_NUM_RINGS]; Legacy code only ever uses engine[RCS], so I can use it everywhere in the existing code. If we agree on this minimum set, I'll send the patches right away. Cheers, Oscar
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 69d34e4..3234d36 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -498,7 +498,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header) return 0; } -static bool validate_cmds_sorted(struct intel_ring_buffer *ring) +static bool validate_cmds_sorted(struct intel_engine *ring) { int i; bool ret = true; @@ -550,7 +550,7 @@ static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count) return ret; } -static bool validate_regs_sorted(struct intel_ring_buffer *ring) +static bool validate_regs_sorted(struct intel_engine *ring) { return check_sorted(ring->id, ring->reg_table, ring->reg_count) && check_sorted(ring->id, ring->master_reg_table, @@ -562,10 +562,10 @@ static bool validate_regs_sorted(struct intel_ring_buffer *ring) * @ring: the ringbuffer to initialize * * Optionally initializes fields related to batch buffer command parsing in the - * struct intel_ring_buffer based on whether the platform requires software + * struct intel_engine based on whether the platform requires software * command parsing. */ -void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring) +void i915_cmd_parser_init_ring(struct intel_engine *ring) { if (!IS_GEN7(ring->dev)) return; @@ -664,7 +664,7 @@ find_cmd_in_table(const struct drm_i915_cmd_table *table, * ring's default length encoding and returns default_desc. 
*/ static const struct drm_i915_cmd_descriptor* -find_cmd(struct intel_ring_buffer *ring, +find_cmd(struct intel_engine *ring, u32 cmd_header, struct drm_i915_cmd_descriptor *default_desc) { @@ -744,7 +744,7 @@ finish: * * Return: true if the ring requires software command parsing */ -bool i915_needs_cmd_parser(struct intel_ring_buffer *ring) +bool i915_needs_cmd_parser(struct intel_engine *ring) { struct drm_i915_private *dev_priv = ring->dev->dev_private; @@ -763,7 +763,7 @@ bool i915_needs_cmd_parser(struct intel_ring_buffer *ring) return (i915.enable_cmd_parser == 1); } -static bool check_cmd(const struct intel_ring_buffer *ring, +static bool check_cmd(const struct intel_engine *ring, const struct drm_i915_cmd_descriptor *desc, const u32 *cmd, const bool is_master, @@ -865,7 +865,7 @@ static bool check_cmd(const struct intel_ring_buffer *ring, * * Return: non-zero if the parser finds violations or otherwise fails */ -int i915_parse_cmds(struct intel_ring_buffer *ring, +int i915_parse_cmds(struct intel_engine *ring, struct drm_i915_gem_object *batch_obj, u32 batch_start_offset, bool is_master) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 103e62c..0052460 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -562,7 +562,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; struct drm_i915_gem_request *gem_request; int ret, count, i; @@ -594,7 +594,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data) } static void i915_ring_seqno_info(struct seq_file *m, - struct intel_ring_buffer *ring) + struct intel_engine *ring) { if (ring->get_seqno) { seq_printf(m, "Current sequence (%s): %u\n", @@ -607,7 +607,7 @@ static int 
i915_gem_seqno_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int ret, i; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -630,7 +630,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int ret, i, pipe; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -800,7 +800,7 @@ static int i915_hws_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; const u32 *hws; int i; @@ -1677,7 +1677,7 @@ static int i915_context_status(struct seq_file *m, void *unused) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; struct i915_hw_context *ctx; int ret, i; @@ -1826,7 +1826,7 @@ static int per_file_ctx(int id, void *ptr, void *data) static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; int unused, i; @@ -1850,7 +1850,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; 
+ struct intel_engine *ring; struct drm_file *file; int i; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index d02c8de..5263d63 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -119,7 +119,7 @@ static void i915_write_hws_pga(struct drm_device *dev) static void i915_free_hws(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = LP_RING(dev_priv); + struct intel_engine *ring = LP_RING(dev_priv); if (dev_priv->status_page_dmah) { drm_pci_free(dev, dev_priv->status_page_dmah); @@ -139,7 +139,7 @@ void i915_kernel_lost_context(struct drm_device * dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; - struct intel_ring_buffer *ring = LP_RING(dev_priv); + struct intel_engine *ring = LP_RING(dev_priv); /* * We should never lose context on the ring with modesetting @@ -234,7 +234,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) static int i915_dma_resume(struct drm_device * dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = LP_RING(dev_priv); + struct intel_engine *ring = LP_RING(dev_priv); DRM_DEBUG_DRIVER("%s\n", __func__); @@ -782,7 +782,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret = 0; - struct intel_ring_buffer *ring = LP_RING(dev_priv); + struct intel_engine *ring = LP_RING(dev_priv); DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, READ_BREADCRUMB(dev_priv)); @@ -1073,7 +1073,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, { struct drm_i915_private *dev_priv = dev->dev_private; drm_i915_hws_addr_t *hws = data; - struct intel_ring_buffer *ring; + struct intel_engine *ring; if (drm_core_check_feature(dev, DRIVER_MODESET)) 
return -ENODEV; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b1725c6..3b7a36f9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -594,7 +594,7 @@ struct i915_hw_context { bool is_initialized; uint8_t remap_slice; struct drm_i915_file_private *file_priv; - struct intel_ring_buffer *last_ring; + struct intel_engine *last_ring; struct drm_i915_gem_object *obj; struct i915_ctx_hang_stats hang_stats; struct i915_address_space *vm; @@ -1354,7 +1354,7 @@ struct drm_i915_private { wait_queue_head_t gmbus_wait_queue; struct pci_dev *bridge_dev; - struct intel_ring_buffer ring[I915_NUM_RINGS]; + struct intel_engine ring[I915_NUM_RINGS]; uint32_t last_seqno, next_seqno; drm_dma_handle_t *status_page_dmah; @@ -1675,7 +1675,7 @@ struct drm_i915_gem_object { void *dma_buf_vmapping; int vmapping_count; - struct intel_ring_buffer *ring; + struct intel_engine *ring; /** Breadcrumb of last rendering to the buffer. */ uint32_t last_read_seqno; @@ -1714,7 +1714,7 @@ struct drm_i915_gem_object { */ struct drm_i915_gem_request { /** On Which ring this request was generated */ - struct intel_ring_buffer *ring; + struct intel_engine *ring; /** GEM sequence number associated with this request. 
*/ uint32_t seqno; @@ -1755,7 +1755,7 @@ struct drm_i915_file_private { struct i915_hw_context *private_default_ctx; atomic_t rps_wait_boost; - struct intel_ring_buffer *bsd_ring; + struct intel_engine *bsd_ring; }; /* @@ -2182,9 +2182,9 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); int i915_gem_object_sync(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *to); + struct intel_engine *to); void i915_vma_move_to_active(struct i915_vma *vma, - struct intel_ring_buffer *ring); + struct intel_engine *ring); int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); @@ -2226,7 +2226,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) } struct drm_i915_gem_request * -i915_gem_find_active_request(struct intel_ring_buffer *ring); +i915_gem_find_active_request(struct intel_engine *ring); bool i915_gem_retire_requests(struct drm_device *dev); int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, @@ -2264,18 +2264,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); int __must_check i915_gem_init(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); -int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice); +int i915_gem_l3_remap(struct intel_engine *ring, int slice); void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gem_suspend(struct drm_device *dev); -int __i915_add_request(struct intel_ring_buffer *ring, +int __i915_add_request(struct intel_engine *ring, struct drm_file *file, struct drm_i915_gem_object *batch_obj, u32 *seqno); #define i915_add_request(ring, seqno) \ __i915_add_request(ring, 
NULL, NULL, seqno) -int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, +int __must_check i915_wait_seqno(struct intel_engine *ring, uint32_t seqno); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int __must_check @@ -2286,7 +2286,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); int __must_check i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, - struct intel_ring_buffer *pipelined); + struct intel_engine *pipelined); void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); int i915_gem_attach_phys_object(struct drm_device *dev, struct drm_i915_gem_object *obj, @@ -2388,7 +2388,7 @@ void i915_gem_context_reset(struct drm_device *dev); int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); int i915_gem_context_enable(struct drm_i915_private *dev_priv); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); -int i915_switch_context(struct intel_ring_buffer *ring, +int i915_switch_context(struct intel_engine *ring, struct i915_hw_context *to); struct i915_hw_context * i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); @@ -2497,9 +2497,9 @@ const char *i915_cache_level_str(int type); /* i915_cmd_parser.c */ int i915_cmd_parser_get_version(void); -void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring); -bool i915_needs_cmd_parser(struct intel_ring_buffer *ring); -int i915_parse_cmds(struct intel_ring_buffer *ring, +void i915_cmd_parser_init_ring(struct intel_engine *ring); +bool i915_needs_cmd_parser(struct intel_engine *ring); +int i915_parse_cmds(struct intel_engine *ring, struct drm_i915_gem_object *batch_obj, u32 batch_start_offset, bool is_master); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6ef53bd..a3b697b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -64,7 +64,7 @@ static unsigned long 
i915_gem_inactive_scan(struct shrinker *shrinker, static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target); static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); -static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); +static void i915_gem_retire_requests_ring(struct intel_engine *ring); static bool cpu_cache_is_coherent(struct drm_device *dev, enum i915_cache_level level) @@ -977,7 +977,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error, * equal. */ static int -i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) +i915_gem_check_olr(struct intel_engine *ring, u32 seqno) { int ret; @@ -996,7 +996,7 @@ static void fake_irq(unsigned long data) } static bool missed_irq(struct drm_i915_private *dev_priv, - struct intel_ring_buffer *ring) + struct intel_engine *ring) { return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); } @@ -1027,7 +1027,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv) * Returns 0 if the seqno was found within the alloted time. Else returns the * errno with remaining time filled in timeout argument. */ -static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, +static int __wait_seqno(struct intel_engine *ring, u32 seqno, unsigned reset_counter, bool interruptible, struct timespec *timeout, @@ -1134,7 +1134,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, * request and object lists appropriately for that event. 
*/ int -i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) +i915_wait_seqno(struct intel_engine *ring, uint32_t seqno) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1159,7 +1159,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) static int i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring) + struct intel_engine *ring) { if (!obj->active) return 0; @@ -1184,7 +1184,7 @@ static __must_check int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool readonly) { - struct intel_ring_buffer *ring = obj->ring; + struct intel_engine *ring = obj->ring; u32 seqno; int ret; @@ -1209,7 +1209,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = obj->ring; + struct intel_engine *ring = obj->ring; unsigned reset_counter; u32 seqno; int ret; @@ -2011,7 +2011,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) static void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring) + struct intel_engine *ring) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -2049,7 +2049,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, } void i915_vma_move_to_active(struct i915_vma *vma, - struct intel_ring_buffer *ring) + struct intel_engine *ring) { list_move_tail(&vma->mm_list, &vma->vm->active_list); return i915_gem_object_move_to_active(vma->obj, ring); @@ -2090,7 +2090,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) static void i915_gem_object_retire(struct drm_i915_gem_object *obj) { - struct intel_ring_buffer *ring = obj->ring; + struct intel_engine *ring = obj->ring; if (ring == NULL) return; @@ -2104,7 +2104,7 @@ static int 
i915_gem_init_seqno(struct drm_device *dev, u32 seqno) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int ret, i, j; /* Carefully retire all requests without writing to the rings */ @@ -2170,7 +2170,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) return 0; } -int __i915_add_request(struct intel_ring_buffer *ring, +int __i915_add_request(struct intel_engine *ring, struct drm_file *file, struct drm_i915_gem_object *obj, u32 *out_seqno) @@ -2330,7 +2330,7 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request) } struct drm_i915_gem_request * -i915_gem_find_active_request(struct intel_ring_buffer *ring) +i915_gem_find_active_request(struct intel_engine *ring) { struct drm_i915_gem_request *request; u32 completed_seqno; @@ -2348,7 +2348,7 @@ i915_gem_find_active_request(struct intel_ring_buffer *ring) } static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, - struct intel_ring_buffer *ring) + struct intel_engine *ring) { struct drm_i915_gem_request *request; bool ring_hung; @@ -2367,7 +2367,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, } static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, - struct intel_ring_buffer *ring) + struct intel_engine *ring) { while (!list_empty(&ring->active_list)) { struct drm_i915_gem_object *obj; @@ -2426,7 +2426,7 @@ void i915_gem_restore_fences(struct drm_device *dev) void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int i; /* @@ -2449,7 +2449,7 @@ void i915_gem_reset(struct drm_device *dev) * This function clears the request list as sequence numbers are passed. 
*/ static void -i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) +i915_gem_retire_requests_ring(struct intel_engine *ring) { uint32_t seqno; @@ -2512,7 +2512,7 @@ bool i915_gem_retire_requests(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; bool idle = true; int i; @@ -2606,7 +2606,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_wait *args = data; struct drm_i915_gem_object *obj; - struct intel_ring_buffer *ring = NULL; + struct intel_engine *ring = NULL; struct timespec timeout_stack, *timeout = NULL; unsigned reset_counter; u32 seqno = 0; @@ -2677,9 +2677,9 @@ out: */ int i915_gem_object_sync(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *to) + struct intel_engine *to) { - struct intel_ring_buffer *from = obj->ring; + struct intel_engine *from = obj->ring; u32 seqno; int ret, idx; @@ -2800,7 +2800,7 @@ int i915_vma_unbind(struct i915_vma *vma) int i915_gpu_idle(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int ret, i; /* Flush everything onto the inactive list. 
*/ @@ -3659,7 +3659,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj) int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, - struct intel_ring_buffer *pipelined) + struct intel_engine *pipelined) { u32 old_read_domains, old_write_domain; int ret; @@ -3812,7 +3812,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) struct drm_i915_file_private *file_priv = file->driver_priv; unsigned long recent_enough = jiffies - msecs_to_jiffies(20); struct drm_i915_gem_request *request; - struct intel_ring_buffer *ring = NULL; + struct intel_engine *ring = NULL; unsigned reset_counter; u32 seqno = 0; int ret; @@ -4258,7 +4258,7 @@ static void i915_gem_stop_ringbuffers(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int i; for_each_active_ring(ring, dev_priv, i) @@ -4307,7 +4307,7 @@ err: return ret; } -int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice) +int i915_gem_l3_remap(struct intel_engine *ring, int slice) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -4532,7 +4532,7 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int i; for_each_active_ring(ring, dev_priv, i) @@ -4608,7 +4608,7 @@ i915_gem_lastclose(struct drm_device *dev) } static void -init_ring_lists(struct intel_ring_buffer *ring) +init_ring_lists(struct intel_engine *ring) { INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 014fb8f..4d37e20 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -359,7 +359,7 @@ err_destroy: void i915_gem_context_reset(struct drm_device *dev) { struct 
drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int i; /* Prevent the hardware from restoring the last context (which hung) on @@ -392,7 +392,7 @@ int i915_gem_context_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct i915_hw_context *ctx; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int unused; /* Init should only be called once per module load. Eventually the @@ -428,7 +428,7 @@ void i915_gem_context_fini(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int unused; if (dctx->obj) { @@ -467,7 +467,7 @@ void i915_gem_context_fini(struct drm_device *dev) int i915_gem_context_enable(struct drm_i915_private *dev_priv) { - struct intel_ring_buffer *ring; + struct intel_engine *ring; int ret, i; /* This is the only place the aliasing PPGTT gets enabled, which means @@ -546,7 +546,7 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) } static inline int -mi_set_context(struct intel_ring_buffer *ring, +mi_set_context(struct intel_engine *ring, struct i915_hw_context *new_context, u32 hw_flags) { @@ -596,7 +596,7 @@ mi_set_context(struct intel_ring_buffer *ring, return ret; } -static int do_switch(struct intel_ring_buffer *ring, +static int do_switch(struct intel_engine *ring, struct i915_hw_context *to) { struct drm_i915_private *dev_priv = ring->dev->dev_private; @@ -726,7 +726,7 @@ unpin_out: * it will have a refoucnt > 1. This allows us to destroy the context abstract * object while letting the normal object tracking destroy the backing BO. 
*/ -int i915_switch_context(struct intel_ring_buffer *ring, +int i915_switch_context(struct intel_engine *ring, struct i915_hw_context *to) { struct drm_i915_private *dev_priv = ring->dev->dev_private; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 47fe8ec..95e797e 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -541,7 +541,7 @@ need_reloc_mappable(struct i915_vma *vma) static int i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, - struct intel_ring_buffer *ring, + struct intel_engine *ring, bool *need_reloc) { struct drm_i915_gem_object *obj = vma->obj; @@ -596,7 +596,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, } static int -i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, +i915_gem_execbuffer_reserve(struct intel_engine *ring, struct list_head *vmas, bool *need_relocs) { @@ -711,7 +711,7 @@ static int i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_i915_gem_execbuffer2 *args, struct drm_file *file, - struct intel_ring_buffer *ring, + struct intel_engine *ring, struct eb_vmas *eb, struct drm_i915_gem_exec_object2 *exec) { @@ -827,7 +827,7 @@ err: } static int -i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, +i915_gem_execbuffer_move_to_gpu(struct intel_engine *ring, struct list_head *vmas) { struct i915_vma *vma; @@ -912,7 +912,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, static struct i915_hw_context * i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, - struct intel_ring_buffer *ring, const u32 ctx_id) + struct intel_engine *ring, const u32 ctx_id) { struct i915_hw_context *ctx = NULL; struct i915_ctx_hang_stats *hs; @@ -935,7 +935,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, static void i915_gem_execbuffer_move_to_active(struct list_head *vmas, - struct intel_ring_buffer *ring) + struct intel_engine 
*ring) { struct i915_vma *vma; @@ -970,7 +970,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, static void i915_gem_execbuffer_retire_commands(struct drm_device *dev, struct drm_file *file, - struct intel_ring_buffer *ring, + struct intel_engine *ring, struct drm_i915_gem_object *obj) { /* Unconditionally force add_request to emit a full flush. */ @@ -982,7 +982,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, static int i915_reset_gen7_sol_offsets(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_engine *ring) { struct drm_i915_private *dev_priv = dev->dev_private; int ret, i; @@ -1048,7 +1048,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct eb_vmas *eb; struct drm_i915_gem_object *batch_obj; struct drm_clip_rect *cliprects = NULL; - struct intel_ring_buffer *ring; + struct intel_engine *ring; struct i915_hw_context *ctx; struct i915_address_space *vm; const u32 ctx_id = i915_execbuffer2_get_context_id(*args); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1dff805..31b58ee 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -207,7 +207,7 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, } /* Broadwell Page Directory Pointer Descriptors */ -static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry, +static int gen8_write_pdp(struct intel_engine *ring, unsigned entry, uint64_t val, bool synchronous) { struct drm_i915_private *dev_priv = ring->dev->dev_private; @@ -237,7 +237,7 @@ static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry, } static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, - struct intel_ring_buffer *ring, + struct intel_engine *ring, bool synchronous) { int i, ret; @@ -716,7 +716,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) } static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, - struct intel_ring_buffer *ring, + struct 
intel_engine *ring, bool synchronous) { struct drm_device *dev = ppgtt->base.dev; @@ -760,7 +760,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, } static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, - struct intel_ring_buffer *ring, + struct intel_engine *ring, bool synchronous) { struct drm_device *dev = ppgtt->base.dev; @@ -811,7 +811,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, } static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, - struct intel_ring_buffer *ring, + struct intel_engine *ring, bool synchronous) { struct drm_device *dev = ppgtt->base.dev; @@ -832,7 +832,7 @@ static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) { struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int j, ret; for_each_active_ring(ring, dev_priv, j) { @@ -862,7 +862,7 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) { struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; uint32_t ecochk, ecobits; int i; @@ -901,7 +901,7 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) { struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; uint32_t ecochk, gab_ctl, ecobits; int i; @@ -1269,7 +1269,7 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) void i915_check_and_clear_faults(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int i; if (INTEL_INFO(dev)->gen < 6) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index cfca023..0775662 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -261,7 +261,7 @@ struct i915_hw_ppgtt { 
int (*enable)(struct i915_hw_ppgtt *ppgtt); int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, - struct intel_ring_buffer *ring, + struct intel_engine *ring, bool synchronous); void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); }; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 8f37238..0853db3 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -745,7 +745,7 @@ static void i915_gem_record_fences(struct drm_device *dev, } static void i915_record_ring_state(struct drm_device *dev, - struct intel_ring_buffer *ring, + struct intel_engine *ring, struct drm_i915_error_ring *ering) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -857,7 +857,7 @@ static void i915_record_ring_state(struct drm_device *dev, } -static void i915_gem_record_active_context(struct intel_ring_buffer *ring, +static void i915_gem_record_active_context(struct intel_engine *ring, struct drm_i915_error_state *error, struct drm_i915_error_ring *ering) { @@ -884,7 +884,7 @@ static void i915_gem_record_rings(struct drm_device *dev, int i, count; for (i = 0; i < I915_NUM_RINGS; i++) { - struct intel_ring_buffer *ring = &dev_priv->ring[i]; + struct intel_engine *ring = &dev_priv->ring[i]; if (!intel_ring_initialized(ring)) continue; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4a8e8cb..58c8812 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1077,7 +1077,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev) } static void notify_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_engine *ring) { if (ring->obj == NULL) return; @@ -2111,7 +2111,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) static void i915_error_wake_up(struct drm_i915_private *dev_priv, bool reset_completed) { - struct intel_ring_buffer *ring; + struct intel_engine *ring; int i; /* @@ 
-2544,14 +2544,14 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe) } static u32 -ring_last_seqno(struct intel_ring_buffer *ring) +ring_last_seqno(struct intel_engine *ring) { return list_entry(ring->request_list.prev, struct drm_i915_gem_request, list)->seqno; } static bool -ring_idle(struct intel_ring_buffer *ring, u32 seqno) +ring_idle(struct intel_engine *ring, u32 seqno) { return (list_empty(&ring->request_list) || i915_seqno_passed(seqno, ring_last_seqno(ring))); @@ -2574,11 +2574,11 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) } } -static struct intel_ring_buffer * -semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr) +static struct intel_engine * +semaphore_wait_to_signaller_ring(struct intel_engine *ring, u32 ipehr) { struct drm_i915_private *dev_priv = ring->dev->dev_private; - struct intel_ring_buffer *signaller; + struct intel_engine *signaller; int i; if (INTEL_INFO(dev_priv->dev)->gen >= 8) { @@ -2606,8 +2606,8 @@ semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr) return NULL; } -static struct intel_ring_buffer * -semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) +static struct intel_engine * +semaphore_waits_for(struct intel_engine *ring, u32 *seqno) { struct drm_i915_private *dev_priv = ring->dev->dev_private; u32 cmd, ipehr, head; @@ -2649,10 +2649,10 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) return semaphore_wait_to_signaller_ring(ring, ipehr); } -static int semaphore_passed(struct intel_ring_buffer *ring) +static int semaphore_passed(struct intel_engine *ring) { struct drm_i915_private *dev_priv = ring->dev->dev_private; - struct intel_ring_buffer *signaller; + struct intel_engine *signaller; u32 seqno, ctl; ring->hangcheck.deadlock = true; @@ -2671,7 +2671,7 @@ static int semaphore_passed(struct intel_ring_buffer *ring) static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) { - struct intel_ring_buffer 
*ring; + struct intel_engine *ring; int i; for_each_active_ring(ring, dev_priv, i) @@ -2679,7 +2679,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) } static enum intel_ring_hangcheck_action -ring_stuck(struct intel_ring_buffer *ring, u64 acthd) +ring_stuck(struct intel_engine *ring, u64 acthd) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -2735,7 +2735,7 @@ static void i915_hangcheck_elapsed(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; int i; int busy_count = 0, rings_hung = 0; bool stuck[I915_NUM_RINGS] = { 0 }; diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index b29d7b1..a4f9e62 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -326,8 +326,8 @@ TRACE_EVENT(i915_gem_evict_vm, ); TRACE_EVENT(i915_gem_ring_sync_to, - TP_PROTO(struct intel_ring_buffer *from, - struct intel_ring_buffer *to, + TP_PROTO(struct intel_engine *from, + struct intel_engine *to, u32 seqno), TP_ARGS(from, to, seqno), @@ -352,7 +352,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, ); TRACE_EVENT(i915_gem_ring_dispatch, - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags), + TP_PROTO(struct intel_engine *ring, u32 seqno, u32 flags), TP_ARGS(ring, seqno, flags), TP_STRUCT__entry( @@ -375,7 +375,7 @@ TRACE_EVENT(i915_gem_ring_dispatch, ); TRACE_EVENT(i915_gem_ring_flush, - TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush), + TP_PROTO(struct intel_engine *ring, u32 invalidate, u32 flush), TP_ARGS(ring, invalidate, flush), TP_STRUCT__entry( @@ -398,7 +398,7 @@ TRACE_EVENT(i915_gem_ring_flush, ); DECLARE_EVENT_CLASS(i915_gem_request, - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_PROTO(struct intel_engine *ring, u32 seqno), TP_ARGS(ring, seqno), TP_STRUCT__entry( @@ 
-418,12 +418,12 @@ DECLARE_EVENT_CLASS(i915_gem_request, ); DEFINE_EVENT(i915_gem_request, i915_gem_request_add, - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_PROTO(struct intel_engine *ring, u32 seqno), TP_ARGS(ring, seqno) ); TRACE_EVENT(i915_gem_request_complete, - TP_PROTO(struct intel_ring_buffer *ring), + TP_PROTO(struct intel_engine *ring), TP_ARGS(ring), TP_STRUCT__entry( @@ -443,12 +443,12 @@ TRACE_EVENT(i915_gem_request_complete, ); DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_PROTO(struct intel_engine *ring, u32 seqno), TP_ARGS(ring, seqno) ); TRACE_EVENT(i915_gem_request_wait_begin, - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_PROTO(struct intel_engine *ring, u32 seqno), TP_ARGS(ring, seqno), TP_STRUCT__entry( @@ -477,12 +477,12 @@ TRACE_EVENT(i915_gem_request_wait_begin, ); DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_PROTO(struct intel_engine *ring, u32 seqno), TP_ARGS(ring, seqno) ); DECLARE_EVENT_CLASS(i915_ring, - TP_PROTO(struct intel_ring_buffer *ring), + TP_PROTO(struct intel_engine *ring), TP_ARGS(ring), TP_STRUCT__entry( @@ -499,12 +499,12 @@ DECLARE_EVENT_CLASS(i915_ring, ); DEFINE_EVENT(i915_ring, i915_ring_wait_begin, - TP_PROTO(struct intel_ring_buffer *ring), + TP_PROTO(struct intel_engine *ring), TP_ARGS(ring) ); DEFINE_EVENT(i915_ring, i915_ring_wait_end, - TP_PROTO(struct intel_ring_buffer *ring), + TP_PROTO(struct intel_engine *ring), TP_ARGS(ring) ); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c65e7f7..f821147 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1944,7 +1944,7 @@ static int intel_align_height(struct drm_device *dev, int height, bool tiled) int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, - struct intel_ring_buffer 
*pipelined) + struct intel_engine *pipelined) { struct drm_i915_private *dev_priv = dev->dev_private; u32 alignment; @@ -8424,7 +8424,7 @@ out: } void intel_mark_fb_busy(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring) + struct intel_engine *ring) { struct drm_device *dev = obj->base.dev; struct drm_crtc *crtc; @@ -8582,7 +8582,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 flip_mask; - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; int ret; ret = intel_pin_and_fence_fb_obj(dev, obj, ring); @@ -8627,7 +8627,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 flip_mask; - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; int ret; ret = intel_pin_and_fence_fb_obj(dev, obj, ring); @@ -8669,7 +8669,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; int ret; ret = intel_pin_and_fence_fb_obj(dev, obj, ring); @@ -8717,7 +8717,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; uint32_t pf, pipesrc; int ret; @@ -8762,7 +8762,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_ring_buffer *ring; + 
struct intel_engine *ring; uint32_t plane_bit = 0; int len, ret; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d8b540b..23b5abf 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -694,7 +694,7 @@ int intel_pch_rawclk(struct drm_device *dev); int valleyview_cur_cdclk(struct drm_i915_private *dev_priv); void intel_mark_busy(struct drm_device *dev); void intel_mark_fb_busy(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring); + struct intel_engine *ring); void intel_mark_idle(struct drm_device *dev); void intel_crtc_restore_mode(struct drm_crtc *crtc); void intel_crtc_update_dpms(struct drm_crtc *crtc); @@ -726,7 +726,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old); int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined); + struct intel_engine *pipelined); void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); struct drm_framebuffer * __intel_framebuffer_create(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d8adc91..965eec1 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -213,7 +213,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; int ret; BUG_ON(overlay->last_flip_req); @@ -236,7 +236,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; int ret; BUG_ON(overlay->active); @@ -263,7 +263,7 
@@ static int intel_overlay_continue(struct intel_overlay *overlay, { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; u32 flip_addr = overlay->flip_addr; u32 tmp; int ret; @@ -320,7 +320,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; u32 flip_addr = overlay->flip_addr; int ret; @@ -363,7 +363,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; int ret; if (overlay->last_flip_req == 0) @@ -389,7 +389,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; int ret; /* Only wait if there is actually an old frame to release to diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index acfded3..17f636e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3379,7 +3379,7 @@ static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_c static void gen8_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; uint32_t rc6_mask = 0, rp_state_cap; int unused; @@ -3454,7 +3454,7 @@ static void gen8_enable_rps(struct drm_device *dev) static void gen6_enable_rps(struct drm_device *dev) 
{ struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; u32 rp_state_cap; u32 gt_perf_status; u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; @@ -3783,7 +3783,7 @@ static void valleyview_cleanup_gt_powersave(struct drm_device *dev) static void valleyview_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; + struct intel_engine *ring; u32 gtfifodbg, val, rc6_mode = 0; int i; @@ -3914,7 +3914,7 @@ static int ironlake_setup_rc6(struct drm_device *dev) static void ironlake_enable_rc6(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; bool was_interruptible; int ret; @@ -4426,7 +4426,7 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower); bool i915_gpu_busy(void) { struct drm_i915_private *dev_priv; - struct intel_ring_buffer *ring; + struct intel_engine *ring; bool ret = false; int i; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 5d61923..4c3cc44 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -40,7 +40,7 @@ */ #define CACHELINE_BYTES 64 -static inline int ring_space(struct intel_ring_buffer *ring) +static inline int ring_space(struct intel_engine *ring) { int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); if (space < 0) @@ -48,13 +48,13 @@ static inline int ring_space(struct intel_ring_buffer *ring) return space; } -static bool intel_ring_stopped(struct intel_ring_buffer *ring) +static bool intel_ring_stopped(struct intel_engine *ring) { struct drm_i915_private *dev_priv = ring->dev->dev_private; return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); } -void __intel_ring_advance(struct intel_ring_buffer *ring) +void __intel_ring_advance(struct intel_engine *ring) { ring->tail &= 
ring->size - 1; if (intel_ring_stopped(ring)) @@ -63,7 +63,7 @@ void __intel_ring_advance(struct intel_ring_buffer *ring) } static int -gen2_render_ring_flush(struct intel_ring_buffer *ring, +gen2_render_ring_flush(struct intel_engine *ring, u32 invalidate_domains, u32 flush_domains) { @@ -89,7 +89,7 @@ gen2_render_ring_flush(struct intel_ring_buffer *ring, } static int -gen4_render_ring_flush(struct intel_ring_buffer *ring, +gen4_render_ring_flush(struct intel_engine *ring, u32 invalidate_domains, u32 flush_domains) { @@ -184,7 +184,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring, * really our business. That leaves only stall at scoreboard. */ static int -intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) +intel_emit_post_sync_nonzero_flush(struct intel_engine *ring) { u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; @@ -219,7 +219,7 @@ intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) } static int -gen6_render_ring_flush(struct intel_ring_buffer *ring, +gen6_render_ring_flush(struct intel_engine *ring, u32 invalidate_domains, u32 flush_domains) { u32 flags = 0; @@ -271,7 +271,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, } static int -gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) +gen7_render_ring_cs_stall_wa(struct intel_engine *ring) { int ret; @@ -289,7 +289,7 @@ gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) return 0; } -static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value) +static int gen7_ring_fbc_flush(struct intel_engine *ring, u32 value) { int ret; @@ -313,7 +313,7 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value) } static int -gen7_render_ring_flush(struct intel_ring_buffer *ring, +gen7_render_ring_flush(struct intel_engine *ring, u32 invalidate_domains, u32 flush_domains) { u32 flags = 0; @@ -374,7 +374,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring, } static int 
-gen8_render_ring_flush(struct intel_ring_buffer *ring, +gen8_render_ring_flush(struct intel_engine *ring, u32 invalidate_domains, u32 flush_domains) { u32 flags = 0; @@ -414,14 +414,14 @@ gen8_render_ring_flush(struct intel_ring_buffer *ring, } -static void ring_write_tail(struct intel_ring_buffer *ring, +static void ring_write_tail(struct intel_engine *ring, u32 value) { struct drm_i915_private *dev_priv = ring->dev->dev_private; I915_WRITE_TAIL(ring, value); } -u64 intel_ring_get_active_head(struct intel_ring_buffer *ring) +u64 intel_ring_get_active_head(struct intel_engine *ring) { struct drm_i915_private *dev_priv = ring->dev->dev_private; u64 acthd; @@ -437,7 +437,7 @@ u64 intel_ring_get_active_head(struct intel_ring_buffer *ring) return acthd; } -static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) +static void ring_setup_phys_status_page(struct intel_engine *ring) { struct drm_i915_private *dev_priv = ring->dev->dev_private; u32 addr; @@ -448,7 +448,7 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) I915_WRITE(HWS_PGA, addr); } -static bool stop_ring(struct intel_ring_buffer *ring) +static bool stop_ring(struct intel_engine *ring) { struct drm_i915_private *dev_priv = to_i915(ring->dev); @@ -472,7 +472,7 @@ static bool stop_ring(struct intel_ring_buffer *ring) return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; } -static int init_ring_common(struct intel_ring_buffer *ring) +static int init_ring_common(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -550,7 +550,7 @@ out: } static int -init_pipe_control(struct intel_ring_buffer *ring) +init_pipe_control(struct intel_engine *ring) { int ret; @@ -591,7 +591,7 @@ err: return ret; } -static int init_render_ring(struct intel_ring_buffer *ring) +static int init_render_ring(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -647,7 
+647,7 @@ static int init_render_ring(struct intel_ring_buffer *ring) return ret; } -static void render_ring_cleanup(struct intel_ring_buffer *ring) +static void render_ring_cleanup(struct intel_engine *ring) { struct drm_device *dev = ring->dev; @@ -663,12 +663,12 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring) ring->scratch.obj = NULL; } -static int gen6_signal(struct intel_ring_buffer *signaller, +static int gen6_signal(struct intel_engine *signaller, unsigned int num_dwords) { struct drm_device *dev = signaller->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *useless; + struct intel_engine *useless; int i, ret; /* NB: In order to be able to do semaphore MBOX updates for varying @@ -713,7 +713,7 @@ static int gen6_signal(struct intel_ring_buffer *signaller, * This acts like a signal in the canonical semaphore. */ static int -gen6_add_request(struct intel_ring_buffer *ring) +gen6_add_request(struct intel_engine *ring) { int ret; @@ -745,8 +745,8 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, * @seqno - seqno which the waiter will block on */ static int -gen6_ring_sync(struct intel_ring_buffer *waiter, - struct intel_ring_buffer *signaller, +gen6_ring_sync(struct intel_engine *waiter, + struct intel_engine *signaller, u32 seqno) { u32 dw1 = MI_SEMAPHORE_MBOX | @@ -794,7 +794,7 @@ do { \ } while (0) static int -pc_render_add_request(struct intel_ring_buffer *ring) +pc_render_add_request(struct intel_engine *ring) { u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; @@ -842,7 +842,7 @@ pc_render_add_request(struct intel_ring_buffer *ring) } static u32 -gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +gen6_ring_get_seqno(struct intel_engine *ring, bool lazy_coherency) { /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like @@ -856,31 +856,31 @@ 
gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) } static u32 -ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +ring_get_seqno(struct intel_engine *ring, bool lazy_coherency) { return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static void -ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno) +ring_set_seqno(struct intel_engine *ring, u32 seqno) { intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); } static u32 -pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +pc_render_get_seqno(struct intel_engine *ring, bool lazy_coherency) { return ring->scratch.cpu_page[0]; } static void -pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) +pc_render_set_seqno(struct intel_engine *ring, u32 seqno) { ring->scratch.cpu_page[0] = seqno; } static bool -gen5_ring_get_irq(struct intel_ring_buffer *ring) +gen5_ring_get_irq(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -898,7 +898,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring) } static void -gen5_ring_put_irq(struct intel_ring_buffer *ring) +gen5_ring_put_irq(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -911,7 +911,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring) } static bool -i9xx_ring_get_irq(struct intel_ring_buffer *ring) +i9xx_ring_get_irq(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -932,7 +932,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring) } static void -i9xx_ring_put_irq(struct intel_ring_buffer *ring) +i9xx_ring_put_irq(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -948,7 +948,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring) } static bool -i8xx_ring_get_irq(struct intel_ring_buffer *ring) 
+i8xx_ring_get_irq(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -969,7 +969,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring) } static void -i8xx_ring_put_irq(struct intel_ring_buffer *ring) +i8xx_ring_put_irq(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -984,7 +984,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring) spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } -void intel_ring_setup_status_page(struct intel_ring_buffer *ring) +void intel_ring_setup_status_page(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = ring->dev->dev_private; @@ -1047,7 +1047,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) } static int -bsd_ring_flush(struct intel_ring_buffer *ring, +bsd_ring_flush(struct intel_engine *ring, u32 invalidate_domains, u32 flush_domains) { @@ -1064,7 +1064,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring, } static int -i9xx_add_request(struct intel_ring_buffer *ring) +i9xx_add_request(struct intel_engine *ring) { int ret; @@ -1082,7 +1082,7 @@ i9xx_add_request(struct intel_ring_buffer *ring) } static bool -gen6_ring_get_irq(struct intel_ring_buffer *ring) +gen6_ring_get_irq(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1107,7 +1107,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) } static void -gen6_ring_put_irq(struct intel_ring_buffer *ring) +gen6_ring_put_irq(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1125,7 +1125,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) } static bool -hsw_vebox_get_irq(struct intel_ring_buffer *ring) +hsw_vebox_get_irq(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = 
dev->dev_private; @@ -1145,7 +1145,7 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring) } static void -hsw_vebox_put_irq(struct intel_ring_buffer *ring) +hsw_vebox_put_irq(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1163,7 +1163,7 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring) } static bool -gen8_ring_get_irq(struct intel_ring_buffer *ring) +gen8_ring_get_irq(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1189,7 +1189,7 @@ gen8_ring_get_irq(struct intel_ring_buffer *ring) } static void -gen8_ring_put_irq(struct intel_ring_buffer *ring) +gen8_ring_put_irq(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1209,7 +1209,7 @@ gen8_ring_put_irq(struct intel_ring_buffer *ring) } static int -i965_dispatch_execbuffer(struct intel_ring_buffer *ring, +i965_dispatch_execbuffer(struct intel_engine *ring, u64 offset, u32 length, unsigned flags) { @@ -1232,7 +1232,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ #define I830_BATCH_LIMIT (256*1024) static int -i830_dispatch_execbuffer(struct intel_ring_buffer *ring, +i830_dispatch_execbuffer(struct intel_engine *ring, u64 offset, u32 len, unsigned flags) { @@ -1283,7 +1283,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, } static int -i915_dispatch_execbuffer(struct intel_ring_buffer *ring, +i915_dispatch_execbuffer(struct intel_engine *ring, u64 offset, u32 len, unsigned flags) { @@ -1300,7 +1300,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring, return 0; } -static void cleanup_status_page(struct intel_ring_buffer *ring) +static void cleanup_status_page(struct intel_engine *ring) { struct drm_i915_gem_object *obj; @@ -1314,7 +1314,7 @@ static void cleanup_status_page(struct 
intel_ring_buffer *ring) ring->status_page.obj = NULL; } -static int init_status_page(struct intel_ring_buffer *ring) +static int init_status_page(struct intel_engine *ring) { struct drm_i915_gem_object *obj; @@ -1351,7 +1351,7 @@ err_unref: return 0; } -static int init_phys_status_page(struct intel_ring_buffer *ring) +static int init_phys_status_page(struct intel_engine *ring) { struct drm_i915_private *dev_priv = ring->dev->dev_private; @@ -1368,7 +1368,7 @@ static int init_phys_status_page(struct intel_ring_buffer *ring) return 0; } -void intel_destroy_ring_buffer(struct intel_ring_buffer *ring) +void intel_destroy_ring_buffer(struct intel_engine *ring) { if (!ring->obj) return; @@ -1379,7 +1379,7 @@ void intel_destroy_ring_buffer(struct intel_ring_buffer *ring) ring->obj = NULL; } -int intel_allocate_ring_buffer(struct intel_ring_buffer *ring) +int intel_allocate_ring_buffer(struct intel_engine *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -1424,7 +1424,7 @@ err_unref: } static int intel_init_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_engine *ring) { int ret; @@ -1465,7 +1465,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, return ring->init(ring); } -void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) +void intel_cleanup_ring_buffer(struct intel_engine *ring) { struct drm_i915_private *dev_priv = to_i915(ring->dev); @@ -1485,7 +1485,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) cleanup_status_page(ring); } -static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) +static int intel_ring_wait_request(struct intel_engine *ring, int n) { struct drm_i915_gem_request *request; u32 seqno = 0, tail; @@ -1538,7 +1538,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) return 0; } -static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) +static int ring_wait_for_space(struct 
intel_engine *ring, int n) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1586,7 +1586,7 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) return -EBUSY; } -static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) +static int intel_wrap_ring_buffer(struct intel_engine *ring) { uint32_t __iomem *virt; int rem = ring->size - ring->tail; @@ -1608,7 +1608,7 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) return 0; } -int intel_ring_idle(struct intel_ring_buffer *ring) +int intel_ring_idle(struct intel_engine *ring) { u32 seqno; int ret; @@ -1632,7 +1632,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring) } static int -intel_ring_alloc_seqno(struct intel_ring_buffer *ring) +intel_ring_alloc_seqno(struct intel_engine *ring) { if (ring->outstanding_lazy_seqno) return 0; @@ -1650,7 +1650,7 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring) return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); } -static int __intel_ring_prepare(struct intel_ring_buffer *ring, +static int __intel_ring_prepare(struct intel_engine *ring, int bytes) { int ret; @@ -1670,7 +1670,7 @@ static int __intel_ring_prepare(struct intel_ring_buffer *ring, return 0; } -int intel_ring_begin(struct intel_ring_buffer *ring, +int intel_ring_begin(struct intel_engine *ring, int num_dwords) { struct drm_i915_private *dev_priv = ring->dev->dev_private; @@ -1695,7 +1695,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring, } /* Align the ring tail to a cacheline boundary */ -int intel_ring_cacheline_align(struct intel_ring_buffer *ring) +int intel_ring_cacheline_align(struct intel_engine *ring) { int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); int ret; @@ -1716,7 +1716,7 @@ int intel_ring_cacheline_align(struct intel_ring_buffer *ring) return 0; } -void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) +void intel_ring_init_seqno(struct 
intel_engine *ring, u32 seqno) { struct drm_i915_private *dev_priv = ring->dev->dev_private; @@ -1733,7 +1733,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) ring->hangcheck.seqno = seqno; } -static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, +static void gen6_bsd_ring_write_tail(struct intel_engine *ring, u32 value) { struct drm_i915_private *dev_priv = ring->dev->dev_private; @@ -1766,7 +1766,7 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); } -static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring, +static int gen6_bsd_ring_flush(struct intel_engine *ring, u32 invalidate, u32 flush) { uint32_t cmd; @@ -1802,7 +1802,7 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring, } static int -gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, +gen8_ring_dispatch_execbuffer(struct intel_engine *ring, u64 offset, u32 len, unsigned flags) { @@ -1826,7 +1826,7 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, } static int -hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, +hsw_ring_dispatch_execbuffer(struct intel_engine *ring, u64 offset, u32 len, unsigned flags) { @@ -1847,7 +1847,7 @@ hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, } static int -gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, +gen6_ring_dispatch_execbuffer(struct intel_engine *ring, u64 offset, u32 len, unsigned flags) { @@ -1869,7 +1869,7 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, /* Blitter support (SandyBridge+) */ -static int gen6_ring_flush(struct intel_ring_buffer *ring, +static int gen6_ring_flush(struct intel_engine *ring, u32 invalidate, u32 flush) { struct drm_device *dev = ring->dev; @@ -1912,7 +1912,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, int intel_init_render_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; 
- struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; @@ -2018,7 +2018,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + struct intel_engine *ring = &dev_priv->ring[RCS]; int ret; if (INTEL_INFO(dev)->gen >= 6) { @@ -2081,7 +2081,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) int intel_init_bsd_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; + struct intel_engine *ring = &dev_priv->ring[VCS]; ring->write_tail = ring_write_tail; if (INTEL_INFO(dev)->gen >= 6) { @@ -2152,7 +2152,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) int intel_init_bsd2_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = &dev_priv->ring[VCS2]; + struct intel_engine *ring = &dev_priv->ring[VCS2]; if ((INTEL_INFO(dev)->gen != 8)) { DRM_ERROR("No dual-BSD ring on non-BDW machine\n"); @@ -2196,7 +2196,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) int intel_init_blt_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; + struct intel_engine *ring = &dev_priv->ring[BCS]; ring->write_tail = ring_write_tail; ring->flush = gen6_ring_flush; @@ -2241,7 +2241,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) int intel_init_vebox_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = &dev_priv->ring[VECS]; + struct intel_engine *ring = &dev_priv->ring[VECS]; ring->write_tail 
= ring_write_tail; ring->flush = gen6_ring_flush; @@ -2279,7 +2279,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) } int -intel_ring_flush_all_caches(struct intel_ring_buffer *ring) +intel_ring_flush_all_caches(struct intel_engine *ring) { int ret; @@ -2297,7 +2297,7 @@ intel_ring_flush_all_caches(struct intel_ring_buffer *ring) } int -intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) +intel_ring_invalidate_all_caches(struct intel_engine *ring) { uint32_t flush_domains; int ret; @@ -2317,7 +2317,7 @@ intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) } void -intel_stop_ring_buffer(struct intel_ring_buffer *ring) +intel_stop_ring_buffer(struct intel_engine *ring) { int ret; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 680e451..50cc525 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -54,7 +54,7 @@ struct intel_ring_hangcheck { bool deadlock; }; -struct intel_ring_buffer { +struct intel_engine { const char *name; enum intel_ring_id { RCS = 0x0, @@ -90,33 +90,33 @@ struct intel_ring_buffer { unsigned irq_refcount; /* protected by dev_priv->irq_lock */ u32 irq_enable_mask; /* bitmask to enable ring interrupt */ u32 trace_irq_seqno; - bool __must_check (*irq_get)(struct intel_ring_buffer *ring); - void (*irq_put)(struct intel_ring_buffer *ring); + bool __must_check (*irq_get)(struct intel_engine *ring); + void (*irq_put)(struct intel_engine *ring); - int (*init)(struct intel_ring_buffer *ring); + int (*init)(struct intel_engine *ring); - void (*write_tail)(struct intel_ring_buffer *ring, + void (*write_tail)(struct intel_engine *ring, u32 value); - int __must_check (*flush)(struct intel_ring_buffer *ring, + int __must_check (*flush)(struct intel_engine *ring, u32 invalidate_domains, u32 flush_domains); - int (*add_request)(struct intel_ring_buffer *ring); + int (*add_request)(struct intel_engine *ring); /* Some chipsets 
are not quite as coherent as advertised and need * an expensive kick to force a true read of the up-to-date seqno. * However, the up-to-date seqno is not always required and the last * seen value is good enough. Note that the seqno will always be * monotonic, even if not coherent. */ - u32 (*get_seqno)(struct intel_ring_buffer *ring, + u32 (*get_seqno)(struct intel_engine *ring, bool lazy_coherency); - void (*set_seqno)(struct intel_ring_buffer *ring, + void (*set_seqno)(struct intel_engine *ring, u32 seqno); - int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, + int (*dispatch_execbuffer)(struct intel_engine *ring, u64 offset, u32 length, unsigned flags); #define I915_DISPATCH_SECURE 0x1 #define I915_DISPATCH_PINNED 0x2 - void (*cleanup)(struct intel_ring_buffer *ring); + void (*cleanup)(struct intel_engine *ring); struct { u32 sync_seqno[I915_NUM_RINGS-1]; @@ -129,10 +129,10 @@ struct intel_ring_buffer { } mbox; /* AKA wait() */ - int (*sync_to)(struct intel_ring_buffer *ring, - struct intel_ring_buffer *to, + int (*sync_to)(struct intel_engine *ring, + struct intel_engine *to, u32 seqno); - int (*signal)(struct intel_ring_buffer *signaller, + int (*signal)(struct intel_engine *signaller, /* num_dwords needed by caller */ unsigned int num_dwords); } semaphore; @@ -210,20 +210,20 @@ struct intel_ring_buffer { }; static inline bool -intel_ring_initialized(struct intel_ring_buffer *ring) +intel_ring_initialized(struct intel_engine *ring) { return ring->obj != NULL; } static inline unsigned -intel_ring_flag(struct intel_ring_buffer *ring) +intel_ring_flag(struct intel_engine *ring) { return 1 << ring->id; } static inline u32 -intel_ring_sync_index(struct intel_ring_buffer *ring, - struct intel_ring_buffer *other) +intel_ring_sync_index(struct intel_engine *ring, + struct intel_engine *other) { int idx; @@ -241,7 +241,7 @@ intel_ring_sync_index(struct intel_ring_buffer *ring, } static inline u32 -intel_read_status_page(struct intel_ring_buffer *ring, 
+intel_read_status_page(struct intel_engine *ring, int reg) { /* Ensure that the compiler doesn't optimize away the load. */ @@ -250,7 +250,7 @@ intel_read_status_page(struct intel_ring_buffer *ring, } static inline void -intel_write_status_page(struct intel_ring_buffer *ring, +intel_write_status_page(struct intel_engine *ring, int reg, u32 value) { ring->status_page.page_addr[reg] = value; @@ -275,27 +275,27 @@ intel_write_status_page(struct intel_ring_buffer *ring, #define I915_GEM_HWS_SCRATCH_INDEX 0x30 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) -void intel_stop_ring_buffer(struct intel_ring_buffer *ring); -void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); +void intel_stop_ring_buffer(struct intel_engine *ring); +void intel_cleanup_ring_buffer(struct intel_engine *ring); -int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); -int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring); -static inline void intel_ring_emit(struct intel_ring_buffer *ring, +int __must_check intel_ring_begin(struct intel_engine *ring, int n); +int __must_check intel_ring_cacheline_align(struct intel_engine *ring); +static inline void intel_ring_emit(struct intel_engine *ring, u32 data) { iowrite32(data, ring->virtual_start + ring->tail); ring->tail += 4; } -static inline void intel_ring_advance(struct intel_ring_buffer *ring) +static inline void intel_ring_advance(struct intel_engine *ring) { ring->tail &= ring->size - 1; } -void __intel_ring_advance(struct intel_ring_buffer *ring); +void __intel_ring_advance(struct intel_engine *ring); -int __must_check intel_ring_idle(struct intel_ring_buffer *ring); -void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno); -int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); -int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); +int __must_check intel_ring_idle(struct intel_engine *ring); +void 
intel_ring_init_seqno(struct intel_engine *ring, u32 seqno); +int intel_ring_flush_all_caches(struct intel_engine *ring); +int intel_ring_invalidate_all_caches(struct intel_engine *ring); void intel_init_rings_early(struct drm_device *dev); int intel_init_render_ring_buffer(struct drm_device *dev); @@ -304,24 +304,24 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev); int intel_init_blt_ring_buffer(struct drm_device *dev); int intel_init_vebox_ring_buffer(struct drm_device *dev); -u64 intel_ring_get_active_head(struct intel_ring_buffer *ring); -void intel_ring_setup_status_page(struct intel_ring_buffer *ring); +u64 intel_ring_get_active_head(struct intel_engine *ring); +void intel_ring_setup_status_page(struct intel_engine *ring); -void intel_destroy_ring_buffer(struct intel_ring_buffer *ring); -int intel_allocate_ring_buffer(struct intel_ring_buffer *ring); +void intel_destroy_ring_buffer(struct intel_engine *ring); +int intel_allocate_ring_buffer(struct intel_engine *ring); -static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring) +static inline u32 intel_ring_get_tail(struct intel_engine *ring) { return ring->tail; } -static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring) +static inline u32 intel_ring_get_seqno(struct intel_engine *ring) { BUG_ON(ring->outstanding_lazy_seqno == 0); return ring->outstanding_lazy_seqno; } -static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) +static inline void i915_trace_irq_get(struct intel_engine *ring, u32 seqno) { if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) ring->trace_irq_seqno = seqno;