@@ -43,6 +43,7 @@
#define ACTIVE_LIST 1
#define FLUSHING_LIST 2
#define INACTIVE_LIST 3
+#define BLIT_LIST 4
static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
@@ -80,6 +81,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
lock = &dev_priv->mm.active_list_lock;
head = &dev_priv->render_ring.active_list;
break;
+	case BLIT_LIST:
+		seq_printf(m, "Blit:\n");
+		lock = &dev_priv->mm.active_list_lock;
+		head = &dev_priv->blit_ring.active_list;
+		break;
case INACTIVE_LIST:
seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
@@ -242,6 +247,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(GTIIR));
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
+ if (HAS_BLIT_SPLIT(dev)) {
+ seq_printf(m, "Blitter Interrupt mask: %08x\n",
+ I915_READ(GEN6_BLITTER_IMR));
+ }
}
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
@@ -826,6 +835,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+ {"i915_gem_blit_active", i915_gem_object_list_info, 0, (void *) BLIT_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -135,6 +135,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
if (HAS_BSD(dev))
intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ if (HAS_BLIT_SPLIT(dev))
+ intel_cleanup_ring_buffer(dev, &dev_priv->blit_ring);
mutex_unlock(&dev->struct_mutex);
/* Clear the HWS virtual address at teardown */
@@ -765,6 +767,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BSD:
value = HAS_BSD(dev);
break;
+ case I915_PARAM_HAS_BLIT_SPLIT:
+ value = HAS_BLIT_SPLIT(dev);
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -136,11 +136,13 @@ static const struct intel_device_info intel_ironlake_m_info = {
static const struct intel_device_info intel_sandybridge_d_info = {
.gen = 6, .is_i965g = 1, .is_i9xx = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_blit_split = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
.gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_blit_split = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -212,6 +212,7 @@ struct intel_device_info {
u8 has_pipe_cxsr : 1;
u8 has_hotplug : 1;
u8 cursor_needs_physical : 1;
+ u8 has_blit_split : 1;
};
enum no_fbc_reason {
@@ -244,6 +245,7 @@ typedef struct drm_i915_private {
struct pci_dev *bridge_dev;
struct intel_ring_buffer render_ring;
struct intel_ring_buffer bsd_ring;
+ struct intel_ring_buffer blit_ring;
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
@@ -292,6 +294,7 @@ typedef struct drm_i915_private {
u32 flush_rings;
#define FLUSH_RENDER_RING 0x1
#define FLUSH_BSD_RING 0x2
+#define FLUSH_BLIT_RING		0x4
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
@@ -1191,6 +1194,8 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+#define HAS_BLIT_SPLIT(dev) (INTEL_INFO(dev)->has_blit_split)
+
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
@@ -1797,6 +1797,8 @@ i915_gem_retire_requests(struct drm_device *dev)
i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
if (HAS_BSD(dev))
i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+ if (HAS_BLIT_SPLIT(dev))
+ i915_gem_retire_requests_ring(dev, &dev_priv->blit_ring);
}
void
@@ -1815,7 +1817,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
if (!dev_priv->mm.suspended &&
(!list_empty(&dev_priv->render_ring.request_list) ||
(HAS_BSD(dev) &&
- !list_empty(&dev_priv->bsd_ring.request_list))))
+ !list_empty(&dev_priv->bsd_ring.request_list)) ||
+		(HAS_BLIT_SPLIT(dev) &&
+		!list_empty(&dev_priv->blit_ring.request_list))))
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
@@ -1851,13 +1854,11 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
ring->user_irq_get(dev, ring);
if (interruptible)
ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(
- ring->get_gem_seqno(dev, ring), seqno)
+ i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
else
wait_event(ring->irq_queue,
- i915_seqno_passed(
- ring->get_gem_seqno(dev, ring), seqno)
+ i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
ring->user_irq_put(dev, ring);
@@ -1910,6 +1911,11 @@ i915_gem_flush(struct drm_device *dev,
dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
invalidate_domains,
flush_domains);
+
+ if (HAS_BLIT_SPLIT(dev))
+ dev_priv->blit_ring.flush(dev, &dev_priv->blit_ring,
+ invalidate_domains,
+ flush_domains);
}
/**
@@ -2032,7 +2038,8 @@ i915_gpu_idle(struct drm_device *dev)
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list) &&
(!HAS_BSD(dev) ||
- list_empty(&dev_priv->bsd_ring.active_list)));
+ list_empty(&dev_priv->bsd_ring.active_list)) &&
+		(!HAS_BLIT_SPLIT(dev) ||
+		list_empty(&dev_priv->blit_ring.active_list)));
spin_unlock(&dev_priv->mm.active_list_lock);
if (lists_empty)
@@ -2057,6 +2064,16 @@ i915_gpu_idle(struct drm_device *dev)
return ret;
}
+ if (HAS_BLIT_SPLIT(dev)) {
+ seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+ &dev_priv->blit_ring);
+ if (seqno2 == 0)
+ return -ENOMEM;
+
+ ret = i915_wait_request(dev, seqno2, &dev_priv->blit_ring);
+ if (ret)
+ return ret;
+ }
return ret;
}
@@ -3034,6 +3051,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
dev_priv->flush_rings |= FLUSH_RENDER_RING;
else if (obj_priv->ring == &dev_priv->bsd_ring)
dev_priv->flush_rings |= FLUSH_BSD_RING;
+ else if (obj_priv->ring == &dev_priv->blit_ring)
+ dev_priv->flush_rings |= FLUSH_BLIT_RING;
}
dev->invalidate_domains |= invalidate_domains;
@@ -3573,6 +3592,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
ring = &dev_priv->bsd_ring;
+	} else if (args->flags & I915_EXEC_BLIT) {
+		if (!HAS_BLIT_SPLIT(dev)) {
+			DRM_ERROR("execbuf with blit ring on hw without blit split\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->blit_ring;
} else {
ring = &dev_priv->render_ring;
}
@@ -3780,6 +3805,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
(void)i915_add_request(dev, file_priv,
dev->flush_domains,
&dev_priv->bsd_ring);
+ if (dev_priv->flush_rings & FLUSH_BLIT_RING)
+ (void)i915_add_request(dev, file_priv,
+ dev->flush_domains,
+ &dev_priv->blit_ring);
}
for (i = 0; i < args->buffer_count; i++) {
@@ -4398,7 +4428,8 @@ i915_gem_idle(struct drm_device *dev)
if (dev_priv->mm.suspended ||
(dev_priv->render_ring.gem_object == NULL) ||
(HAS_BSD(dev) &&
- dev_priv->bsd_ring.gem_object == NULL)) {
+ dev_priv->bsd_ring.gem_object == NULL) ||
+		(HAS_BLIT_SPLIT(dev) &&
+		dev_priv->blit_ring.gem_object == NULL)) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -4529,10 +4560,20 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
goto cleanup_render_ring;
}
+ if (HAS_BLIT_SPLIT(dev)) {
+ dev_priv->blit_ring = blit_ring;
+ ret = intel_init_ring_buffer(dev, &dev_priv->blit_ring);
+ if (ret)
+ goto cleanup_bsd_ring;
+ }
+
dev_priv->next_seqno = 1;
return 0;
+cleanup_bsd_ring:
+ if (HAS_BSD(dev))
+ intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
cleanup_render_ring:
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
cleanup_pipe_control:
@@ -4549,6 +4590,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
if (HAS_BSD(dev))
intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ if (HAS_BLIT_SPLIT(dev))
+ intel_cleanup_ring_buffer(dev, &dev_priv->blit_ring);
if (HAS_PIPE_CONTROL(dev))
i915_gem_cleanup_pipe_control(dev);
}
@@ -4580,12 +4623,14 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
spin_lock(&dev_priv->mm.active_list_lock);
BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
+	BUG_ON(HAS_BLIT_SPLIT(dev) &&
+	       !list_empty(&dev_priv->blit_ring.active_list));
spin_unlock(&dev_priv->mm.active_list_lock);
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
+	BUG_ON(HAS_BLIT_SPLIT(dev) &&
+	       !list_empty(&dev_priv->blit_ring.request_list));
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -4645,6 +4690,10 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
}
+ if (HAS_BLIT_SPLIT(dev)) {
+ INIT_LIST_HEAD(&dev_priv->blit_ring.active_list);
+ INIT_LIST_HEAD(&dev_priv->blit_ring.request_list);
+ }
for (i = 0; i < 16; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
@@ -4894,6 +4943,8 @@ i915_gpu_is_active(struct drm_device *dev)
list_empty(&dev_priv->render_ring.active_list);
if (HAS_BSD(dev))
lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
+ if (HAS_BLIT_SPLIT(dev))
+ lists_empty &= list_empty(&dev_priv->blit_ring.active_list);
spin_unlock(&dev_priv->mm.active_list_lock);
return !lists_empty;
@@ -34,17 +34,87 @@
static struct drm_i915_gem_object *
i915_gem_next_active_object(struct drm_device *dev,
struct list_head **render_iter,
- struct list_head **bsd_iter)
+ struct list_head **bsd_iter,
+ struct list_head **blit_iter)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
+ struct drm_i915_gem_object *blit_obj = NULL;
if (*render_iter != &dev_priv->render_ring.active_list)
render_obj = list_entry(*render_iter,
struct drm_i915_gem_object,
list);
- if (HAS_BSD(dev)) {
+	/*
+	 * On split-blitter hardware, merge the render, BSD and blit
+	 * active lists in seqno order.
+	 */
+	if (HAS_BLIT_SPLIT(dev)) {
+		u32 render_seq, blit_seq, bsd_seq;
+
+		if (*blit_iter != &dev_priv->blit_ring.active_list)
+			blit_obj = list_entry(*blit_iter,
+					      struct drm_i915_gem_object,
+					      list);
+
+		if (HAS_BSD(dev) &&
+		    *bsd_iter != &dev_priv->bsd_ring.active_list)
+			bsd_obj = list_entry(*bsd_iter,
+					     struct drm_i915_gem_object,
+					     list);
+
+		/*
+		 * last_rendering_seqno can never be 0, so use 0 to mark
+		 * an exhausted (or absent) list.
+		 */
+		render_seq = render_obj ? render_obj->last_rendering_seqno : 0;
+		blit_seq = blit_obj ? blit_obj->last_rendering_seqno : 0;
+		bsd_seq = bsd_obj ? bsd_obj->last_rendering_seqno : 0;
+
+		/*
+		 * Return the object with the oldest seqno and advance its
+		 * iterator; live seqnos are globally unique, so ties are
+		 * impossible.  Without the advance the caller's loop would
+		 * never make progress.
+		 */
+		if (render_seq &&
+		    (!blit_seq || render_seq < blit_seq) &&
+		    (!bsd_seq || render_seq < bsd_seq)) {
+			*render_iter = (*render_iter)->next;
+			return render_obj;
+		}
+		if (bsd_seq && (!blit_seq || bsd_seq < blit_seq)) {
+			*bsd_iter = (*bsd_iter)->next;
+			return bsd_obj;
+		}
+		if (blit_seq) {
+			*blit_iter = (*blit_iter)->next;
+			return blit_obj;
+		}
+		/* All three lists exhausted. */
+		return NULL;
+
+ } else if (HAS_BSD(dev)) {
if (*bsd_iter != &dev_priv->bsd_ring.active_list)
bsd_obj = list_entry(*bsd_iter,
struct drm_i915_gem_object,
@@ -83,10 +153,11 @@ mark_free(struct drm_i915_gem_object *obj_priv,
return drm_mm_scan_add_block(obj_priv->gtt_space);
}
-#define i915_for_each_active_object(OBJ, R, B) \
+#define i915_for_each_active_object(OBJ, R, B, BLIT) \
*(R) = dev_priv->render_ring.active_list.next; \
*(B) = dev_priv->bsd_ring.active_list.next; \
- while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
+ *(BLIT) = dev_priv->blit_ring.active_list.next; \
+	while (((OBJ) = i915_gem_next_active_object(dev, (R), (B), (BLIT))) != NULL)
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
@@ -94,7 +165,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
struct drm_i915_gem_object *obj_priv;
- struct list_head *render_iter, *bsd_iter;
+ struct list_head *render_iter, *bsd_iter, *blit_iter;
int ret = 0;
i915_gem_retire_requests(dev);
@@ -137,7 +208,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
}
/* Now merge in the soon-to-be-expired objects... */
- i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+ i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter, &blit_iter) {
/* Does the object require an outstanding flush? */
if (obj_priv->base.write_domain || obj_priv->pin_count)
continue;
@@ -154,7 +225,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
if (mark_free(obj_priv, &unwind_list))
goto found;
}
- i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+ i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter, &blit_iter) {
		if (!obj_priv->base.write_domain || obj_priv->pin_count)
continue;
@@ -312,6 +312,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
u32 de_iir, gt_iir, de_ier, pch_iir;
struct drm_i915_master_private *master_priv;
struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *blit_ring = &dev_priv->blit_ring;
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
@@ -345,6 +346,15 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
if (gt_iir & GT_BSD_USER_INTERRUPT)
DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ if (HAS_BLIT_SPLIT(dev) && (gt_iir & GT_BLIT_USER_INTERRUPT)) {
+ u32 seqno = blit_ring->get_gem_seqno(dev, blit_ring);
+ blit_ring->irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
+ DRM_WAKEUP(&dev_priv->blit_ring.irq_queue);
+ dev_priv->hangcheck_count = 0;
+		mod_timer(&dev_priv->hangcheck_timer,
+			  jiffies + DRM_I915_HANGCHECK_PERIOD);
+ }
if (de_iir & DE_GSE)
ironlake_opregion_gse_intr(dev);
@@ -1367,6 +1377,12 @@ void i915_hangcheck_elapsed(unsigned long data)
missed_wakeup = true;
}
+ if (HAS_BLIT_SPLIT(dev) && dev_priv->blit_ring.waiting_gem_seqno &&
+ waitqueue_active(&dev_priv->blit_ring.irq_queue)) {
+ DRM_WAKEUP(&dev_priv->blit_ring.irq_queue);
+ missed_wakeup = true;
+ }
+
if (missed_wakeup)
DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
return;
@@ -1436,17 +1452,18 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
(void) I915_READ(DEIER);
- /* Gen6 only needs render pipe_control now */
if (IS_GEN6(dev))
- render_mask = GT_PIPE_NOTIFY;
+ render_mask = GT_PIPE_NOTIFY | GT_BLIT_USER_INTERRUPT;
dev_priv->gt_irq_mask_reg = ~render_mask;
dev_priv->gt_irq_enable_reg = render_mask;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev)) {
I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
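+		/* The blitter has a dedicated IMR; unmask only its user interrupt. */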
+ I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
+ }
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
@@ -1510,6 +1527,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
if (HAS_BSD(dev))
DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+ if (HAS_BLIT_SPLIT(dev))
+ DRM_INIT_WAITQUEUE(&dev_priv->blit_ring.irq_queue);
+
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
if (HAS_PCH_SPLIT(dev))
@@ -192,6 +192,7 @@
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
+#define MI_FLUSH_DW MI_INSTR(0x26, 2)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
@@ -476,6 +477,14 @@
#define BSD_RING_ACTHD 0x04074
#define BSD_HWS_PGA 0x04080
+/* Blit ring */
+#define BLIT_RING_TAIL 0x22030
+#define BLIT_RING_HEAD 0x22034
+#define BLIT_RING_START 0x22038
+#define BLIT_RING_CTL 0x2203c
+#define BLIT_RING_ACTHD 0x22074
+#define BLIT_HWS_PGA 0x24080
+
/*
* Framebuffer compression (915+ only)
*/
@@ -2516,7 +2525,7 @@
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
#define GT_BSD_USER_INTERRUPT (1 << 5)
-
+#define GT_BLIT_USER_INTERRUPT (1 << 22)
#define GTISR 0x44010
#define GTIMR 0x44014
@@ -488,6 +488,109 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
return 0;
}
+/* blit */
+static void blit_setup_status_page(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ I915_WRITE(BLIT_HWS_PGA, ring->status_page.gfx_addr);
+ I915_READ(BLIT_HWS_PGA);
+}
+
+static void blit_ring_flush(struct drm_device *dev,
+			    struct intel_ring_buffer *ring,
+			    u32 invalidate_domains,
+			    u32 flush_domains)
+{
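+	/*
+	 * MI_FLUSH_DW drains the blitter; the three trailing zero dwords
+	 * leave the optional post-sync write disabled.
+	 */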
+ intel_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_FLUSH_DW);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_advance(dev, ring);
+}
+
+static inline unsigned int blit_ring_get_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(BLIT_RING_HEAD) & HEAD_ADDR;
+}
+
+static inline unsigned int blit_ring_get_tail(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(BLIT_RING_TAIL) & TAIL_ADDR;
+}
+
+static inline unsigned int blit_ring_get_active_head(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return I915_READ(BLIT_RING_ACTHD);
+}
+
+static inline void blit_ring_advance_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ I915_WRITE(BLIT_RING_TAIL, ring->tail);
+}
+
+static u32 blit_ring_add_request(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_file *file_priv,
+ u32 flush_domains)
+{
+ u32 seqno;
+
+ seqno = i915_gem_get_seqno(dev);
+
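+	/* Write the seqno into the status page, then raise a user interrupt. */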
+ intel_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(dev, ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(dev, ring, seqno);
+ intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+ intel_ring_advance(dev, ring);
+
+ DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+
+ return seqno;
+}
+
+static void blit_ring_get_user_irq(struct drm_device *dev,
+				   struct intel_ring_buffer *ring)
+{
+	/*
+	 * Nothing to do: the blitter user interrupt is left unmasked
+	 * at irq postinstall time.
+	 */
+}
+
+static void blit_ring_put_user_irq(struct drm_device *dev,
+				   struct intel_ring_buffer *ring)
+{
+	/* Nothing to do; see blit_ring_get_user_irq(). */
+}
+
+static u32 blit_ring_get_gem_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static int blit_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_execbuffer2 *exec,
+ struct drm_clip_rect *cliprects,
+ uint64_t exec_offset)
+{
+	uint32_t exec_start;
+
+	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
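+	/* 965+ style batch-buffer start; the batch executes non-secure. */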
+ intel_ring_begin(dev, ring, 2);
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
+ (2 << 6) | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(dev, ring, exec_start);
+ intel_ring_advance(dev, ring);
+ return 0;
+}
static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
@@ -866,3 +969,39 @@ struct intel_ring_buffer bsd_ring = {
.status_page = {NULL, 0, NULL},
.map = {0,}
};
+
+struct intel_ring_buffer blit_ring = {
+ .name = "blit ring",
+ .regs = {
+ .ctl = BLIT_RING_CTL,
+ .head = BLIT_RING_HEAD,
+ .tail = BLIT_RING_TAIL,
+ .start = BLIT_RING_START
+ },
+ .ring_flag = I915_EXEC_BLIT,
+ .size = 32 * PAGE_SIZE,
+ .alignment = PAGE_SIZE,
+ .virtual_start = NULL,
+ .dev = NULL,
+ .gem_object = NULL,
+ .head = 0,
+ .tail = 0,
+ .space = 0,
+ .user_irq_refcount = 0,
+ .irq_gem_seqno = 0,
+ .waiting_gem_seqno = 0,
+ .setup_status_page = blit_setup_status_page,
+ .init = init_ring_common,
+ .get_head = blit_ring_get_head,
+ .get_tail = blit_ring_get_tail,
+ .get_active_head = blit_ring_get_active_head,
+ .advance_ring = blit_ring_advance_ring,
+ .flush = blit_ring_flush,
+ .add_request = blit_ring_add_request,
+ .get_gem_seqno = blit_ring_get_gem_seqno,
+ .user_irq_get = blit_ring_get_user_irq,
+ .user_irq_put = blit_ring_put_user_irq,
+ .dispatch_gem_execbuffer = blit_ring_dispatch_gem_execbuffer,
+ .status_page = {NULL, 0, NULL},
+ .map = {0,}
+};
@@ -127,5 +127,6 @@ u32 intel_ring_get_seqno(struct drm_device *dev,
extern struct intel_ring_buffer render_ring;
extern struct intel_ring_buffer bsd_ring;
+extern struct intel_ring_buffer blit_ring;
#endif /* _INTEL_RINGBUFFER_H_ */
@@ -286,6 +286,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
+#define I915_PARAM_HAS_BLIT_SPLIT 11
typedef struct drm_i915_getparam {
int param;
@@ -629,6 +630,7 @@ struct drm_i915_gem_execbuffer2 {
__u64 cliprects_ptr;
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (1<<1)
+#define I915_EXEC_BLIT (1<<2)
__u64 flags;
__u64 rsvd1;
__u64 rsvd2;
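
For reference, a minimal userspace sketch of driving the new interface (hypothetical helper names, not part of this patch; assumes libdrm and a drm_i915_gem_execbuffer2 already populated with its buffer list and batch length, error handling trimmed):

	#include <string.h>
	#include <xf86drm.h>
	#include "i915_drm.h"

	/* Probe the new getparam before touching the blit ring. */
	static int has_blit_split(int fd)
	{
		struct drm_i915_getparam gp;
		int value = 0;

		memset(&gp, 0, sizeof(gp));
		gp.param = I915_PARAM_HAS_BLIT_SPLIT;
		gp.value = &value;
		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0;
		return value;
	}

	static int submit_blit(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
	{
		if (!has_blit_split(fd))
			return -1;	/* caller falls back to the render ring */
		execbuf->flags |= I915_EXEC_BLIT;	/* route to the blitter CS */
		return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
	}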