
[i-g-t] lib/rendercopy: Refactoring rendercopy libraries

Message ID 20171206155807.6259-1-lukasz.kalamarz@intel.com (mailing list archive)
State New, archived

Commit Message

Kalamarz, Lukasz Dec. 6, 2017, 3:58 p.m. UTC
The current implementation of these libraries is not aligned with our
coding style. In addition, some methods were copied from other gens and
merely renamed, so their names do not reflect the gen they belong to.
This patch fixes both issues. No functional changes were made.

Signed-off-by: Lukasz Kalamarz <lukasz.kalamarz@intel.com>
Cc: Arkadiusz Hiler <arkadiusz.hiler@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
---
 lib/rendercopy_gen7.c |   4 +-
 lib/rendercopy_gen8.c | 165 +++++++++++++++++++++++---------------------------
 lib/rendercopy_gen9.c |  79 ++++++++++++------------
 3 files changed, 117 insertions(+), 131 deletions(-)
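
For context, the following is a minimal, self-contained C sketch (not the igt
sources themselves; every name in it is illustrative) of the two patterns the
patch converges on for gen8/gen9: a single file-scope annotations context that
state helpers reference directly instead of having the context pointer threaded
through every call, and spaces around binary operators such as "9 - 2".

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        #define MAX_ANNOTATIONS 33

        struct annotations_context {
                struct { uint32_t start, end; } annotations[MAX_ANNOTATIONS];
                int index;
        } aub_annotations;      /* file-scope, as the patch introduces for gen8 */

        static void annotation_init(struct annotations_context *ctx)
        {
                ctx->index = 1; /* slot 0 is reserved for the batch itself */
        }

        static void annotation_add_state(struct annotations_context *ctx,
                                         uint32_t start_offset, size_t size)
        {
                assert(ctx->index < MAX_ANNOTATIONS);

                ctx->annotations[ctx->index].start = start_offset;
                ctx->annotations[ctx->index].end = start_offset + size;
                ctx->index++;
        }

        /* A state-emitting helper no longer takes the context as a parameter;
         * it passes the file-scope aub_annotations instead. */
        static uint32_t emit_some_state(uint32_t offset, size_t size)
        {
                annotation_add_state(&aub_annotations, offset, size);
                return offset;
        }

        int main(void)
        {
                annotation_init(&aub_annotations);
                emit_some_state(64, 9 - 2);     /* spaced operators, per the patch */

                printf("annotations recorded: %d\n", aub_annotations.index - 1);
                return 0;
        }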

Comments

Katarzyna Dec Jan. 4, 2018, 12:24 p.m. UTC | #1
On Wed, Dec 06, 2017 at 04:58:07PM +0100, Lukasz Kalamarz wrote:
> The current implementation of these libraries is not aligned with our
> coding style. In addition, some methods were copied from other gens and
> merely renamed, so their names do not reflect the gen they belong to.
> This patch fixes both issues. No functional changes were made.
> 
> Signed-off-by: Lukasz Kalamarz <lukasz.kalamarz@intel.com>
> Cc: Arkadiusz Hiler <arkadiusz.hiler@intel.com>
> Cc: Imre Deak <imre.deak@intel.com>

LGTM. More changes are coming to this library and to other similar
ones (media and gpgpu).
Reviewed-by: Katarzyna Dec <katarzyna.dec@intel.com>


> -- 
> 2.9.5
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Patch

diff --git a/lib/rendercopy_gen7.c b/lib/rendercopy_gen7.c
index 3b92406..d76bb8d 100644
--- a/lib/rendercopy_gen7.c
+++ b/lib/rendercopy_gen7.c
@@ -68,7 +68,7 @@  batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint
 }
 
 static void
-gen7_render_flush(struct intel_batchbuffer *batch,
+gen6_render_flush(struct intel_batchbuffer *batch,
 		  drm_intel_context *context, uint32_t batch_end)
 {
 	int ret;
@@ -581,6 +581,6 @@  void gen7_render_copyfunc(struct intel_batchbuffer *batch,
 	batch_end = ALIGN(batch_end, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
-	gen7_render_flush(batch, context, batch_end);
+	gen6_render_flush(batch, context, batch_end);
 	intel_batchbuffer_reset(batch);
 }
diff --git a/lib/rendercopy_gen8.c b/lib/rendercopy_gen8.c
index a7fc2c4..c871b2f 100644
--- a/lib/rendercopy_gen8.c
+++ b/lib/rendercopy_gen8.c
@@ -77,15 +77,15 @@  struct annotations_context {
 	drm_intel_aub_annotation annotations[MAX_ANNOTATIONS];
 	int index;
 	uint32_t offset;
-};
+} aub_annotations;
 
-static void annotation_init(struct annotations_context *aub)
+static void annotation_init(struct annotations_context *ctx)
 {
-	/* aub->annotations is an array keeping a list of annotations of the
-	 * batch buffer ordered by offset. aub->annotations[0] is thus left
+	/* ctx->annotations is an array keeping a list of annotations of the
+	 * batch buffer ordered by offset. ctx->annotations[0] is thus left
 	 * for the command stream and will be filled just before executing
 	 * the batch buffer with annotations_add_batch() */
-	aub->index = 1;
+	ctx->index = 1;
 }
 
 static void add_annotation(drm_intel_aub_annotation *a,
@@ -97,36 +97,36 @@  static void add_annotation(drm_intel_aub_annotation *a,
 	a->ending_offset = ending_offset;
 }
 
-static void annotation_add_batch(struct annotations_context *aub, size_t size)
+static void annotation_add_batch(struct annotations_context *ctx, size_t size)
 {
-	add_annotation(&aub->annotations[0], AUB_TRACE_TYPE_BATCH, 0, size);
+	add_annotation(&ctx->annotations[0], AUB_TRACE_TYPE_BATCH, 0, size);
 }
 
-static void annotation_add_state(struct annotations_context *aub,
+static void annotation_add_state(struct annotations_context *ctx,
 				 uint32_t state_type,
 				 uint32_t start_offset,
 				 size_t   size)
 {
-	igt_assert(aub->index < MAX_ANNOTATIONS);
+	igt_assert(ctx->index < MAX_ANNOTATIONS);
 
-	add_annotation(&aub->annotations[aub->index++],
+	add_annotation(&ctx->annotations[ctx->index++],
 		       AUB_TRACE_TYPE_NOTYPE, 0,
 		       start_offset);
-	add_annotation(&aub->annotations[aub->index++],
+	add_annotation(&ctx->annotations[ctx->index++],
 		       AUB_TRACE_TYPE(state_type),
 		       AUB_TRACE_SUBTYPE(state_type),
 		       start_offset + size);
 }
 
-static void annotation_flush(struct annotations_context *aub,
+static void annotation_flush(struct annotations_context *ctx,
 			     struct intel_batchbuffer *batch)
 {
 	if (!igt_aub_dump_enabled())
 		return;
 
 	drm_intel_bufmgr_gem_set_aub_annotations(batch->bo,
-						 aub->annotations,
-						 aub->index);
+						 ctx->annotations,
+						 ctx->index);
 }
 
 static uint32_t
@@ -179,11 +179,8 @@  gen6_render_flush(struct intel_batchbuffer *batch,
 
 /* Mostly copy+paste from gen6, except height, width, pitch moved */
 static uint32_t
-gen8_bind_buf(struct intel_batchbuffer *batch,
-	      struct annotations_context *aub,
-	      struct igt_buf *buf,
-	      uint32_t format, int is_dst)
-{
+gen8_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
+	      uint32_t format, int is_dst) {
 	struct gen8_surface_state *ss;
 	uint32_t write_domain, read_domain, offset;
 	int ret;
@@ -197,7 +194,8 @@  gen8_bind_buf(struct intel_batchbuffer *batch,
 
 	ss = batch_alloc(batch, sizeof(*ss), 64);
 	offset = batch_offset(batch, ss);
-	annotation_add_state(aub, AUB_TRACE_SURFACE_STATE, offset, sizeof(*ss));
+	annotation_add_state(&aub_annotations, AUB_TRACE_SURFACE_STATE,
+		 offset, sizeof(*ss));
 
 	ss->ss0.surface_type = GEN6_SURFACE_2D;
 	ss->ss0.surface_format = format;
@@ -231,7 +229,6 @@  gen8_bind_buf(struct intel_batchbuffer *batch,
 
 static uint32_t
 gen8_bind_surfaces(struct intel_batchbuffer *batch,
-		   struct annotations_context *aub,
 		   struct igt_buf *src,
 		   struct igt_buf *dst)
 {
@@ -239,29 +236,26 @@  gen8_bind_surfaces(struct intel_batchbuffer *batch,
 
 	binding_table = batch_alloc(batch, 8, 32);
 	offset = batch_offset(batch, binding_table);
-	annotation_add_state(aub, AUB_TRACE_BINDING_TABLE, offset, 8);
+	annotation_add_state(&aub_annotations, AUB_TRACE_BINDING_TABLE,
+		 offset, 8);
 
 	binding_table[0] =
-		gen8_bind_buf(batch, aub,
-			      dst, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 1);
+		gen8_bind_buf(batch, dst, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 1);
 	binding_table[1] =
-		gen8_bind_buf(batch, aub,
-			      src, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 0);
+		gen8_bind_buf(batch, src, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 0);
 
 	return offset;
 }
 
 /* Mostly copy+paste from gen6, except wrap modes moved */
 static uint32_t
-gen8_create_sampler(struct intel_batchbuffer *batch,
-		    struct annotations_context *aub)
-{
+gen8_create_sampler(struct intel_batchbuffer *batch) {
 	struct gen8_sampler_state *ss;
 	uint32_t offset;
 
 	ss = batch_alloc(batch, sizeof(*ss), 64);
 	offset = batch_offset(batch, ss);
-	annotation_add_state(aub, AUB_TRACE_SAMPLER_STATE,
+	annotation_add_state(&aub_annotations, AUB_TRACE_SAMPLER_STATE,
 			     offset, sizeof(*ss));
 
 	ss->ss0.min_filter = GEN6_MAPFILTER_NEAREST;
@@ -279,20 +273,20 @@  gen8_create_sampler(struct intel_batchbuffer *batch,
 
 static uint32_t
 gen8_fill_ps(struct intel_batchbuffer *batch,
-	     struct annotations_context *aub,
 	     const uint32_t kernel[][4],
 	     size_t size)
 {
 	uint32_t offset;
 
 	offset = batch_copy(batch, kernel, size, 64);
-	annotation_add_state(aub, AUB_TRACE_KERNEL_INSTRUCTIONS, offset, size);
+	annotation_add_state(&aub_annotations, AUB_TRACE_KERNEL_INSTRUCTIONS,
+		 offset, size);
 
 	return offset;
 }
 
 /*
- * gen7_fill_vertex_buffer_data populate vertex buffer with data.
+ * gen8_fill_vertex_buffer_data populate vertex buffer with data.
  *
  * The vertex buffer consists of 3 vertices to construct a RECTLIST. The 4th
  * vertex is implied (automatically derived by the HW). Each element has the
@@ -302,8 +296,7 @@  gen8_fill_ps(struct intel_batchbuffer *batch,
  * see gen6_emit_vertex_elements
  */
 static uint32_t
-gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
-			     struct annotations_context *aub,
+gen8_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
 			     struct igt_buf *src,
 			     uint32_t src_x, uint32_t src_y,
 			     uint32_t dst_x, uint32_t dst_y,
@@ -328,7 +321,7 @@  gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
 	emit_vertex_normalized(batch, src_y, igt_buf_height(src));
 
 	offset = batch_offset(batch, start);
-	annotation_add_state(aub, AUB_TRACE_VERTEX_BUFFER,
+	annotation_add_state(&aub_annotations, AUB_TRACE_VERTEX_BUFFER,
 			     offset, 3 * VERTEX_SIZE);
 	return offset;
 }
@@ -339,7 +332,7 @@  gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
  * what gen6_rendercopy did. The most straightforward would be to store
  * everything as floats.
  *
- * see gen7_fill_vertex_buffer_data() for where the corresponding elements are
+ * see gen8_fill_vertex_buffer_data() for where the corresponding elements are
  * packed.
  */
 static void
@@ -407,23 +400,21 @@  static void gen8_emit_vertex_buffer(struct intel_batchbuffer *batch,
 }
 
 static uint32_t
-gen6_create_cc_state(struct intel_batchbuffer *batch,
-		     struct annotations_context *aub)
+gen6_create_cc_state(struct intel_batchbuffer *batch)
 {
 	struct gen6_color_calc_state *cc_state;
 	uint32_t offset;
 
 	cc_state = batch_alloc(batch, sizeof(*cc_state), 64);
 	offset = batch_offset(batch, cc_state);
-	annotation_add_state(aub, AUB_TRACE_CC_STATE,
+	annotation_add_state(&aub_annotations, AUB_TRACE_CC_STATE,
 			     offset, sizeof(*cc_state));
 
 	return offset;
 }
 
 static uint32_t
-gen8_create_blend_state(struct intel_batchbuffer *batch,
-			struct annotations_context *aub)
+gen8_create_blend_state(struct intel_batchbuffer *batch)
 {
 	struct gen8_blend_state *blend;
 	int i;
@@ -431,7 +422,7 @@  gen8_create_blend_state(struct intel_batchbuffer *batch,
 
 	blend = batch_alloc(batch, sizeof(*blend), 64);
 	offset = batch_offset(batch, blend);
-	annotation_add_state(aub, AUB_TRACE_BLEND_STATE,
+	annotation_add_state(&aub_annotations, AUB_TRACE_BLEND_STATE,
 			     offset, sizeof(*blend));
 
 	for (i = 0; i < 16; i++) {
@@ -446,15 +437,14 @@  gen8_create_blend_state(struct intel_batchbuffer *batch,
 }
 
 static uint32_t
-gen6_create_cc_viewport(struct intel_batchbuffer *batch,
-			struct annotations_context *aub)
+gen6_create_cc_viewport(struct intel_batchbuffer *batch)
 {
 	struct gen6_cc_viewport *vp;
 	uint32_t offset;
 
 	vp = batch_alloc(batch, sizeof(*vp), 32);
 	offset = batch_offset(batch, vp);
-	annotation_add_state(aub, AUB_TRACE_CC_VP_STATE,
+	annotation_add_state(&aub_annotations, AUB_TRACE_CC_VP_STATE,
 			     offset, sizeof(*vp));
 
 	/* XXX I don't understand this */
@@ -465,16 +455,14 @@  gen6_create_cc_viewport(struct intel_batchbuffer *batch,
 }
 
 static uint32_t
-gen7_create_sf_clip_viewport(struct intel_batchbuffer *batch,
-			struct annotations_context *aub)
-{
+gen7_create_sf_clip_viewport(struct intel_batchbuffer *batch) {
 	/* XXX these are likely not needed */
 	struct gen7_sf_clip_viewport *scv_state;
 	uint32_t offset;
 
 	scv_state = batch_alloc(batch, sizeof(*scv_state), 64);
 	offset = batch_offset(batch, scv_state);
-	annotation_add_state(aub, AUB_TRACE_CLIP_VP_STATE,
+	annotation_add_state(&aub_annotations, AUB_TRACE_CLIP_VP_STATE,
 			     offset, sizeof(*scv_state));
 
 	scv_state->guardband.xmin = 0;
@@ -486,15 +474,14 @@  gen7_create_sf_clip_viewport(struct intel_batchbuffer *batch,
 }
 
 static uint32_t
-gen6_create_scissor_rect(struct intel_batchbuffer *batch,
-			struct annotations_context *aub)
+gen6_create_scissor_rect(struct intel_batchbuffer *batch)
 {
 	struct gen6_scissor_rect *scissor;
 	uint32_t offset;
 
 	scissor = batch_alloc(batch, sizeof(*scissor), 64);
 	offset = batch_offset(batch, scissor);
-	annotation_add_state(aub, AUB_TRACE_SCISSOR_STATE,
+	annotation_add_state(&aub_annotations, AUB_TRACE_SCISSOR_STATE,
 			     offset, sizeof(*scissor));
 
 	return offset;
@@ -557,7 +544,7 @@  gen8_emit_state_base_address(struct intel_batchbuffer *batch) {
 }
 
 static void
-gen7_emit_urb(struct intel_batchbuffer *batch) {
+gen8_emit_urb(struct intel_batchbuffer *batch) {
 	/* XXX: Min valid values from mesa */
 	const int vs_entries = 64;
 	const int vs_size = 2;
@@ -611,7 +598,7 @@  gen8_emit_vs(struct intel_batchbuffer *batch) {
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN6_3DSTATE_VS | (9-2));
+	OUT_BATCH(GEN6_3DSTATE_VS | (9 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -636,7 +623,7 @@  gen8_emit_hs(struct intel_batchbuffer *batch) {
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN7_3DSTATE_HS | (9-2));
+	OUT_BATCH(GEN7_3DSTATE_HS | (9 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -667,7 +654,7 @@  gen8_emit_gs(struct intel_batchbuffer *batch) {
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN7_3DSTATE_GS | (10-2));
+	OUT_BATCH(GEN7_3DSTATE_GS | (10 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -699,7 +686,7 @@  gen8_emit_ds(struct intel_batchbuffer *batch) {
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN7_3DSTATE_DS | (9-2));
+	OUT_BATCH(GEN7_3DSTATE_DS | (9 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -718,7 +705,7 @@  gen8_emit_ds(struct intel_batchbuffer *batch) {
 
 static void
 gen8_emit_wm_hz_op(struct intel_batchbuffer *batch) {
-	OUT_BATCH(GEN8_3DSTATE_WM_HZ_OP | (5-2));
+	OUT_BATCH(GEN8_3DSTATE_WM_HZ_OP | (5 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -729,7 +716,7 @@  static void
 gen8_emit_null_state(struct intel_batchbuffer *batch) {
 	gen8_emit_wm_hz_op(batch);
 	gen8_emit_hs(batch);
-	OUT_BATCH(GEN7_3DSTATE_TE | (4-2));
+	OUT_BATCH(GEN7_3DSTATE_TE | (4 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -738,13 +725,7 @@  gen8_emit_null_state(struct intel_batchbuffer *batch) {
 	gen8_emit_vs(batch);
 }
 
-static void
-gen7_emit_clip(struct intel_batchbuffer *batch) {
-	OUT_BATCH(GEN6_3DSTATE_CLIP | (4 - 2));
-	OUT_BATCH(0); 
-	OUT_BATCH(0); /*  pass-through */
-	OUT_BATCH(0);
-}
+
 
 static void
 gen8_emit_sf(struct intel_batchbuffer *batch)
@@ -788,7 +769,7 @@  gen8_emit_ps(struct intel_batchbuffer *batch, uint32_t kernel) {
 		   * expect (g6, see below) */
 		  GEN7_3DSTATE_PS_PERSPECTIVE_PIXEL_BARYCENTRIC);
 
-	OUT_BATCH(GEN6_3DSTATE_CONSTANT_PS | (11-2));
+	OUT_BATCH(GEN6_3DSTATE_CONSTANT_PS | (11 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -800,7 +781,7 @@  gen8_emit_ps(struct intel_batchbuffer *batch, uint32_t kernel) {
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN7_3DSTATE_PS | (12-2));
+	OUT_BATCH(GEN7_3DSTATE_PS | (12 - 2));
 	OUT_BATCH(kernel);
 	OUT_BATCH(0); /* kernel hi */
 	OUT_BATCH(1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHIFT |
@@ -823,12 +804,22 @@  gen8_emit_ps(struct intel_batchbuffer *batch, uint32_t kernel) {
 }
 
 static void
-gen8_emit_depth(struct intel_batchbuffer *batch) {
+gen6_emit_clip(struct intel_batchbuffer *batch)
+{
+	OUT_BATCH(GEN6_3DSTATE_CLIP | (4 - 2));
+	OUT_BATCH(0);
+	OUT_BATCH(0); /*  pass-through */
+	OUT_BATCH(0);
+}
+
+static void
+gen8_emit_depth(struct intel_batchbuffer *batch)
+{
 	OUT_BATCH(GEN8_3DSTATE_WM_DEPTH_STENCIL | (3 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER | (8-2));
+	OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER | (8 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -852,7 +843,7 @@  gen8_emit_depth(struct intel_batchbuffer *batch) {
 
 static void
 gen7_emit_clear(struct intel_batchbuffer *batch) {
-	OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS | (3-2));
+	OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(1); // clear valid
 }
@@ -879,7 +870,7 @@  static void gen8_emit_primitive(struct intel_batchbuffer *batch, uint32_t offset
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN6_3DPRIMITIVE | (7-2));
+	OUT_BATCH(GEN6_3DPRIMITIVE | (7 - 2));
 	OUT_BATCH(0);	/* gen8+ ignore the topology type field */
 	OUT_BATCH(3);	/* vertex count */
 	OUT_BATCH(0);	/*  We're specifying this instead with offset in GEN6_3DSTATE_VERTEX_BUFFERS */
@@ -926,7 +917,6 @@  void gen8_render_copyfunc(struct intel_batchbuffer *batch,
 			  unsigned width, unsigned height,
 			  struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
 {
-	struct annotations_context aub_annotations;
 	uint32_t ps_sampler_state, ps_kernel_off, ps_binding_table;
 	uint32_t scissor_state;
 	uint32_t vertex_buffer;
@@ -940,21 +930,18 @@  void gen8_render_copyfunc(struct intel_batchbuffer *batch,
 
 	annotation_init(&aub_annotations);
 
-	ps_binding_table  = gen8_bind_surfaces(batch, &aub_annotations,
-					       src, dst);
-	ps_sampler_state  = gen8_create_sampler(batch, &aub_annotations);
-	ps_kernel_off = gen8_fill_ps(batch, &aub_annotations,
-				     ps_kernel, sizeof(ps_kernel));
-	vertex_buffer = gen7_fill_vertex_buffer_data(batch, &aub_annotations,
-						     src,
+	ps_binding_table  = gen8_bind_surfaces(batch, src, dst);
+	ps_sampler_state  = gen8_create_sampler(batch);
+	ps_kernel_off = gen8_fill_ps(batch, ps_kernel, sizeof(ps_kernel));
+	vertex_buffer = gen8_fill_vertex_buffer_data(batch, src,
 						     src_x, src_y,
 						     dst_x, dst_y,
 						     width, height);
-	cc.cc_state = gen6_create_cc_state(batch, &aub_annotations);
-	cc.blend_state = gen8_create_blend_state(batch, &aub_annotations);
-	viewport.cc_state = gen6_create_cc_viewport(batch, &aub_annotations);
-	viewport.sf_clip_state = gen7_create_sf_clip_viewport(batch, &aub_annotations);
-	scissor_state = gen6_create_scissor_rect(batch, &aub_annotations);
+	cc.cc_state = gen6_create_cc_state(batch);
+	cc.blend_state = gen8_create_blend_state(batch);
+	viewport.cc_state = gen6_create_cc_viewport(batch);
+	viewport.sf_clip_state = gen7_create_sf_clip_viewport(batch);
+	scissor_state = gen6_create_scissor_rect(batch);
 	/* TODO: theree is other state which isn't setup */
 
 	igt_assert(batch->ptr < &batch->buffer[4095]);
@@ -976,7 +963,7 @@  void gen8_render_copyfunc(struct intel_batchbuffer *batch,
 	OUT_BATCH(GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP);
 	OUT_BATCH(viewport.sf_clip_state);
 
-	gen7_emit_urb(batch);
+	gen8_emit_urb(batch);
 
 	gen8_emit_cc(batch);
 
@@ -984,13 +971,13 @@  void gen8_render_copyfunc(struct intel_batchbuffer *batch,
 
 	gen8_emit_null_state(batch);
 
-	OUT_BATCH(GEN7_3DSTATE_STREAMOUT | (5-2));
+	OUT_BATCH(GEN7_3DSTATE_STREAMOUT | (5 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	gen7_emit_clip(batch);
+	gen6_emit_clip(batch);
 
 	gen8_emit_sf(batch);
 
diff --git a/lib/rendercopy_gen9.c b/lib/rendercopy_gen9.c
index 9537480..cb00f60 100644
--- a/lib/rendercopy_gen9.c
+++ b/lib/rendercopy_gen9.c
@@ -108,7 +108,7 @@  static void annotation_add_state(struct annotations_context *ctx,
 				 uint32_t start_offset,
 				 size_t   size)
 {
-	assert(ctx->index < MAX_ANNOTATIONS);
+	igt_assert(ctx->index < MAX_ANNOTATIONS);
 
 	add_annotation(&ctx->annotations[ctx->index++],
 		       AUB_TRACE_TYPE_NOTYPE, 0,
@@ -175,7 +175,7 @@  gen6_render_flush(struct intel_batchbuffer *batch,
 	if (ret == 0)
 		ret = drm_intel_gem_bo_context_exec(batch->bo, context,
 						    batch_end, 0);
-	assert(ret == 0);
+	igt_assert(ret == 0);
 }
 
 /* Mostly copy+paste from gen6, except height, width, pitch moved */
@@ -214,7 +214,7 @@  gen8_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
 				      batch_offset(batch, ss) + 8 * 4,
 				      buf->bo, 0,
 				      read_domain, write_domain);
-	assert(ret == 0);
+	igt_assert(ret == 0);
 
 	ss->ss2.height = igt_buf_height(buf) - 1;
 	ss->ss2.width  = igt_buf_width(buf) - 1;
@@ -287,7 +287,7 @@  gen8_fill_ps(struct intel_batchbuffer *batch,
 }
 
 /*
- * gen7_fill_vertex_buffer_data populate vertex buffer with data.
+ * gen8_fill_vertex_buffer_data populate vertex buffer with data.
  *
  * The vertex buffer consists of 3 vertices to construct a RECTLIST. The 4th
  * vertex is implied (automatically derived by the HW). Each element has the
@@ -297,7 +297,7 @@  gen8_fill_ps(struct intel_batchbuffer *batch,
  * see gen6_emit_vertex_elements
  */
 static uint32_t
-gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
+gen8_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
 			     struct igt_buf *src,
 			     uint32_t src_x, uint32_t src_y,
 			     uint32_t dst_x, uint32_t dst_y,
@@ -333,7 +333,7 @@  gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
  * what gen6_rendercopy did. The most straightforward would be to store
  * everything as floats.
  *
- * see gen7_fill_vertex_buffer_data() for where the corresponding elements are
+ * see gen8_fill_vertex_buffer_data() for where the corresponding elements are
  * packed.
  */
 static void
@@ -385,12 +385,12 @@  gen6_emit_vertex_elements(struct intel_batchbuffer *batch) {
 }
 
 /*
- * gen7_emit_vertex_buffer emit the vertex buffers command
+ * gen8_emit_vertex_buffer emit the vertex buffers command
  *
  * @batch
  * @offset - bytw offset within the @batch where the vertex buffer starts.
  */
-static void gen7_emit_vertex_buffer(struct intel_batchbuffer *batch,
+static void gen8_emit_vertex_buffer(struct intel_batchbuffer *batch,
 				    uint32_t offset) {
 	OUT_BATCH(GEN6_3DSTATE_VERTEX_BUFFERS | (1 + (4 * 1) - 2));
 	OUT_BATCH(0 << VB0_BUFFER_INDEX_SHIFT | /* VB 0th index */
@@ -554,7 +554,7 @@  gen9_emit_state_base_address(struct intel_batchbuffer *batch) {
 }
 
 static void
-gen7_emit_urb(struct intel_batchbuffer *batch) {
+gen9_emit_urb(struct intel_batchbuffer *batch) {
 	/* XXX: Min valid values from mesa */
 	const int vs_entries = 64;
 	const int vs_size = 2;
@@ -580,7 +580,7 @@  gen8_emit_cc(struct intel_batchbuffer *batch) {
 }
 
 static void
-gen8_emit_multisample(struct intel_batchbuffer *batch) {
+gen9_emit_multisample(struct intel_batchbuffer *batch) {
 	OUT_BATCH(GEN8_3DSTATE_MULTISAMPLE | 0);
 	OUT_BATCH(0);
 
@@ -589,8 +589,8 @@  gen8_emit_multisample(struct intel_batchbuffer *batch) {
 }
 
 static void
-gen8_emit_vs(struct intel_batchbuffer *batch) {
-	OUT_BATCH(GEN6_3DSTATE_CONSTANT_VS | (11-2));
+gen9_emit_vs(struct intel_batchbuffer *batch) {
+	OUT_BATCH(GEN6_3DSTATE_CONSTANT_VS | (11 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -608,7 +608,7 @@  gen8_emit_vs(struct intel_batchbuffer *batch) {
 	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_VS);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN6_3DSTATE_VS | (9-2));
+	OUT_BATCH(GEN6_3DSTATE_VS | (9 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -621,7 +621,7 @@  gen8_emit_vs(struct intel_batchbuffer *batch) {
 
 static void
 gen8_emit_hs(struct intel_batchbuffer *batch) {
-	OUT_BATCH(GEN7_3DSTATE_CONSTANT_HS | (11-2));
+	OUT_BATCH(GEN7_3DSTATE_CONSTANT_HS | (11 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -633,7 +633,7 @@  gen8_emit_hs(struct intel_batchbuffer *batch) {
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN7_3DSTATE_HS | (9-2));
+	OUT_BATCH(GEN7_3DSTATE_HS | (9 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -652,7 +652,7 @@  gen8_emit_hs(struct intel_batchbuffer *batch) {
 
 static void
 gen8_emit_gs(struct intel_batchbuffer *batch) {
-	OUT_BATCH(GEN7_3DSTATE_CONSTANT_GS | (11-2));
+	OUT_BATCH(GEN7_3DSTATE_CONSTANT_GS | (11 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -664,7 +664,7 @@  gen8_emit_gs(struct intel_batchbuffer *batch) {
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN7_3DSTATE_GS | (10-2));
+	OUT_BATCH(GEN7_3DSTATE_GS | (10 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -684,7 +684,7 @@  gen8_emit_gs(struct intel_batchbuffer *batch) {
 
 static void
 gen9_emit_ds(struct intel_batchbuffer *batch) {
-	OUT_BATCH(GEN7_3DSTATE_CONSTANT_DS | (11-2));
+	OUT_BATCH(GEN7_3DSTATE_CONSTANT_DS | (11 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -696,7 +696,7 @@  gen9_emit_ds(struct intel_batchbuffer *batch) {
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN7_3DSTATE_DS | (11-2));
+	OUT_BATCH(GEN7_3DSTATE_DS | (11 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -715,10 +715,9 @@  gen9_emit_ds(struct intel_batchbuffer *batch) {
 	OUT_BATCH(0);
 }
 
-
 static void
 gen8_emit_wm_hz_op(struct intel_batchbuffer *batch) {
-	OUT_BATCH(GEN8_3DSTATE_WM_HZ_OP | (5-2));
+	OUT_BATCH(GEN8_3DSTATE_WM_HZ_OP | (5 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -729,13 +728,13 @@  static void
 gen8_emit_null_state(struct intel_batchbuffer *batch) {
 	gen8_emit_wm_hz_op(batch);
 	gen8_emit_hs(batch);
-	OUT_BATCH(GEN7_3DSTATE_TE | (4-2));
+	OUT_BATCH(GEN7_3DSTATE_TE | (4 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	gen8_emit_gs(batch);
 	gen9_emit_ds(batch);
-	gen8_emit_vs(batch);
+	gen9_emit_vs(batch);
 }
 
 static void
@@ -747,7 +746,7 @@  gen7_emit_clip(struct intel_batchbuffer *batch) {
 }
 
 static void
-gen8_emit_sf(struct intel_batchbuffer *batch)
+gen9_emit_sf(struct intel_batchbuffer *batch)
 {
 	int i;
 
@@ -790,7 +789,7 @@  gen8_emit_ps(struct intel_batchbuffer *batch, uint32_t kernel) {
 		   * expect (g6, see below) */
 		  GEN7_3DSTATE_PS_PERSPECTIVE_PIXEL_BARYCENTRIC);
 
-	OUT_BATCH(GEN6_3DSTATE_CONSTANT_PS | (11-2));
+	OUT_BATCH(GEN6_3DSTATE_CONSTANT_PS | (11 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -802,7 +801,7 @@  gen8_emit_ps(struct intel_batchbuffer *batch, uint32_t kernel) {
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN7_3DSTATE_PS | (12-2));
+	OUT_BATCH(GEN7_3DSTATE_PS | (12 - 2));
 	OUT_BATCH(kernel);
 	OUT_BATCH(0); /* kernel hi */
 	OUT_BATCH(1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHIFT |
@@ -832,7 +831,7 @@  gen9_emit_depth(struct intel_batchbuffer *batch)
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER | (8-2));
+	OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER | (8 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -841,13 +840,13 @@  gen9_emit_depth(struct intel_batchbuffer *batch)
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER | (5-2));
+	OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER | (5 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN7_3DSTATE_STENCIL_BUFFER | (5-2));
+	OUT_BATCH(GEN7_3DSTATE_STENCIL_BUFFER | (5 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -856,7 +855,7 @@  gen9_emit_depth(struct intel_batchbuffer *batch)
 
 static void
 gen7_emit_clear(struct intel_batchbuffer *batch) {
-	OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS | (3-2));
+	OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
 	OUT_BATCH(0);
 	OUT_BATCH(1); // clear valid
 }
@@ -877,7 +876,7 @@  static void gen8_emit_vf_topology(struct intel_batchbuffer *batch)
 }
 
 /* Vertex elements MUST be defined before this according to spec */
-static void gen8_emit_primitive(struct intel_batchbuffer *batch, uint32_t offset)
+static void gen9_emit_primitive(struct intel_batchbuffer *batch, uint32_t offset)
 {
 	OUT_BATCH(GEN7_3DSTATE_VF | (2 - 2));
 	OUT_BATCH(0);
@@ -886,7 +885,7 @@  static void gen8_emit_primitive(struct intel_batchbuffer *batch, uint32_t offset
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 
-	OUT_BATCH(GEN6_3DPRIMITIVE | (7-2));
+	OUT_BATCH(GEN6_3DPRIMITIVE | (7 - 2));
 	OUT_BATCH(0);	/* gen8+ ignore the topology type field */
 	OUT_BATCH(3);	/* vertex count */
 	OUT_BATCH(0);	/*  We're specifying this instead with offset in GEN6_3DSTATE_VERTEX_BUFFERS */
@@ -949,7 +948,7 @@  void gen9_render_copyfunc(struct intel_batchbuffer *batch,
 	ps_binding_table  = gen8_bind_surfaces(batch, src, dst);
 	ps_sampler_state  = gen8_create_sampler(batch);
 	ps_kernel_off = gen8_fill_ps(batch, ps_kernel, sizeof(ps_kernel));
-	vertex_buffer = gen7_fill_vertex_buffer_data(batch, src,
+	vertex_buffer = gen8_fill_vertex_buffer_data(batch, src,
 						     src_x, src_y,
 						     dst_x, dst_y,
 						     width, height);
@@ -960,7 +959,7 @@  void gen9_render_copyfunc(struct intel_batchbuffer *batch,
 	scissor_state = gen6_create_scissor_rect(batch);
 	/* TODO: theree is other state which isn't setup */
 
-	assert(batch->ptr < &batch->buffer[4095]);
+	igt_assert(batch->ptr < &batch->buffer[4095]);
 
 	batch->ptr = batch->buffer;
 
@@ -980,11 +979,11 @@  void gen9_render_copyfunc(struct intel_batchbuffer *batch,
 	OUT_BATCH(GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP);
 	OUT_BATCH(viewport.sf_clip_state);
 
-	gen7_emit_urb(batch);
+	gen9_emit_urb(batch);
 
 	gen8_emit_cc(batch);
 
-	gen8_emit_multisample(batch);
+	gen9_emit_multisample(batch);
 
 	gen8_emit_null_state(batch);
 
@@ -996,7 +995,7 @@  void gen9_render_copyfunc(struct intel_batchbuffer *batch,
 
 	gen7_emit_clip(batch);
 
-	gen8_emit_sf(batch);
+	gen9_emit_sf(batch);
 
 	gen8_emit_ps(batch, ps_kernel_off);
 
@@ -1015,16 +1014,16 @@  void gen9_render_copyfunc(struct intel_batchbuffer *batch,
 
 	gen6_emit_drawing_rectangle(batch, dst);
 
-	gen7_emit_vertex_buffer(batch, vertex_buffer);
+	gen8_emit_vertex_buffer(batch, vertex_buffer);
 	gen6_emit_vertex_elements(batch);
 
 	gen8_emit_vf_topology(batch);
-	gen8_emit_primitive(batch, vertex_buffer);
+	gen9_emit_primitive(batch, vertex_buffer);
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
 	batch_end = batch_align(batch, 8);
-	assert(batch_end < BATCH_STATE_SPLIT);
+	igt_assert(batch_end < BATCH_STATE_SPLIT);
 	annotation_add_batch(&aub_annotations, batch_end);
 
 	dump_batch(batch);