All of lore.kernel.org
 help / color / mirror / Atom feed
* [igt-dev] [PATCH i-g-t] lib/intel_batchbuffer: Move batch functions from media/render/gpgpu libs
@ 2018-04-13 15:13 Lukasz Kalamarz
  2018-04-13 15:33 ` [igt-dev] ✗ Fi.CI.BAT: failure for " Patchwork
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Lukasz Kalamarz @ 2018-04-13 15:13 UTC (permalink / raw)
  To: igt-dev

Batch functions were copy/pasted across several libs.
By moving them into the intel_batchbuffer lib, tests can now be
easily maintained without worrying that we forgot to modify
an older copy of the helpers in another lib.

Signed-off-by: Lukasz Kalamarz <lukasz.kalamarz@intel.com>
Cc: Katarzyna Dec <katarzyna.dec@intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
---
 lib/gpgpu_fill.c        |  6 ++--
 lib/gpu_fill.c          | 68 +++++++++++-----------------------------
 lib/gpu_fill.h          | 15 ---------
 lib/intel_batchbuffer.c | 58 ++++++++++++++++++++++++++++++----
 lib/intel_batchbuffer.h | 18 +++++++++--
 lib/media_fill_gen7.c   |  2 +-
 lib/media_fill_gen8.c   |  2 +-
 lib/media_fill_gen9.c   |  2 +-
 lib/media_spin.c        | 63 +++++++++----------------------------
 lib/rendercopy_gen6.c   | 75 ++++++++++----------------------------------
 lib/rendercopy_gen7.c   | 65 ++++++++++-----------------------------
 lib/rendercopy_gen8.c   | 82 +++++++++++++++----------------------------------
 lib/rendercopy_gen9.c   | 82 +++++++++++++++----------------------------------
 13 files changed, 188 insertions(+), 350 deletions(-)

diff --git a/lib/gpgpu_fill.c b/lib/gpgpu_fill.c
index 72a1445..010dde0 100644
--- a/lib/gpgpu_fill.c
+++ b/lib/gpgpu_fill.c
@@ -137,7 +137,7 @@ gen7_gpgpu_fillfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen7_render_flush(batch, batch_end);
@@ -185,7 +185,7 @@ gen8_gpgpu_fillfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen7_render_flush(batch, batch_end);
@@ -234,7 +234,7 @@ gen9_gpgpu_fillfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen7_render_flush(batch, batch_end);
diff --git a/lib/gpu_fill.c b/lib/gpu_fill.c
index f1fe5b3..5c1e217 100644
--- a/lib/gpu_fill.c
+++ b/lib/gpu_fill.c
@@ -24,41 +24,6 @@
 
 #include "gpu_fill.h"
 
-uint32_t
-batch_used(struct intel_batchbuffer *batch)
-{
-	return batch->ptr - batch->buffer;
-}
-
-uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align)
-{
-	uint32_t offset = batch_used(batch);
-	offset = ALIGN(offset, align);
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
-{
-	uint32_t offset = batch_align(batch, align);
-	batch->ptr += size;
-	return memset(batch->buffer + offset, 0, size);
-}
-
-uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr)
-{
-	return (uint8_t *)ptr - batch->buffer;
-}
-
-uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
-{
-	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
-}
-
 void
 gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
 {
@@ -78,8 +43,10 @@ gen7_fill_curbe_buffer_data(struct intel_batchbuffer *batch,
 	uint8_t *curbe_buffer;
 	uint32_t offset;
 
-	curbe_buffer = batch_alloc(batch, sizeof(uint32_t) * 8, 64);
-	offset = batch_offset(batch, curbe_buffer);
+	curbe_buffer = intel_batchbuffer_subdata_alloc(batch,
+						       sizeof(uint32_t) * 8,
+						       64);
+	offset = intel_batchbuffer_subdata_offset(batch, curbe_buffer);
 	*curbe_buffer = color;
 
 	return offset;
@@ -102,8 +69,8 @@ gen7_fill_surface_state(struct intel_batchbuffer *batch,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 
 	ss->ss0.surface_type = GEN7_SURFACE_2D;
 	ss->ss0.surface_format = format;
@@ -116,7 +83,7 @@ gen7_fill_surface_state(struct intel_batchbuffer *batch,
 
 	ss->ss1.base_addr = buf->bo->offset;
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				batch_offset(batch, ss) + 4,
+				intel_batchbuffer_subdata_offset(batch, ss) + 4,
 				buf->bo, 0,
 				read_domain, write_domain);
 	igt_assert(ret == 0);
@@ -140,8 +107,8 @@ gen7_fill_binding_table(struct intel_batchbuffer *batch,
 {
 	uint32_t *binding_table, offset;
 
-	binding_table = batch_alloc(batch, 32, 64);
-	offset = batch_offset(batch, binding_table);
+	binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 64);
+	offset = intel_batchbuffer_subdata_offset(batch, binding_table);
 	if (IS_GEN7(batch->devid))
 		binding_table[0] = gen7_fill_surface_state(batch, dst,
 						GEN7_SURFACEFORMAT_R8_UNORM, 1);
@@ -159,7 +126,7 @@ gen7_fill_kernel(struct intel_batchbuffer *batch,
 {
 	uint32_t offset;
 
-	offset = batch_copy(batch, kernel, size, 64);
+	offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
 
 	return offset;
 }
@@ -175,8 +142,8 @@ gen7_fill_interface_descriptor(struct intel_batchbuffer *batch, struct igt_buf *
 	binding_table_offset = gen7_fill_binding_table(batch, dst);
 	kernel_offset = gen7_fill_kernel(batch, kernel, size);
 
-	idd = batch_alloc(batch, sizeof(*idd), 64);
-	offset = batch_offset(batch, idd);
+	idd = intel_batchbuffer_subdata_alloc(batch, sizeof(*idd), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, idd);
 
 	idd->desc0.kernel_start_pointer = (kernel_offset >> 6);
 
@@ -401,8 +368,8 @@ gen8_fill_surface_state(struct intel_batchbuffer *batch,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 
 	ss->ss0.surface_type = GEN8_SURFACE_2D;
 	ss->ss0.surface_format = format;
@@ -418,7 +385,8 @@ gen8_fill_surface_state(struct intel_batchbuffer *batch,
 	ss->ss8.base_addr = buf->bo->offset;
 
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				batch_offset(batch, ss) + 8 * 4,
+				intel_batchbuffer_subdata_offset(batch,
+				ss) + 8 * 4,
 				buf->bo, 0,
 				read_domain, write_domain);
 	igt_assert(ret == 0);
@@ -445,8 +413,8 @@ gen8_fill_interface_descriptor(struct intel_batchbuffer *batch, struct igt_buf *
 	binding_table_offset = gen7_fill_binding_table(batch, dst);
 	kernel_offset = gen7_fill_kernel(batch, kernel, size);
 
-	idd = batch_alloc(batch, sizeof(*idd), 64);
-	offset = batch_offset(batch, idd);
+	idd = intel_batchbuffer_subdata_alloc(batch, sizeof(*idd), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, idd);
 
 	idd->desc0.kernel_start_pointer = (kernel_offset >> 6);
 
diff --git a/lib/gpu_fill.h b/lib/gpu_fill.h
index 072e9f7..067d498 100644
--- a/lib/gpu_fill.h
+++ b/lib/gpu_fill.h
@@ -37,21 +37,6 @@
 #include "intel_chipset.h"
 #include <assert.h>
 
-uint32_t
-batch_used(struct intel_batchbuffer *batch);
-
-uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align);
-
-void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align);
-
-uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr);
-
-uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align);
-
 void
 gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end);
 
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index 10d4dce..ec7ef98 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -66,6 +66,49 @@
  */
 
 /**
+ * intel_batchbuffer_align:
+ * @batch: batchbuffer object
+ * @align: value in bytes to which we want to align
+ *
+ * Align batchbuffer offset to given value and then return it
+ */
+uint32_t
+intel_batchbuffer_align(struct intel_batchbuffer *batch, uint32_t align)
+{
+	uint32_t offset = batch->ptr - batch->buffer;
+
+	offset = ALIGN(offset, align);
+	batch->ptr = batch->buffer + offset;
+	return offset;
+}
+
+uint32_t
+batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor)
+{
+	uint32_t offset = batch->ptr - batch->buffer;
+
+	offset = (offset + divisor-1) / divisor * divisor;
+	batch->ptr = batch->buffer + offset;
+	return offset;
+}
+
+void *
+intel_batchbuffer_subdata_alloc(struct intel_batchbuffer *batch, uint32_t size,
+				uint32_t align)
+{
+	uint32_t offset = intel_batchbuffer_align(batch, align);
+
+	batch->ptr += size;
+	return memset(batch->buffer + offset, 0, size);
+}
+
+uint32_t
+intel_batchbuffer_subdata_offset(struct intel_batchbuffer *batch, void *ptr)
+{
+	return (uint8_t *)ptr - batch->buffer;
+}
+
+/**
  * intel_batchbuffer_reset:
  * @batch: batchbuffer object
  *
@@ -288,7 +331,7 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
 }
 
 /**
- * intel_batchbuffer_data:
+ * intel_batchbuffer_copy_data:
  * @batch: batchbuffer object
  * @data: pointer to the data to write into the batchbuffer
  * @bytes: number of bytes to write into the batchbuffer
@@ -296,14 +339,17 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
  * This transfers the given @data into the batchbuffer. Note that the length
  * must be DWORD aligned, i.e. multiples of 32bits.
  */
-void
-intel_batchbuffer_data(struct intel_batchbuffer *batch,
-                       const void *data, unsigned int bytes)
+uint32_t
+intel_batchbuffer_copy_data(struct intel_batchbuffer *batch,
+			    const void *data, unsigned int bytes,
+			    uint32_t align)
 {
+	uint32_t *subdata, *copied_data;
 	igt_assert((bytes & 3) == 0);
 	intel_batchbuffer_require_space(batch, bytes);
-	memcpy(batch->ptr, data, bytes);
-	batch->ptr += bytes;
+	subdata = intel_batchbuffer_subdata_alloc(batch, bytes, align);
+	copied_data = memcpy(subdata, data, bytes);
+	return intel_batchbuffer_subdata_offset(batch, copied_data);
 }
 
 /**
diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
index 2c262d7..7468eaf 100644
--- a/lib/intel_batchbuffer.h
+++ b/lib/intel_batchbuffer.h
@@ -41,8 +41,9 @@ void intel_batchbuffer_flush_with_context(struct intel_batchbuffer *batch,
 
 void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
 
-void intel_batchbuffer_data(struct intel_batchbuffer *batch,
-                            const void *data, unsigned int bytes);
+uint32_t intel_batchbuffer_copy_data(struct intel_batchbuffer *batch,
+				const void *data, unsigned int bytes,
+				uint32_t align);
 
 void intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
 				  drm_intel_bo *buffer,
@@ -51,6 +52,19 @@ void intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
 				  uint32_t write_domain,
 				  int fenced);
 
+uint32_t
+intel_batchbuffer_align(struct intel_batchbuffer *batch, uint32_t align);
+
+uint32_t
+batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor);
+
+void *
+intel_batchbuffer_subdata_alloc(struct intel_batchbuffer *batch,
+				uint32_t size, uint32_t align);
+
+uint32_t
+intel_batchbuffer_subdata_offset(struct intel_batchbuffer *batch, void *ptr);
+
 /* Inline functions - might actually be better off with these
  * non-inlined.  Certainly better off switching all command packets to
  * be passed as structs rather than dwords, but that's a little bit of
diff --git a/lib/media_fill_gen7.c b/lib/media_fill_gen7.c
index 5a8c32f..3dc5617 100644
--- a/lib/media_fill_gen7.c
+++ b/lib/media_fill_gen7.c
@@ -79,7 +79,7 @@ gen7_media_fillfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen7_render_flush(batch, batch_end);
diff --git a/lib/media_fill_gen8.c b/lib/media_fill_gen8.c
index d6dd741..63fe72e 100644
--- a/lib/media_fill_gen8.c
+++ b/lib/media_fill_gen8.c
@@ -82,7 +82,7 @@ gen8_media_fillfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen7_render_flush(batch, batch_end);
diff --git a/lib/media_fill_gen9.c b/lib/media_fill_gen9.c
index a9a829f..78e892f 100644
--- a/lib/media_fill_gen9.c
+++ b/lib/media_fill_gen9.c
@@ -91,7 +91,7 @@ gen9_media_fillfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen7_render_flush(batch, batch_end);
diff --git a/lib/media_spin.c b/lib/media_spin.c
index 580c109..20af549 100644
--- a/lib/media_spin.c
+++ b/lib/media_spin.c
@@ -45,42 +45,6 @@ static const uint32_t spin_kernel[][4] = {
 	{ 0x07800031, 0x20000a40, 0x0e000e00, 0x82000010 }, /* send.ts (16)null<1> r112<0;1;0>:d 0x82000010 */
 };
 
-static uint32_t
-batch_used(struct intel_batchbuffer *batch)
-{
-	return batch->ptr - batch->buffer;
-}
-
-static uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align)
-{
-	uint32_t offset = batch_used(batch);
-	offset = ALIGN(offset, align);
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-static void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
-{
-	uint32_t offset = batch_align(batch, align);
-	batch->ptr += size;
-	return memset(batch->buffer + offset, 0, size);
-}
-
-static uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr)
-{
-	return (uint8_t *)ptr - batch->buffer;
-}
-
-static uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size,
-	   uint32_t align)
-{
-	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
-}
-
 static void
 gen8_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
 {
@@ -100,8 +64,8 @@ gen8_spin_curbe_buffer_data(struct intel_batchbuffer *batch,
 	uint32_t *curbe_buffer;
 	uint32_t offset;
 
-	curbe_buffer = batch_alloc(batch, 64, 64);
-	offset = batch_offset(batch, curbe_buffer);
+	curbe_buffer = intel_batchbuffer_subdata_alloc(batch, 64, 64);
+	offset = intel_batchbuffer_subdata_offset(batch, curbe_buffer);
 	*curbe_buffer = iters;
 
 	return offset;
@@ -124,8 +88,8 @@ gen8_spin_surface_state(struct intel_batchbuffer *batch,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 
 	ss->ss0.surface_type = GEN8_SURFACE_2D;
 	ss->ss0.surface_format = format;
@@ -141,7 +105,8 @@ gen8_spin_surface_state(struct intel_batchbuffer *batch,
 	ss->ss8.base_addr = buf->bo->offset;
 
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				batch_offset(batch, ss) + 8 * 4,
+				intel_batchbuffer_subdata_offset(batch,
+				ss) + 8 * 4,
 				buf->bo, 0,
 				read_domain, write_domain);
 	igt_assert_eq(ret, 0);
@@ -164,8 +129,8 @@ gen8_spin_binding_table(struct intel_batchbuffer *batch,
 {
 	uint32_t *binding_table, offset;
 
-	binding_table = batch_alloc(batch, 32, 64);
-	offset = batch_offset(batch, binding_table);
+	binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 64);
+	offset = intel_batchbuffer_subdata_offset(batch, binding_table);
 
 	binding_table[0] = gen8_spin_surface_state(batch, dst,
 					GEN8_SURFACEFORMAT_R8_UNORM, 1);
@@ -180,7 +145,7 @@ gen8_spin_media_kernel(struct intel_batchbuffer *batch,
 {
 	uint32_t offset;
 
-	offset = batch_copy(batch, kernel, size, 64);
+	offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
 
 	return offset;
 }
@@ -197,8 +162,8 @@ gen8_spin_interface_descriptor(struct intel_batchbuffer *batch,
 	kernel_offset = gen8_spin_media_kernel(batch, spin_kernel,
 					       sizeof(spin_kernel));
 
-	idd = batch_alloc(batch, sizeof(*idd), 64);
-	offset = batch_offset(batch, idd);
+	idd = intel_batchbuffer_subdata_alloc(batch, sizeof(*idd), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, idd);
 
 	idd->desc0.kernel_start_pointer = (kernel_offset >> 6);
 
@@ -444,7 +409,7 @@ gen8_media_spinfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen8_render_flush(batch, batch_end);
@@ -482,7 +447,7 @@ gen8lp_media_spinfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen8_render_flush(batch, batch_end);
@@ -532,7 +497,7 @@ gen9_media_spinfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen8_render_flush(batch, batch_end);
diff --git a/lib/rendercopy_gen6.c b/lib/rendercopy_gen6.c
index 8c24cf8..ddc9e7a 100644
--- a/lib/rendercopy_gen6.c
+++ b/lib/rendercopy_gen6.c
@@ -48,50 +48,6 @@ static const uint32_t ps_kernel_nomask_affine[][4] = {
 	{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
 };
 
-static uint32_t
-batch_used(struct intel_batchbuffer *batch)
-{
-	return batch->ptr - batch->buffer;
-}
-
-static uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align)
-{
-	uint32_t offset = batch_used(batch);
-	offset = ALIGN(offset, align);
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-static uint32_t
-batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor)
-{
-	uint32_t offset = batch_used(batch);
-	offset = (offset + divisor-1) / divisor * divisor;
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-static void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
-{
-	uint32_t offset = batch_align(batch, align);
-	batch->ptr += size;
-	return memset(batch->buffer + offset, 0, size);
-}
-
-static uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr)
-{
-	return (uint8_t *)ptr - batch->buffer;
-}
-
-static uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
-{
-	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
-}
-
 static void
 gen6_render_flush(struct intel_batchbuffer *batch,
 		  drm_intel_context *context, uint32_t batch_end)
@@ -120,7 +76,7 @@ gen6_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, sizeof(*ss), 32);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
 	ss->ss0.surface_type = GEN6_SURFACE_2D;
 	ss->ss0.surface_format = format;
 
@@ -129,7 +85,8 @@ gen6_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
 	ss->ss1.base_addr = buf->bo->offset;
 
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				      batch_offset(batch, ss) + 4,
+				      intel_batchbuffer_subdata_offset(batch,
+				      ss) + 4,
 				      buf->bo, 0,
 				      read_domain, write_domain);
 	igt_assert(ret == 0);
@@ -140,7 +97,7 @@ gen6_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
 	ss->ss3.tiled_surface = buf->tiling != I915_TILING_NONE;
 	ss->ss3.tile_walk     = buf->tiling == I915_TILING_Y;
 
-	return batch_offset(batch, ss);
+	return intel_batchbuffer_subdata_offset(batch, ss);
 }
 
 static uint32_t
@@ -150,14 +107,14 @@ gen6_bind_surfaces(struct intel_batchbuffer *batch,
 {
 	uint32_t *binding_table;
 
-	binding_table = batch_alloc(batch, 32, 32);
+	binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 32);
 
 	binding_table[0] =
 		gen6_bind_buf(batch, dst, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 1);
 	binding_table[1] =
 		gen6_bind_buf(batch, src, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 0);
 
-	return batch_offset(batch, binding_table);
+	return intel_batchbuffer_subdata_offset(batch, binding_table);
 }
 
 static void
@@ -427,12 +384,12 @@ gen6_create_cc_viewport(struct intel_batchbuffer *batch)
 {
 	struct gen6_cc_viewport *vp;
 
-	vp = batch_alloc(batch, sizeof(*vp), 32);
+	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
 
 	vp->min_depth = -1.e35;
 	vp->max_depth = 1.e35;
 
-	return batch_offset(batch, vp);
+	return intel_batchbuffer_subdata_offset(batch, vp);
 }
 
 static uint32_t
@@ -440,7 +397,7 @@ gen6_create_cc_blend(struct intel_batchbuffer *batch)
 {
 	struct gen6_blend_state *blend;
 
-	blend = batch_alloc(batch, sizeof(*blend), 64);
+	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
 
 	blend->blend0.dest_blend_factor = GEN6_BLENDFACTOR_ZERO;
 	blend->blend0.source_blend_factor = GEN6_BLENDFACTOR_ONE;
@@ -450,13 +407,13 @@ gen6_create_cc_blend(struct intel_batchbuffer *batch)
 	blend->blend1.post_blend_clamp_enable = 1;
 	blend->blend1.pre_blend_clamp_enable = 1;
 
-	return batch_offset(batch, blend);
+	return intel_batchbuffer_subdata_offset(batch, blend);
 }
 
 static uint32_t
 gen6_create_kernel(struct intel_batchbuffer *batch)
 {
-	return batch_copy(batch, ps_kernel_nomask_affine,
+	return intel_batchbuffer_copy_data(batch, ps_kernel_nomask_affine,
 			  sizeof(ps_kernel_nomask_affine),
 			  64);
 }
@@ -468,7 +425,7 @@ gen6_create_sampler(struct intel_batchbuffer *batch,
 {
 	struct gen6_sampler_state *ss;
 
-	ss = batch_alloc(batch, sizeof(*ss), 32);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
 	ss->ss0.lod_preclamp = 1;	/* GL mode */
 
 	/* We use the legacy mode to get the semantics specified by
@@ -511,7 +468,7 @@ gen6_create_sampler(struct intel_batchbuffer *batch,
 		break;
 	}
 
-	return batch_offset(batch, ss);
+	return intel_batchbuffer_subdata_offset(batch, ss);
 }
 
 static void gen6_emit_vertex_buffer(struct intel_batchbuffer *batch)
@@ -535,7 +492,7 @@ static uint32_t gen6_emit_primitive(struct intel_batchbuffer *batch)
 		  0 << 9 |
 		  4);
 	OUT_BATCH(3);	/* vertex count */
-	offset = batch_used(batch);
+	offset = batch->ptr - batch->buffer;
 	OUT_BATCH(0);	/* vertex_index */
 	OUT_BATCH(1);	/* single instance */
 	OUT_BATCH(0);	/* start instance location */
@@ -557,7 +514,7 @@ void gen6_render_copyfunc(struct intel_batchbuffer *batch,
 	intel_batchbuffer_flush_with_context(batch, context);
 
 	batch->ptr = batch->buffer + 1024;
-	batch_alloc(batch, 64, 64);
+	intel_batchbuffer_subdata_alloc(batch, 64, 64);
 	wm_table  = gen6_bind_surfaces(batch, src, dst);
 	wm_kernel = gen6_create_kernel(batch);
 	wm_state  = gen6_create_sampler(batch,
@@ -594,7 +551,7 @@ void gen6_render_copyfunc(struct intel_batchbuffer *batch,
 	offset = gen6_emit_primitive(batch);
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 
 	*(uint32_t*)(batch->buffer + offset) =
 		batch_round_upto(batch, VERTEX_SIZE)/VERTEX_SIZE;
diff --git a/lib/rendercopy_gen7.c b/lib/rendercopy_gen7.c
index 3b92406..eb7b9a5 100644
--- a/lib/rendercopy_gen7.c
+++ b/lib/rendercopy_gen7.c
@@ -32,41 +32,6 @@ static const uint32_t ps_kernel[][4] = {
 	{ 0x05800031, 0x20001fa8, 0x008d0e20, 0x90031000 },
 };
 
-static uint32_t
-batch_used(struct intel_batchbuffer *batch)
-{
-	return batch->state - batch->buffer;
-}
-
-static uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align)
-{
-	uint32_t offset = batch_used(batch);
-	offset = ALIGN(offset, align);
-	batch->state = batch->buffer + offset;
-	return offset;
-}
-
-static void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
-{
-	uint32_t offset = batch_align(batch, align);
-	batch->state += size;
-	return memset(batch->buffer + offset, 0, size);
-}
-
-static uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr)
-{
-	return (uint8_t *)ptr - batch->buffer;
-}
-
-static uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
-{
-	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
-}
-
 static void
 gen7_render_flush(struct intel_batchbuffer *batch,
 		  drm_intel_context *context, uint32_t batch_end)
@@ -108,7 +73,7 @@ gen7_bind_buf(struct intel_batchbuffer *batch,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, 8 * sizeof(*ss), 32);
+	ss = intel_batchbuffer_subdata_alloc(batch, 8 * sizeof(*ss), 32);
 
 	ss[0] = (GEN7_SURFACE_2D << GEN7_SURFACE_TYPE_SHIFT |
 		 gen7_tiling_bits(buf->tiling) |
@@ -125,12 +90,13 @@ gen7_bind_buf(struct intel_batchbuffer *batch,
 		ss[7] |= HSW_SURFACE_SWIZZLE(RED, GREEN, BLUE, ALPHA);
 
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				      batch_offset(batch, ss) + 4,
+				      intel_batchbuffer_subdata_offset(batch,
+				      ss) + 4,
 				      buf->bo, 0,
 				      read_domain, write_domain);
 	igt_assert(ret == 0);
 
-	return batch_offset(batch, ss);
+	return intel_batchbuffer_subdata_offset(batch, ss);
 }
 
 static void
@@ -175,7 +141,7 @@ gen7_create_vertex_buffer(struct intel_batchbuffer *batch,
 {
 	uint16_t *v;
 
-	v = batch_alloc(batch, 12*sizeof(*v), 8);
+	v = intel_batchbuffer_subdata_alloc(batch, 12*sizeof(*v), 8);
 
 	v[0] = dst_x + width;
 	v[1] = dst_y + height;
@@ -192,7 +158,7 @@ gen7_create_vertex_buffer(struct intel_batchbuffer *batch,
 	v[10] = src_x;
 	v[11] = src_y;
 
-	return batch_offset(batch, v);
+	return intel_batchbuffer_subdata_offset(batch, v);
 }
 
 static void gen7_emit_vertex_buffer(struct intel_batchbuffer *batch,
@@ -225,14 +191,14 @@ gen7_bind_surfaces(struct intel_batchbuffer *batch,
 {
 	uint32_t *binding_table;
 
-	binding_table = batch_alloc(batch, 8, 32);
+	binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32);
 
 	binding_table[0] =
 		gen7_bind_buf(batch, dst, GEN7_SURFACEFORMAT_B8G8R8A8_UNORM, 1);
 	binding_table[1] =
 		gen7_bind_buf(batch, src, GEN7_SURFACEFORMAT_B8G8R8A8_UNORM, 0);
 
-	return batch_offset(batch, binding_table);
+	return intel_batchbuffer_subdata_offset(batch, binding_table);
 }
 
 static void
@@ -258,7 +224,7 @@ gen7_create_blend_state(struct intel_batchbuffer *batch)
 {
 	struct gen7_blend_state *blend;
 
-	blend = batch_alloc(batch, sizeof(*blend), 64);
+	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
 
 	blend->blend0.dest_blend_factor = GEN7_BLENDFACTOR_ZERO;
 	blend->blend0.source_blend_factor = GEN7_BLENDFACTOR_ONE;
@@ -266,7 +232,7 @@ gen7_create_blend_state(struct intel_batchbuffer *batch)
 	blend->blend1.post_blend_clamp_enable = 1;
 	blend->blend1.pre_blend_clamp_enable = 1;
 
-	return batch_offset(batch, blend);
+	return intel_batchbuffer_subdata_offset(batch, blend);
 }
 
 static void
@@ -290,11 +256,11 @@ gen7_create_cc_viewport(struct intel_batchbuffer *batch)
 {
 	struct gen7_cc_viewport *vp;
 
-	vp = batch_alloc(batch, sizeof(*vp), 32);
+	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
 	vp->min_depth = -1.e35;
 	vp->max_depth = 1.e35;
 
-	return batch_offset(batch, vp);
+	return intel_batchbuffer_subdata_offset(batch, vp);
 }
 
 static void
@@ -312,7 +278,7 @@ gen7_create_sampler(struct intel_batchbuffer *batch)
 {
 	struct gen7_sampler_state *ss;
 
-	ss = batch_alloc(batch, sizeof(*ss), 32);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
 
 	ss->ss0.min_filter = GEN7_MAPFILTER_NEAREST;
 	ss->ss0.mag_filter = GEN7_MAPFILTER_NEAREST;
@@ -323,7 +289,7 @@ gen7_create_sampler(struct intel_batchbuffer *batch)
 
 	ss->ss3.non_normalized_coord = 1;
 
-	return batch_offset(batch, ss);
+	return intel_batchbuffer_subdata_offset(batch, ss);
 }
 
 static void
@@ -478,7 +444,8 @@ gen7_emit_ps(struct intel_batchbuffer *batch)
 		threads = 40 << IVB_PS_MAX_THREADS_SHIFT;
 
 	OUT_BATCH(GEN7_3DSTATE_PS | (8 - 2));
-	OUT_BATCH(batch_copy(batch, ps_kernel, sizeof(ps_kernel), 64));
+	OUT_BATCH(intel_batchbuffer_copy_data(batch, ps_kernel,
+		  sizeof(ps_kernel), 64));
 	OUT_BATCH(1 << GEN7_PS_SAMPLER_COUNT_SHIFT |
 		  2 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT);
 	OUT_BATCH(0); /* scratch address */
diff --git a/lib/rendercopy_gen8.c b/lib/rendercopy_gen8.c
index fe3fedf..fbf049f 100644
--- a/lib/rendercopy_gen8.c
+++ b/lib/rendercopy_gen8.c
@@ -129,41 +129,6 @@ static void annotation_flush(struct annotations_context *aub,
 						 aub->index);
 }
 
-static uint32_t
-batch_used(struct intel_batchbuffer *batch)
-{
-	return batch->ptr - batch->buffer;
-}
-
-static uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align)
-{
-	uint32_t offset = batch_used(batch);
-	offset = ALIGN(offset, align);
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-static void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
-{
-	uint32_t offset = batch_align(batch, align);
-	batch->ptr += size;
-	return memset(batch->buffer + offset, 0, size);
-}
-
-static uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr)
-{
-	return (uint8_t *)ptr - batch->buffer;
-}
-
-static uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
-{
-	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
-}
-
 static void
 gen6_render_flush(struct intel_batchbuffer *batch,
 		  drm_intel_context *context, uint32_t batch_end)
@@ -195,8 +160,8 @@ gen8_bind_buf(struct intel_batchbuffer *batch,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 	annotation_add_state(aub, AUB_TRACE_SURFACE_STATE, offset, sizeof(*ss));
 
 	ss->ss0.surface_type = GEN6_SURFACE_2D;
@@ -212,7 +177,8 @@ gen8_bind_buf(struct intel_batchbuffer *batch,
 	ss->ss8.base_addr = buf->bo->offset;
 
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				      batch_offset(batch, ss) + 8 * 4,
+				      intel_batchbuffer_subdata_offset(batch,
+				      ss) + 8 * 4,
 				      buf->bo, 0,
 				      read_domain, write_domain);
 	igt_assert(ret == 0);
@@ -237,8 +203,8 @@ gen8_bind_surfaces(struct intel_batchbuffer *batch,
 {
 	uint32_t *binding_table, offset;
 
-	binding_table = batch_alloc(batch, 8, 32);
-	offset = batch_offset(batch, binding_table);
+	binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32);
+	offset = intel_batchbuffer_subdata_offset(batch, binding_table);
 	annotation_add_state(aub, AUB_TRACE_BINDING_TABLE, offset, 8);
 
 	binding_table[0] =
@@ -259,8 +225,8 @@ gen8_create_sampler(struct intel_batchbuffer *batch,
 	struct gen8_sampler_state *ss;
 	uint32_t offset;
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 	annotation_add_state(aub, AUB_TRACE_SAMPLER_STATE,
 			     offset, sizeof(*ss));
 
@@ -285,7 +251,7 @@ gen8_fill_ps(struct intel_batchbuffer *batch,
 {
 	uint32_t offset;
 
-	offset = batch_copy(batch, kernel, size, 64);
+	offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
 	annotation_add_state(aub, AUB_TRACE_KERNEL_INSTRUCTIONS, offset, size);
 
 	return offset;
@@ -312,7 +278,7 @@ gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
 	void *start;
 	uint32_t offset;
 
-	batch_align(batch, 8);
+	intel_batchbuffer_align(batch, 8);
 	start = batch->ptr;
 
 	emit_vertex_2s(batch, dst_x + width, dst_y + height);
@@ -327,7 +293,7 @@ gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
 	emit_vertex_normalized(batch, src_x, igt_buf_width(src));
 	emit_vertex_normalized(batch, src_y, igt_buf_height(src));
 
-	offset = batch_offset(batch, start);
+	offset = intel_batchbuffer_subdata_offset(batch, start);
 	annotation_add_state(aub, AUB_TRACE_VERTEX_BUFFER,
 			     offset, 3 * VERTEX_SIZE);
 	return offset;
@@ -413,8 +379,9 @@ gen6_create_cc_state(struct intel_batchbuffer *batch,
 	struct gen6_color_calc_state *cc_state;
 	uint32_t offset;
 
-	cc_state = batch_alloc(batch, sizeof(*cc_state), 64);
-	offset = batch_offset(batch, cc_state);
+	cc_state = intel_batchbuffer_subdata_alloc(batch,
+						   sizeof(*cc_state), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, cc_state);
 	annotation_add_state(aub, AUB_TRACE_CC_STATE,
 			     offset, sizeof(*cc_state));
 
@@ -429,8 +396,8 @@ gen8_create_blend_state(struct intel_batchbuffer *batch,
 	int i;
 	uint32_t offset;
 
-	blend = batch_alloc(batch, sizeof(*blend), 64);
-	offset = batch_offset(batch, blend);
+	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, blend);
 	annotation_add_state(aub, AUB_TRACE_BLEND_STATE,
 			     offset, sizeof(*blend));
 
@@ -452,8 +419,8 @@ gen6_create_cc_viewport(struct intel_batchbuffer *batch,
 	struct gen6_cc_viewport *vp;
 	uint32_t offset;
 
-	vp = batch_alloc(batch, sizeof(*vp), 32);
-	offset = batch_offset(batch, vp);
+	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
+	offset = intel_batchbuffer_subdata_offset(batch, vp);
 	annotation_add_state(aub, AUB_TRACE_CC_VP_STATE,
 			     offset, sizeof(*vp));
 
@@ -472,8 +439,9 @@ gen7_create_sf_clip_viewport(struct intel_batchbuffer *batch,
 	struct gen7_sf_clip_viewport *scv_state;
 	uint32_t offset;
 
-	scv_state = batch_alloc(batch, sizeof(*scv_state), 64);
-	offset = batch_offset(batch, scv_state);
+	scv_state = intel_batchbuffer_subdata_alloc(batch,
+						    sizeof(*scv_state), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, scv_state);
 	annotation_add_state(aub, AUB_TRACE_CLIP_VP_STATE,
 			     offset, sizeof(*scv_state));
 
@@ -492,8 +460,8 @@ gen6_create_scissor_rect(struct intel_batchbuffer *batch,
 	struct gen6_scissor_rect *scissor;
 	uint32_t offset;
 
-	scissor = batch_alloc(batch, sizeof(*scissor), 64);
-	offset = batch_offset(batch, scissor);
+	scissor = intel_batchbuffer_subdata_alloc(batch, sizeof(*scissor), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, scissor);
 	annotation_add_state(aub, AUB_TRACE_SCISSOR_STATE,
 			     offset, sizeof(*scissor));
 
@@ -934,7 +902,7 @@ void gen8_render_copyfunc(struct intel_batchbuffer *batch,
 
 	intel_batchbuffer_flush_with_context(batch, context);
 
-	batch_align(batch, 8);
+	intel_batchbuffer_align(batch, 8);
 
 	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
 
@@ -1019,7 +987,7 @@ void gen8_render_copyfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 	annotation_add_batch(&aub_annotations, batch_end);
 
diff --git a/lib/rendercopy_gen9.c b/lib/rendercopy_gen9.c
index e646e97..9bd1cbb 100644
--- a/lib/rendercopy_gen9.c
+++ b/lib/rendercopy_gen9.c
@@ -130,41 +130,6 @@ static void annotation_flush(struct annotations_context *ctx,
 						 ctx->index);
 }
 
-static uint32_t
-batch_used(struct intel_batchbuffer *batch)
-{
-	return batch->ptr - batch->buffer;
-}
-
-static uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align)
-{
-	uint32_t offset = batch_used(batch);
-	offset = ALIGN(offset, align);
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-static void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
-{
-	uint32_t offset = batch_align(batch, align);
-	batch->ptr += size;
-	return memset(batch->buffer + offset, 0, size);
-}
-
-static uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr)
-{
-	return (uint8_t *)ptr - batch->buffer;
-}
-
-static uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
-{
-	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
-}
-
 static void
 gen6_render_flush(struct intel_batchbuffer *batch,
 		  drm_intel_context *context, uint32_t batch_end)
@@ -193,8 +158,8 @@ gen8_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 	annotation_add_state(&aub_annotations, AUB_TRACE_SURFACE_STATE,
 			     offset, sizeof(*ss));
 
@@ -211,7 +176,8 @@ gen8_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
 	ss->ss8.base_addr = buf->bo->offset;
 
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				      batch_offset(batch, ss) + 8 * 4,
+				      intel_batchbuffer_subdata_offset(batch,
+				      ss) + 8 * 4,
 				      buf->bo, 0,
 				      read_domain, write_domain);
 	assert(ret == 0);
@@ -235,8 +201,8 @@ gen8_bind_surfaces(struct intel_batchbuffer *batch,
 {
 	uint32_t *binding_table, offset;
 
-	binding_table = batch_alloc(batch, 8, 32);
-	offset = batch_offset(batch, binding_table);
+	binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32);
+	offset = intel_batchbuffer_subdata_offset(batch, binding_table);
 	annotation_add_state(&aub_annotations, AUB_TRACE_BINDING_TABLE,
 			     offset, 8);
 
@@ -254,8 +220,8 @@ gen8_create_sampler(struct intel_batchbuffer *batch) {
 	struct gen8_sampler_state *ss;
 	uint32_t offset;
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 	annotation_add_state(&aub_annotations, AUB_TRACE_SAMPLER_STATE,
 			     offset, sizeof(*ss));
 
@@ -279,7 +245,7 @@ gen8_fill_ps(struct intel_batchbuffer *batch,
 {
 	uint32_t offset;
 
-	offset = batch_copy(batch, kernel, size, 64);
+	offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
 	annotation_add_state(&aub_annotations, AUB_TRACE_KERNEL_INSTRUCTIONS,
 			     offset, size);
 
@@ -306,7 +272,7 @@ gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
 	void *start;
 	uint32_t offset;
 
-	batch_align(batch, 8);
+	intel_batchbuffer_align(batch, 8);
 	start = batch->ptr;
 
 	emit_vertex_2s(batch, dst_x + width, dst_y + height);
@@ -321,7 +287,7 @@ gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
 	emit_vertex_normalized(batch, src_x, igt_buf_width(src));
 	emit_vertex_normalized(batch, src_y, igt_buf_height(src));
 
-	offset = batch_offset(batch, start);
+	offset = intel_batchbuffer_subdata_offset(batch, start);
 	annotation_add_state(&aub_annotations, AUB_TRACE_VERTEX_BUFFER,
 			     offset, 3 * VERTEX_SIZE);
 	return offset;
@@ -406,8 +372,9 @@ gen6_create_cc_state(struct intel_batchbuffer *batch)
 	struct gen6_color_calc_state *cc_state;
 	uint32_t offset;
 
-	cc_state = batch_alloc(batch, sizeof(*cc_state), 64);
-	offset = batch_offset(batch, cc_state);
+	cc_state = intel_batchbuffer_subdata_alloc(batch,
+						   sizeof(*cc_state), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, cc_state);
 	annotation_add_state(&aub_annotations, AUB_TRACE_CC_STATE,
 			     offset, sizeof(*cc_state));
 
@@ -421,8 +388,8 @@ gen8_create_blend_state(struct intel_batchbuffer *batch)
 	int i;
 	uint32_t offset;
 
-	blend = batch_alloc(batch, sizeof(*blend), 64);
-	offset = batch_offset(batch, blend);
+	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, blend);
 	annotation_add_state(&aub_annotations, AUB_TRACE_BLEND_STATE,
 			     offset, sizeof(*blend));
 
@@ -443,8 +410,8 @@ gen6_create_cc_viewport(struct intel_batchbuffer *batch)
 	struct gen6_cc_viewport *vp;
 	uint32_t offset;
 
-	vp = batch_alloc(batch, sizeof(*vp), 32);
-	offset = batch_offset(batch, vp);
+	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
+	offset = intel_batchbuffer_subdata_offset(batch, vp);
 	annotation_add_state(&aub_annotations, AUB_TRACE_CC_VP_STATE,
 			     offset, sizeof(*vp));
 
@@ -461,8 +428,9 @@ gen7_create_sf_clip_viewport(struct intel_batchbuffer *batch) {
 	struct gen7_sf_clip_viewport *scv_state;
 	uint32_t offset;
 
-	scv_state = batch_alloc(batch, sizeof(*scv_state), 64);
-	offset = batch_offset(batch, scv_state);
+	scv_state = intel_batchbuffer_subdata_alloc(batch,
+						    sizeof(*scv_state), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, scv_state);
 	annotation_add_state(&aub_annotations, AUB_TRACE_CLIP_VP_STATE,
 			     offset, sizeof(*scv_state));
 
@@ -480,8 +448,8 @@ gen6_create_scissor_rect(struct intel_batchbuffer *batch)
 	struct gen6_scissor_rect *scissor;
 	uint32_t offset;
 
-	scissor = batch_alloc(batch, sizeof(*scissor), 64);
-	offset = batch_offset(batch, scissor);
+	scissor = intel_batchbuffer_subdata_alloc(batch, sizeof(*scissor), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, scissor);
 	annotation_add_state(&aub_annotations, AUB_TRACE_SCISSOR_STATE,
 			     offset, sizeof(*scissor));
 
@@ -940,7 +908,7 @@ void gen9_render_copyfunc(struct intel_batchbuffer *batch,
 
 	intel_batchbuffer_flush_with_context(batch, context);
 
-	batch_align(batch, 8);
+	intel_batchbuffer_align(batch, 8);
 
 	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
 
@@ -1023,7 +991,7 @@ void gen9_render_copyfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	assert(batch_end < BATCH_STATE_SPLIT);
 	annotation_add_batch(&aub_annotations, batch_end);
 
-- 
2.9.5

_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [igt-dev] ✗ Fi.CI.BAT: failure for lib/intel_batchbuffer: Move batch functions from media/render/gpgpu libs
  2018-04-13 15:13 [igt-dev] [PATCH i-g-t] lib/intel_batchbuffer: Move batch functions from media/render/gpgpu libs Lukasz Kalamarz
@ 2018-04-13 15:33 ` Patchwork
  2018-04-16  8:13 ` [igt-dev] [PATCH i-g-t] " Katarzyna Dec
  2018-04-16 15:22 ` [igt-dev] [PATCH i-g-t v2 1/2] lib/rendercopy_gen7: Reorganizing batch allocations Lukasz Kalamarz
  2 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2018-04-13 15:33 UTC (permalink / raw)
  To: Lukasz Kalamarz; +Cc: igt-dev

== Series Details ==

Series: lib/intel_batchbuffer: Move batch functions from media/render/gpgpu libs
URL   : https://patchwork.freedesktop.org/series/41685/
State : failure

== Summary ==

= CI Bug Log - changes from IGT_4429 -> IGTPW_1255 =

== Summary - FAILURE ==

  Serious unknown changes coming with IGTPW_1255 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in IGTPW_1255, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/41685/revisions/1/mbox/

== Possible new issues ==

  Here are the unknown changes that may have been introduced in IGTPW_1255:

  === IGT changes ===

    ==== Possible regressions ====

    igt@gem_render_linear_blits@basic:
      fi-byt-n2820:       PASS -> FAIL +2

    igt@gem_render_tiled_blits@basic:
      fi-ivb-3520m:       PASS -> FAIL +2

    igt@kms_frontbuffer_tracking@basic:
      fi-ivb-3770:        PASS -> FAIL +2
      fi-byt-j1900:       PASS -> FAIL +2
      fi-hsw-4770:        PASS -> FAIL +2

    
== Known issues ==

  Here are the changes found in IGTPW_1255 that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@gem_ringfill@basic-default-hang:
      fi-blb-e6850:       NOTRUN -> DMESG-WARN (fdo#101600)
      fi-pnv-d510:        NOTRUN -> DMESG-WARN (fdo#101600)

    igt@kms_pipe_crc_basic@suspend-read-crc-pipe-c:
      fi-bxt-dsi:         NOTRUN -> INCOMPLETE (fdo#103927)

    
    ==== Possible fixes ====

    igt@kms_pipe_crc_basic@read-crc-pipe-a-frame-sequence:
      fi-skl-6770hq:      FAIL (fdo#103481) -> PASS

    
  fdo#101600 https://bugs.freedesktop.org/show_bug.cgi?id=101600
  fdo#103481 https://bugs.freedesktop.org/show_bug.cgi?id=103481
  fdo#103927 https://bugs.freedesktop.org/show_bug.cgi?id=103927


== Participating hosts (28 -> 33) ==

  Additional (7): fi-kbl-7567u fi-bxt-dsi fi-skl-gvtdvm fi-bdw-gvtdvm fi-pnv-d510 fi-elk-e7500 fi-blb-e6850 
  Missing    (2): fi-ilk-m540 fi-skl-6700hq 


== Build changes ==

    * IGT: IGT_4429 -> IGTPW_1255
    * Linux: CI_DRM_4050 -> CI_DRM_4053

  CI_DRM_4050: dc857e9d853d2bd074adaf49c49deb473328ea72 @ git://anongit.freedesktop.org/gfx-ci/linux
  CI_DRM_4053: e2599f775a9c1c27f702e90e6432e555764edcd8 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGTPW_1255: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1255/
  IGT_4429: 80e4910581c7310258375a003a5de9a57ed24546 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  piglit_4429: 93b35926a150e318439d2505901288594b3548f5 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1255/issues.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [igt-dev] [PATCH i-g-t] lib/intel_batchbuffer: Move batch functions from media/render/gpgpu libs
  2018-04-13 15:13 [igt-dev] [PATCH i-g-t] lib/intel_batchbuffer: Move batch functions from media/render/gpgpu libs Lukasz Kalamarz
  2018-04-13 15:33 ` [igt-dev] ✗ Fi.CI.BAT: failure for " Patchwork
@ 2018-04-16  8:13 ` Katarzyna Dec
  2018-04-16 15:22 ` [igt-dev] [PATCH i-g-t v2 1/2] lib/rendercopy_gen7: Reorganizing batch allocations Lukasz Kalamarz
  2 siblings, 0 replies; 7+ messages in thread
From: Katarzyna Dec @ 2018-04-16  8:13 UTC (permalink / raw)
  To: Lukasz Kalamarz; +Cc: igt-dev

On Fri, Apr 13, 2018 at 05:13:36PM +0200, Lukasz Kalamarz wrote:
> Batch functions were copy/pasted across several libs.
> With moving it into intel_batchbuffer lib test can now be
> easly maintained without worrying that we forgot to modify
> older version of lib.
Please add more info in commit msg about what has changed.
> 
> Signed-off-by: Lukasz Kalamarz <lukasz.kalamarz@intel.com>
> Cc: Katarzyna Dec <katarzyna.dec@intel.com>
> Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> ---
>  lib/gpgpu_fill.c        |  6 ++--
>  lib/gpu_fill.c          | 68 +++++++++++-----------------------------
>  lib/gpu_fill.h          | 15 ---------
>  lib/intel_batchbuffer.c | 58 ++++++++++++++++++++++++++++++----
>  lib/intel_batchbuffer.h | 18 +++++++++--
>  lib/media_fill_gen7.c   |  2 +-
>  lib/media_fill_gen8.c   |  2 +-
>  lib/media_fill_gen9.c   |  2 +-
>  lib/media_spin.c        | 63 +++++++++----------------------------
>  lib/rendercopy_gen6.c   | 75 ++++++++++----------------------------------
>  lib/rendercopy_gen7.c   | 65 ++++++++++-----------------------------
>  lib/rendercopy_gen8.c   | 82 +++++++++++++++----------------------------------
>  lib/rendercopy_gen9.c   | 82 +++++++++++++++----------------------------------
>  13 files changed, 188 insertions(+), 350 deletions(-)
> 
> diff --git a/lib/gpgpu_fill.c b/lib/gpgpu_fill.c
> index 72a1445..010dde0 100644
> --- a/lib/gpgpu_fill.c
> +++ b/lib/gpgpu_fill.c
> @@ -137,7 +137,7 @@ gen7_gpgpu_fillfunc(struct intel_batchbuffer *batch,
>  
>  	OUT_BATCH(MI_BATCH_BUFFER_END);
>  
> -	batch_end = batch_align(batch, 8);
> +	batch_end = intel_batchbuffer_align(batch, 8);
>  	igt_assert(batch_end < BATCH_STATE_SPLIT);
>  
>  	gen7_render_flush(batch, batch_end);
> @@ -185,7 +185,7 @@ gen8_gpgpu_fillfunc(struct intel_batchbuffer *batch,
>  
>  	OUT_BATCH(MI_BATCH_BUFFER_END);
>  
> -	batch_end = batch_align(batch, 8);
> +	batch_end = intel_batchbuffer_align(batch, 8);
>  	igt_assert(batch_end < BATCH_STATE_SPLIT);
>  
>  	gen7_render_flush(batch, batch_end);
> @@ -234,7 +234,7 @@ gen9_gpgpu_fillfunc(struct intel_batchbuffer *batch,
>  
>  	OUT_BATCH(MI_BATCH_BUFFER_END);
>  
> -	batch_end = batch_align(batch, 8);
> +	batch_end = intel_batchbuffer_align(batch, 8);
>  	igt_assert(batch_end < BATCH_STATE_SPLIT);
>  
>  	gen7_render_flush(batch, batch_end);
> diff --git a/lib/gpu_fill.c b/lib/gpu_fill.c
> index f1fe5b3..5c1e217 100644
> --- a/lib/gpu_fill.c
> +++ b/lib/gpu_fill.c
> @@ -24,41 +24,6 @@
>  
>  #include "gpu_fill.h"
>  
> -uint32_t
> -batch_used(struct intel_batchbuffer *batch)
> -{
> -	return batch->ptr - batch->buffer;
> -}
> -
> -uint32_t
> -batch_align(struct intel_batchbuffer *batch, uint32_t align)
> -{
> -	uint32_t offset = batch_used(batch);
> -	offset = ALIGN(offset, align);
> -	batch->ptr = batch->buffer + offset;
> -	return offset;
> -}
> -
> -void *
> -batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
> -{
> -	uint32_t offset = batch_align(batch, align);
> -	batch->ptr += size;
> -	return memset(batch->buffer + offset, 0, size);
> -}
> -
> -uint32_t
> -batch_offset(struct intel_batchbuffer *batch, void *ptr)
> -{
> -	return (uint8_t *)ptr - batch->buffer;
> -}
> -
> -uint32_t
> -batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
> -{
> -	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
> -}
> -
>  void
>  gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
>  {
> @@ -78,8 +43,10 @@ gen7_fill_curbe_buffer_data(struct intel_batchbuffer *batch,
>  	uint8_t *curbe_buffer;
>  	uint32_t offset;
>  
> -	curbe_buffer = batch_alloc(batch, sizeof(uint32_t) * 8, 64);
> -	offset = batch_offset(batch, curbe_buffer);
> +	curbe_buffer = intel_batchbuffer_subdata_alloc(batch,
> +						       sizeof(uint32_t) * 8,
> +						       64);
> +	offset = intel_batchbuffer_subdata_offset(batch, curbe_buffer);
>  	*curbe_buffer = color;
>  
>  	return offset;
> @@ -102,8 +69,8 @@ gen7_fill_surface_state(struct intel_batchbuffer *batch,
>  		read_domain = I915_GEM_DOMAIN_SAMPLER;
>  	}
>  
> -	ss = batch_alloc(batch, sizeof(*ss), 64);
> -	offset = batch_offset(batch, ss);
> +	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, ss);
>  
>  	ss->ss0.surface_type = GEN7_SURFACE_2D;
>  	ss->ss0.surface_format = format;
> @@ -116,7 +83,7 @@ gen7_fill_surface_state(struct intel_batchbuffer *batch,
>  
>  	ss->ss1.base_addr = buf->bo->offset;
>  	ret = drm_intel_bo_emit_reloc(batch->bo,
> -				batch_offset(batch, ss) + 4,
> +				intel_batchbuffer_subdata_offset(batch, ss) + 4,
>  				buf->bo, 0,
>  				read_domain, write_domain);
>  	igt_assert(ret == 0);
> @@ -140,8 +107,8 @@ gen7_fill_binding_table(struct intel_batchbuffer *batch,
>  {
>  	uint32_t *binding_table, offset;
>  
> -	binding_table = batch_alloc(batch, 32, 64);
> -	offset = batch_offset(batch, binding_table);
> +	binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, binding_table);
>  	if (IS_GEN7(batch->devid))
>  		binding_table[0] = gen7_fill_surface_state(batch, dst,
>  						GEN7_SURFACEFORMAT_R8_UNORM, 1);
> @@ -159,7 +126,7 @@ gen7_fill_kernel(struct intel_batchbuffer *batch,
>  {
>  	uint32_t offset;
>  
> -	offset = batch_copy(batch, kernel, size, 64);
> +	offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
>  
>  	return offset;
>  }
> @@ -175,8 +142,8 @@ gen7_fill_interface_descriptor(struct intel_batchbuffer *batch, struct igt_buf *
>  	binding_table_offset = gen7_fill_binding_table(batch, dst);
>  	kernel_offset = gen7_fill_kernel(batch, kernel, size);
>  
> -	idd = batch_alloc(batch, sizeof(*idd), 64);
> -	offset = batch_offset(batch, idd);
> +	idd = intel_batchbuffer_subdata_alloc(batch, sizeof(*idd), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, idd);
>  
>  	idd->desc0.kernel_start_pointer = (kernel_offset >> 6);
>  
> @@ -401,8 +368,8 @@ gen8_fill_surface_state(struct intel_batchbuffer *batch,
>  		read_domain = I915_GEM_DOMAIN_SAMPLER;
>  	}
>  
> -	ss = batch_alloc(batch, sizeof(*ss), 64);
> -	offset = batch_offset(batch, ss);
> +	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, ss);
>  
>  	ss->ss0.surface_type = GEN8_SURFACE_2D;
>  	ss->ss0.surface_format = format;
> @@ -418,7 +385,8 @@ gen8_fill_surface_state(struct intel_batchbuffer *batch,
>  	ss->ss8.base_addr = buf->bo->offset;
>  
>  	ret = drm_intel_bo_emit_reloc(batch->bo,
> -				batch_offset(batch, ss) + 8 * 4,
> +				intel_batchbuffer_subdata_offset(batch,
> +				ss) + 8 * 4,
>  				buf->bo, 0,
>  				read_domain, write_domain);
>  	igt_assert(ret == 0);
> @@ -445,8 +413,8 @@ gen8_fill_interface_descriptor(struct intel_batchbuffer *batch, struct igt_buf *
>  	binding_table_offset = gen7_fill_binding_table(batch, dst);
>  	kernel_offset = gen7_fill_kernel(batch, kernel, size);
>  
> -	idd = batch_alloc(batch, sizeof(*idd), 64);
> -	offset = batch_offset(batch, idd);
> +	idd = intel_batchbuffer_subdata_alloc(batch, sizeof(*idd), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, idd);
>  
>  	idd->desc0.kernel_start_pointer = (kernel_offset >> 6);
>  
> diff --git a/lib/gpu_fill.h b/lib/gpu_fill.h
> index 072e9f7..067d498 100644
> --- a/lib/gpu_fill.h
> +++ b/lib/gpu_fill.h
> @@ -37,21 +37,6 @@
>  #include "intel_chipset.h"
>  #include <assert.h>
>  
> -uint32_t
> -batch_used(struct intel_batchbuffer *batch);
> -
> -uint32_t
> -batch_align(struct intel_batchbuffer *batch, uint32_t align);
> -
> -void *
> -batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align);
> -
> -uint32_t
> -batch_offset(struct intel_batchbuffer *batch, void *ptr);
> -
> -uint32_t
> -batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align);
> -
>  void
>  gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end);
>  
> diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
> index 10d4dce..ec7ef98 100644
> --- a/lib/intel_batchbuffer.c
> +++ b/lib/intel_batchbuffer.c
> @@ -66,6 +66,49 @@
>   */
>  
>  /**
> + * intel_batchbuffer_align:
> + * @batch: batchbuffer object
> + * @align: value in bytes to which we want to align
> + *
> + * Align batchbuffer offset to given value and then return it
> + */
> +uint32_t
> +intel_batchbuffer_align(struct intel_batchbuffer *batch, uint32_t align)
> +{
> +	uint32_t offset = batch->ptr - batch->buffer;
> +
> +	offset = ALIGN(offset, align);
> +	batch->ptr = batch->buffer + offset;
> +	return offset;
> +}
> +
> +uint32_t
> +batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor)
All functions are starting with intel_* in this lib, maybe this one also should?
> +{
> +	uint32_t offset = batch->ptr - batch->buffer;
> +
> +	offset = (offset + divisor-1) / divisor * divisor;
> +	batch->ptr = batch->buffer + offset;
> +	return offset;
> +}
> +
Please add documentation to all newly added functions (almost all in the lib
have one)


> +void *
> +intel_batchbuffer_subdata_alloc(struct intel_batchbuffer *batch, uint32_t size,
> +				uint32_t align)
> +{
> +	uint32_t offset = intel_batchbuffer_align(batch, align);
> +
> +	batch->ptr += size;
> +	return memset(batch->buffer + offset, 0, size);
> +}
> +
> +uint32_t
> +intel_batchbuffer_subdata_offset(struct intel_batchbuffer *batch, void *ptr)
> +{
> +	return (uint8_t *)ptr - batch->buffer;
> +}
> +
> +/**
>   * intel_batchbuffer_reset:
>   * @batch: batchbuffer object
>   *
> @@ -288,7 +331,7 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
>  }
>  
>  /**
> - * intel_batchbuffer_data:
> + * intel_batchbuffer_copy_data:
>   * @batch: batchbuffer object
>   * @data: pointer to the data to write into the batchbuffer
>   * @bytes: number of bytes to write into the batchbuffer
> @@ -296,14 +339,17 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
>   * This transfers the given @data into the batchbuffer. Note that the length
>   * must be DWORD aligned, i.e. multiples of 32bits.
>   */
> -void
> -intel_batchbuffer_data(struct intel_batchbuffer *batch,
> -                       const void *data, unsigned int bytes)
> +uint32_t
> +intel_batchbuffer_copy_data(struct intel_batchbuffer *batch,
> +			    const void *data, unsigned int bytes,
> +			    uint32_t align)
>  {
> +	uint32_t *subdata, *copied_data;
Checkpatch is not flagging this place, but there should be a newline after
these declarations.

Kasia
>  	igt_assert((bytes & 3) == 0);
>  	intel_batchbuffer_require_space(batch, bytes);
> -	memcpy(batch->ptr, data, bytes);
> -	batch->ptr += bytes;
> +	subdata = intel_batchbuffer_subdata_alloc(batch, bytes, align);
> +	copied_data = memcpy(subdata, data, bytes);
> +	return intel_batchbuffer_subdata_offset(batch, copied_data);
>  }
>  
>  /**
> diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
> index 2c262d7..7468eaf 100644
> --- a/lib/intel_batchbuffer.h
> +++ b/lib/intel_batchbuffer.h
> @@ -41,8 +41,9 @@ void intel_batchbuffer_flush_with_context(struct intel_batchbuffer *batch,
>  
>  void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
>  
> -void intel_batchbuffer_data(struct intel_batchbuffer *batch,
> -                            const void *data, unsigned int bytes);
> +uint32_t intel_batchbuffer_copy_data(struct intel_batchbuffer *batch,
> +				const void *data, unsigned int bytes,
> +				uint32_t align);
>  
>  void intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
>  				  drm_intel_bo *buffer,
> @@ -51,6 +52,19 @@ void intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
>  				  uint32_t write_domain,
>  				  int fenced);
>  
> +uint32_t
> +intel_batchbuffer_align(struct intel_batchbuffer *batch, uint32_t align);
> +
> +uint32_t
> +batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor);
> +
> +void *
> +intel_batchbuffer_subdata_alloc(struct intel_batchbuffer *batch,
> +				uint32_t size, uint32_t align);
> +
> +uint32_t
> +intel_batchbuffer_subdata_offset(struct intel_batchbuffer *batch, void *ptr);
> +
>  /* Inline functions - might actually be better off with these
>   * non-inlined.  Certainly better off switching all command packets to
>   * be passed as structs rather than dwords, but that's a little bit of
> diff --git a/lib/media_fill_gen7.c b/lib/media_fill_gen7.c
> index 5a8c32f..3dc5617 100644
> --- a/lib/media_fill_gen7.c
> +++ b/lib/media_fill_gen7.c
> @@ -79,7 +79,7 @@ gen7_media_fillfunc(struct intel_batchbuffer *batch,
>  
>  	OUT_BATCH(MI_BATCH_BUFFER_END);
>  
> -	batch_end = batch_align(batch, 8);
> +	batch_end = intel_batchbuffer_align(batch, 8);
>  	igt_assert(batch_end < BATCH_STATE_SPLIT);
>  
>  	gen7_render_flush(batch, batch_end);
> diff --git a/lib/media_fill_gen8.c b/lib/media_fill_gen8.c
> index d6dd741..63fe72e 100644
> --- a/lib/media_fill_gen8.c
> +++ b/lib/media_fill_gen8.c
> @@ -82,7 +82,7 @@ gen8_media_fillfunc(struct intel_batchbuffer *batch,
>  
>  	OUT_BATCH(MI_BATCH_BUFFER_END);
>  
> -	batch_end = batch_align(batch, 8);
> +	batch_end = intel_batchbuffer_align(batch, 8);
>  	igt_assert(batch_end < BATCH_STATE_SPLIT);
>  
>  	gen7_render_flush(batch, batch_end);
> diff --git a/lib/media_fill_gen9.c b/lib/media_fill_gen9.c
> index a9a829f..78e892f 100644
> --- a/lib/media_fill_gen9.c
> +++ b/lib/media_fill_gen9.c
> @@ -91,7 +91,7 @@ gen9_media_fillfunc(struct intel_batchbuffer *batch,
>  
>  	OUT_BATCH(MI_BATCH_BUFFER_END);
>  
> -	batch_end = batch_align(batch, 8);
> +	batch_end = intel_batchbuffer_align(batch, 8);
>  	assert(batch_end < BATCH_STATE_SPLIT);
>  
>  	gen7_render_flush(batch, batch_end);
> diff --git a/lib/media_spin.c b/lib/media_spin.c
> index 580c109..20af549 100644
> --- a/lib/media_spin.c
> +++ b/lib/media_spin.c
> @@ -45,42 +45,6 @@ static const uint32_t spin_kernel[][4] = {
>  	{ 0x07800031, 0x20000a40, 0x0e000e00, 0x82000010 }, /* send.ts (16)null<1> r112<0;1;0>:d 0x82000010 */
>  };
>  
> -static uint32_t
> -batch_used(struct intel_batchbuffer *batch)
> -{
> -	return batch->ptr - batch->buffer;
> -}
> -
> -static uint32_t
> -batch_align(struct intel_batchbuffer *batch, uint32_t align)
> -{
> -	uint32_t offset = batch_used(batch);
> -	offset = ALIGN(offset, align);
> -	batch->ptr = batch->buffer + offset;
> -	return offset;
> -}
> -
> -static void *
> -batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
> -{
> -	uint32_t offset = batch_align(batch, align);
> -	batch->ptr += size;
> -	return memset(batch->buffer + offset, 0, size);
> -}
> -
> -static uint32_t
> -batch_offset(struct intel_batchbuffer *batch, void *ptr)
> -{
> -	return (uint8_t *)ptr - batch->buffer;
> -}
> -
> -static uint32_t
> -batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size,
> -	   uint32_t align)
> -{
> -	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
> -}
> -
>  static void
>  gen8_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
>  {
> @@ -100,8 +64,8 @@ gen8_spin_curbe_buffer_data(struct intel_batchbuffer *batch,
>  	uint32_t *curbe_buffer;
>  	uint32_t offset;
>  
> -	curbe_buffer = batch_alloc(batch, 64, 64);
> -	offset = batch_offset(batch, curbe_buffer);
> +	curbe_buffer = intel_batchbuffer_subdata_alloc(batch, 64, 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, curbe_buffer);
>  	*curbe_buffer = iters;
>  
>  	return offset;
> @@ -124,8 +88,8 @@ gen8_spin_surface_state(struct intel_batchbuffer *batch,
>  		read_domain = I915_GEM_DOMAIN_SAMPLER;
>  	}
>  
> -	ss = batch_alloc(batch, sizeof(*ss), 64);
> -	offset = batch_offset(batch, ss);
> +	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, ss);
>  
>  	ss->ss0.surface_type = GEN8_SURFACE_2D;
>  	ss->ss0.surface_format = format;
> @@ -141,7 +105,8 @@ gen8_spin_surface_state(struct intel_batchbuffer *batch,
>  	ss->ss8.base_addr = buf->bo->offset;
>  
>  	ret = drm_intel_bo_emit_reloc(batch->bo,
> -				batch_offset(batch, ss) + 8 * 4,
> +				intel_batchbuffer_subdata_offset(batch,
> +				ss) + 8 * 4,
>  				buf->bo, 0,
>  				read_domain, write_domain);
>  	igt_assert_eq(ret, 0);
> @@ -164,8 +129,8 @@ gen8_spin_binding_table(struct intel_batchbuffer *batch,
>  {
>  	uint32_t *binding_table, offset;
>  
> -	binding_table = batch_alloc(batch, 32, 64);
> -	offset = batch_offset(batch, binding_table);
> +	binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, binding_table);
>  
>  	binding_table[0] = gen8_spin_surface_state(batch, dst,
>  					GEN8_SURFACEFORMAT_R8_UNORM, 1);
> @@ -180,7 +145,7 @@ gen8_spin_media_kernel(struct intel_batchbuffer *batch,
>  {
>  	uint32_t offset;
>  
> -	offset = batch_copy(batch, kernel, size, 64);
> +	offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
>  
>  	return offset;
>  }
> @@ -197,8 +162,8 @@ gen8_spin_interface_descriptor(struct intel_batchbuffer *batch,
>  	kernel_offset = gen8_spin_media_kernel(batch, spin_kernel,
>  					       sizeof(spin_kernel));
>  
> -	idd = batch_alloc(batch, sizeof(*idd), 64);
> -	offset = batch_offset(batch, idd);
> +	idd = intel_batchbuffer_subdata_alloc(batch, sizeof(*idd), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, idd);
>  
>  	idd->desc0.kernel_start_pointer = (kernel_offset >> 6);
>  
> @@ -444,7 +409,7 @@ gen8_media_spinfunc(struct intel_batchbuffer *batch,
>  
>  	OUT_BATCH(MI_BATCH_BUFFER_END);
>  
> -	batch_end = batch_align(batch, 8);
> +	batch_end = intel_batchbuffer_align(batch, 8);
>  	igt_assert(batch_end < BATCH_STATE_SPLIT);
>  
>  	gen8_render_flush(batch, batch_end);
> @@ -482,7 +447,7 @@ gen8lp_media_spinfunc(struct intel_batchbuffer *batch,
>  
>  	OUT_BATCH(MI_BATCH_BUFFER_END);
>  
> -	batch_end = batch_align(batch, 8);
> +	batch_end = intel_batchbuffer_align(batch, 8);
>  	igt_assert(batch_end < BATCH_STATE_SPLIT);
>  
>  	gen8_render_flush(batch, batch_end);
> @@ -532,7 +497,7 @@ gen9_media_spinfunc(struct intel_batchbuffer *batch,
>  
>  	OUT_BATCH(MI_BATCH_BUFFER_END);
>  
> -	batch_end = batch_align(batch, 8);
> +	batch_end = intel_batchbuffer_align(batch, 8);
>  	igt_assert(batch_end < BATCH_STATE_SPLIT);
>  
>  	gen8_render_flush(batch, batch_end);
> diff --git a/lib/rendercopy_gen6.c b/lib/rendercopy_gen6.c
> index 8c24cf8..ddc9e7a 100644
> --- a/lib/rendercopy_gen6.c
> +++ b/lib/rendercopy_gen6.c
> @@ -48,50 +48,6 @@ static const uint32_t ps_kernel_nomask_affine[][4] = {
>  	{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
>  };
>  
> -static uint32_t
> -batch_used(struct intel_batchbuffer *batch)
> -{
> -	return batch->ptr - batch->buffer;
> -}
> -
> -static uint32_t
> -batch_align(struct intel_batchbuffer *batch, uint32_t align)
> -{
> -	uint32_t offset = batch_used(batch);
> -	offset = ALIGN(offset, align);
> -	batch->ptr = batch->buffer + offset;
> -	return offset;
> -}
> -
> -static uint32_t
> -batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor)
> -{
> -	uint32_t offset = batch_used(batch);
> -	offset = (offset + divisor-1) / divisor * divisor;
> -	batch->ptr = batch->buffer + offset;
> -	return offset;
> -}
> -
> -static void *
> -batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
> -{
> -	uint32_t offset = batch_align(batch, align);
> -	batch->ptr += size;
> -	return memset(batch->buffer + offset, 0, size);
> -}
> -
> -static uint32_t
> -batch_offset(struct intel_batchbuffer *batch, void *ptr)
> -{
> -	return (uint8_t *)ptr - batch->buffer;
> -}
> -
> -static uint32_t
> -batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
> -{
> -	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
> -}
> -
>  static void
>  gen6_render_flush(struct intel_batchbuffer *batch,
>  		  drm_intel_context *context, uint32_t batch_end)
> @@ -120,7 +76,7 @@ gen6_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
>  		read_domain = I915_GEM_DOMAIN_SAMPLER;
>  	}
>  
> -	ss = batch_alloc(batch, sizeof(*ss), 32);
> +	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
>  	ss->ss0.surface_type = GEN6_SURFACE_2D;
>  	ss->ss0.surface_format = format;
>  
> @@ -129,7 +85,8 @@ gen6_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
>  	ss->ss1.base_addr = buf->bo->offset;
>  
>  	ret = drm_intel_bo_emit_reloc(batch->bo,
> -				      batch_offset(batch, ss) + 4,
> +				      intel_batchbuffer_subdata_offset(batch,
> +				      ss) + 4,
>  				      buf->bo, 0,
>  				      read_domain, write_domain);
>  	igt_assert(ret == 0);
> @@ -140,7 +97,7 @@ gen6_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
>  	ss->ss3.tiled_surface = buf->tiling != I915_TILING_NONE;
>  	ss->ss3.tile_walk     = buf->tiling == I915_TILING_Y;
>  
> -	return batch_offset(batch, ss);
> +	return intel_batchbuffer_subdata_offset(batch, ss);
>  }
>  
>  static uint32_t
> @@ -150,14 +107,14 @@ gen6_bind_surfaces(struct intel_batchbuffer *batch,
>  {
>  	uint32_t *binding_table;
>  
> -	binding_table = batch_alloc(batch, 32, 32);
> +	binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 32);
>  
>  	binding_table[0] =
>  		gen6_bind_buf(batch, dst, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 1);
>  	binding_table[1] =
>  		gen6_bind_buf(batch, src, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 0);
>  
> -	return batch_offset(batch, binding_table);
> +	return intel_batchbuffer_subdata_offset(batch, binding_table);
>  }
>  
>  static void
> @@ -427,12 +384,12 @@ gen6_create_cc_viewport(struct intel_batchbuffer *batch)
>  {
>  	struct gen6_cc_viewport *vp;
>  
> -	vp = batch_alloc(batch, sizeof(*vp), 32);
> +	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
>  
>  	vp->min_depth = -1.e35;
>  	vp->max_depth = 1.e35;
>  
> -	return batch_offset(batch, vp);
> +	return intel_batchbuffer_subdata_offset(batch, vp);
>  }
>  
>  static uint32_t
> @@ -440,7 +397,7 @@ gen6_create_cc_blend(struct intel_batchbuffer *batch)
>  {
>  	struct gen6_blend_state *blend;
>  
> -	blend = batch_alloc(batch, sizeof(*blend), 64);
> +	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
>  
>  	blend->blend0.dest_blend_factor = GEN6_BLENDFACTOR_ZERO;
>  	blend->blend0.source_blend_factor = GEN6_BLENDFACTOR_ONE;
> @@ -450,13 +407,13 @@ gen6_create_cc_blend(struct intel_batchbuffer *batch)
>  	blend->blend1.post_blend_clamp_enable = 1;
>  	blend->blend1.pre_blend_clamp_enable = 1;
>  
> -	return batch_offset(batch, blend);
> +	return intel_batchbuffer_subdata_offset(batch, blend);
>  }
>  
>  static uint32_t
>  gen6_create_kernel(struct intel_batchbuffer *batch)
>  {
> -	return batch_copy(batch, ps_kernel_nomask_affine,
> +	return intel_batchbuffer_copy_data(batch, ps_kernel_nomask_affine,
>  			  sizeof(ps_kernel_nomask_affine),
>  			  64);
>  }
> @@ -468,7 +425,7 @@ gen6_create_sampler(struct intel_batchbuffer *batch,
>  {
>  	struct gen6_sampler_state *ss;
>  
> -	ss = batch_alloc(batch, sizeof(*ss), 32);
> +	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
>  	ss->ss0.lod_preclamp = 1;	/* GL mode */
>  
>  	/* We use the legacy mode to get the semantics specified by
> @@ -511,7 +468,7 @@ gen6_create_sampler(struct intel_batchbuffer *batch,
>  		break;
>  	}
>  
> -	return batch_offset(batch, ss);
> +	return intel_batchbuffer_subdata_offset(batch, ss);
>  }
>  
>  static void gen6_emit_vertex_buffer(struct intel_batchbuffer *batch)
> @@ -535,7 +492,7 @@ static uint32_t gen6_emit_primitive(struct intel_batchbuffer *batch)
>  		  0 << 9 |
>  		  4);
>  	OUT_BATCH(3);	/* vertex count */
> -	offset = batch_used(batch);
> +	offset = batch->ptr - batch->buffer;
>  	OUT_BATCH(0);	/* vertex_index */
>  	OUT_BATCH(1);	/* single instance */
>  	OUT_BATCH(0);	/* start instance location */
> @@ -557,7 +514,7 @@ void gen6_render_copyfunc(struct intel_batchbuffer *batch,
>  	intel_batchbuffer_flush_with_context(batch, context);
>  
>  	batch->ptr = batch->buffer + 1024;
> -	batch_alloc(batch, 64, 64);
> +	intel_batchbuffer_subdata_alloc(batch, 64, 64);
>  	wm_table  = gen6_bind_surfaces(batch, src, dst);
>  	wm_kernel = gen6_create_kernel(batch);
>  	wm_state  = gen6_create_sampler(batch,
> @@ -594,7 +551,7 @@ void gen6_render_copyfunc(struct intel_batchbuffer *batch,
>  	offset = gen6_emit_primitive(batch);
>  
>  	OUT_BATCH(MI_BATCH_BUFFER_END);
> -	batch_end = batch_align(batch, 8);
> +	batch_end = intel_batchbuffer_align(batch, 8);
>  
>  	*(uint32_t*)(batch->buffer + offset) =
>  		batch_round_upto(batch, VERTEX_SIZE)/VERTEX_SIZE;
> diff --git a/lib/rendercopy_gen7.c b/lib/rendercopy_gen7.c
> index 3b92406..eb7b9a5 100644
> --- a/lib/rendercopy_gen7.c
> +++ b/lib/rendercopy_gen7.c
> @@ -32,41 +32,6 @@ static const uint32_t ps_kernel[][4] = {
>  	{ 0x05800031, 0x20001fa8, 0x008d0e20, 0x90031000 },
>  };
>  
> -static uint32_t
> -batch_used(struct intel_batchbuffer *batch)
> -{
> -	return batch->state - batch->buffer;
> -}
> -
> -static uint32_t
> -batch_align(struct intel_batchbuffer *batch, uint32_t align)
> -{
> -	uint32_t offset = batch_used(batch);
> -	offset = ALIGN(offset, align);
> -	batch->state = batch->buffer + offset;
> -	return offset;
> -}
> -
> -static void *
> -batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
> -{
> -	uint32_t offset = batch_align(batch, align);
> -	batch->state += size;
> -	return memset(batch->buffer + offset, 0, size);
> -}
> -
> -static uint32_t
> -batch_offset(struct intel_batchbuffer *batch, void *ptr)
> -{
> -	return (uint8_t *)ptr - batch->buffer;
> -}
> -
> -static uint32_t
> -batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
> -{
> -	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
> -}
> -
>  static void
>  gen7_render_flush(struct intel_batchbuffer *batch,
>  		  drm_intel_context *context, uint32_t batch_end)
> @@ -108,7 +73,7 @@ gen7_bind_buf(struct intel_batchbuffer *batch,
>  		read_domain = I915_GEM_DOMAIN_SAMPLER;
>  	}
>  
> -	ss = batch_alloc(batch, 8 * sizeof(*ss), 32);
> +	ss = intel_batchbuffer_subdata_alloc(batch, 8 * sizeof(*ss), 32);
>  
>  	ss[0] = (GEN7_SURFACE_2D << GEN7_SURFACE_TYPE_SHIFT |
>  		 gen7_tiling_bits(buf->tiling) |
> @@ -125,12 +90,13 @@ gen7_bind_buf(struct intel_batchbuffer *batch,
>  		ss[7] |= HSW_SURFACE_SWIZZLE(RED, GREEN, BLUE, ALPHA);
>  
>  	ret = drm_intel_bo_emit_reloc(batch->bo,
> -				      batch_offset(batch, ss) + 4,
> +				      intel_batchbuffer_subdata_offset(batch,
> +				      ss) + 4,
>  				      buf->bo, 0,
>  				      read_domain, write_domain);
>  	igt_assert(ret == 0);
>  
> -	return batch_offset(batch, ss);
> +	return intel_batchbuffer_subdata_offset(batch, ss);
>  }
>  
>  static void
> @@ -175,7 +141,7 @@ gen7_create_vertex_buffer(struct intel_batchbuffer *batch,
>  {
>  	uint16_t *v;
>  
> -	v = batch_alloc(batch, 12*sizeof(*v), 8);
> +	v = intel_batchbuffer_subdata_alloc(batch, 12*sizeof(*v), 8);
>  
>  	v[0] = dst_x + width;
>  	v[1] = dst_y + height;
> @@ -192,7 +158,7 @@ gen7_create_vertex_buffer(struct intel_batchbuffer *batch,
>  	v[10] = src_x;
>  	v[11] = src_y;
>  
> -	return batch_offset(batch, v);
> +	return intel_batchbuffer_subdata_offset(batch, v);
>  }
>  
>  static void gen7_emit_vertex_buffer(struct intel_batchbuffer *batch,
> @@ -225,14 +191,14 @@ gen7_bind_surfaces(struct intel_batchbuffer *batch,
>  {
>  	uint32_t *binding_table;
>  
> -	binding_table = batch_alloc(batch, 8, 32);
> +	binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32);
>  
>  	binding_table[0] =
>  		gen7_bind_buf(batch, dst, GEN7_SURFACEFORMAT_B8G8R8A8_UNORM, 1);
>  	binding_table[1] =
>  		gen7_bind_buf(batch, src, GEN7_SURFACEFORMAT_B8G8R8A8_UNORM, 0);
>  
> -	return batch_offset(batch, binding_table);
> +	return intel_batchbuffer_subdata_offset(batch, binding_table);
>  }
>  
>  static void
> @@ -258,7 +224,7 @@ gen7_create_blend_state(struct intel_batchbuffer *batch)
>  {
>  	struct gen7_blend_state *blend;
>  
> -	blend = batch_alloc(batch, sizeof(*blend), 64);
> +	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
>  
>  	blend->blend0.dest_blend_factor = GEN7_BLENDFACTOR_ZERO;
>  	blend->blend0.source_blend_factor = GEN7_BLENDFACTOR_ONE;
> @@ -266,7 +232,7 @@ gen7_create_blend_state(struct intel_batchbuffer *batch)
>  	blend->blend1.post_blend_clamp_enable = 1;
>  	blend->blend1.pre_blend_clamp_enable = 1;
>  
> -	return batch_offset(batch, blend);
> +	return intel_batchbuffer_subdata_offset(batch, blend);
>  }
>  
>  static void
> @@ -290,11 +256,11 @@ gen7_create_cc_viewport(struct intel_batchbuffer *batch)
>  {
>  	struct gen7_cc_viewport *vp;
>  
> -	vp = batch_alloc(batch, sizeof(*vp), 32);
> +	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
>  	vp->min_depth = -1.e35;
>  	vp->max_depth = 1.e35;
>  
> -	return batch_offset(batch, vp);
> +	return intel_batchbuffer_subdata_offset(batch, vp);
>  }
>  
>  static void
> @@ -312,7 +278,7 @@ gen7_create_sampler(struct intel_batchbuffer *batch)
>  {
>  	struct gen7_sampler_state *ss;
>  
> -	ss = batch_alloc(batch, sizeof(*ss), 32);
> +	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
>  
>  	ss->ss0.min_filter = GEN7_MAPFILTER_NEAREST;
>  	ss->ss0.mag_filter = GEN7_MAPFILTER_NEAREST;
> @@ -323,7 +289,7 @@ gen7_create_sampler(struct intel_batchbuffer *batch)
>  
>  	ss->ss3.non_normalized_coord = 1;
>  
> -	return batch_offset(batch, ss);
> +	return intel_batchbuffer_subdata_offset(batch, ss);
>  }
>  
>  static void
> @@ -478,7 +444,8 @@ gen7_emit_ps(struct intel_batchbuffer *batch)
>  		threads = 40 << IVB_PS_MAX_THREADS_SHIFT;
>  
>  	OUT_BATCH(GEN7_3DSTATE_PS | (8 - 2));
> -	OUT_BATCH(batch_copy(batch, ps_kernel, sizeof(ps_kernel), 64));
> +	OUT_BATCH(intel_batchbuffer_copy_data(batch, ps_kernel,
> +		  sizeof(ps_kernel), 64));
>  	OUT_BATCH(1 << GEN7_PS_SAMPLER_COUNT_SHIFT |
>  		  2 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT);
>  	OUT_BATCH(0); /* scratch address */
> diff --git a/lib/rendercopy_gen8.c b/lib/rendercopy_gen8.c
> index fe3fedf..fbf049f 100644
> --- a/lib/rendercopy_gen8.c
> +++ b/lib/rendercopy_gen8.c
> @@ -129,41 +129,6 @@ static void annotation_flush(struct annotations_context *aub,
>  						 aub->index);
>  }
>  
> -static uint32_t
> -batch_used(struct intel_batchbuffer *batch)
> -{
> -	return batch->ptr - batch->buffer;
> -}
> -
> -static uint32_t
> -batch_align(struct intel_batchbuffer *batch, uint32_t align)
> -{
> -	uint32_t offset = batch_used(batch);
> -	offset = ALIGN(offset, align);
> -	batch->ptr = batch->buffer + offset;
> -	return offset;
> -}
> -
> -static void *
> -batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
> -{
> -	uint32_t offset = batch_align(batch, align);
> -	batch->ptr += size;
> -	return memset(batch->buffer + offset, 0, size);
> -}
> -
> -static uint32_t
> -batch_offset(struct intel_batchbuffer *batch, void *ptr)
> -{
> -	return (uint8_t *)ptr - batch->buffer;
> -}
> -
> -static uint32_t
> -batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
> -{
> -	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
> -}
> -
>  static void
>  gen6_render_flush(struct intel_batchbuffer *batch,
>  		  drm_intel_context *context, uint32_t batch_end)
> @@ -195,8 +160,8 @@ gen8_bind_buf(struct intel_batchbuffer *batch,
>  		read_domain = I915_GEM_DOMAIN_SAMPLER;
>  	}
>  
> -	ss = batch_alloc(batch, sizeof(*ss), 64);
> -	offset = batch_offset(batch, ss);
> +	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, ss);
>  	annotation_add_state(aub, AUB_TRACE_SURFACE_STATE, offset, sizeof(*ss));
>  
>  	ss->ss0.surface_type = GEN6_SURFACE_2D;
> @@ -212,7 +177,8 @@ gen8_bind_buf(struct intel_batchbuffer *batch,
>  	ss->ss8.base_addr = buf->bo->offset;
>  
>  	ret = drm_intel_bo_emit_reloc(batch->bo,
> -				      batch_offset(batch, ss) + 8 * 4,
> +				      intel_batchbuffer_subdata_offset(batch,
> +				      ss) + 8 * 4,
>  				      buf->bo, 0,
>  				      read_domain, write_domain);
>  	igt_assert(ret == 0);
> @@ -237,8 +203,8 @@ gen8_bind_surfaces(struct intel_batchbuffer *batch,
>  {
>  	uint32_t *binding_table, offset;
>  
> -	binding_table = batch_alloc(batch, 8, 32);
> -	offset = batch_offset(batch, binding_table);
> +	binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32);
> +	offset = intel_batchbuffer_subdata_offset(batch, binding_table);
>  	annotation_add_state(aub, AUB_TRACE_BINDING_TABLE, offset, 8);
>  
>  	binding_table[0] =
> @@ -259,8 +225,8 @@ gen8_create_sampler(struct intel_batchbuffer *batch,
>  	struct gen8_sampler_state *ss;
>  	uint32_t offset;
>  
> -	ss = batch_alloc(batch, sizeof(*ss), 64);
> -	offset = batch_offset(batch, ss);
> +	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, ss);
>  	annotation_add_state(aub, AUB_TRACE_SAMPLER_STATE,
>  			     offset, sizeof(*ss));
>  
> @@ -285,7 +251,7 @@ gen8_fill_ps(struct intel_batchbuffer *batch,
>  {
>  	uint32_t offset;
>  
> -	offset = batch_copy(batch, kernel, size, 64);
> +	offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
>  	annotation_add_state(aub, AUB_TRACE_KERNEL_INSTRUCTIONS, offset, size);
>  
>  	return offset;
> @@ -312,7 +278,7 @@ gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
>  	void *start;
>  	uint32_t offset;
>  
> -	batch_align(batch, 8);
> +	intel_batchbuffer_align(batch, 8);
>  	start = batch->ptr;
>  
>  	emit_vertex_2s(batch, dst_x + width, dst_y + height);
> @@ -327,7 +293,7 @@ gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
>  	emit_vertex_normalized(batch, src_x, igt_buf_width(src));
>  	emit_vertex_normalized(batch, src_y, igt_buf_height(src));
>  
> -	offset = batch_offset(batch, start);
> +	offset = intel_batchbuffer_subdata_offset(batch, start);
>  	annotation_add_state(aub, AUB_TRACE_VERTEX_BUFFER,
>  			     offset, 3 * VERTEX_SIZE);
>  	return offset;
> @@ -413,8 +379,9 @@ gen6_create_cc_state(struct intel_batchbuffer *batch,
>  	struct gen6_color_calc_state *cc_state;
>  	uint32_t offset;
>  
> -	cc_state = batch_alloc(batch, sizeof(*cc_state), 64);
> -	offset = batch_offset(batch, cc_state);
> +	cc_state = intel_batchbuffer_subdata_alloc(batch,
> +						   sizeof(*cc_state), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, cc_state);
>  	annotation_add_state(aub, AUB_TRACE_CC_STATE,
>  			     offset, sizeof(*cc_state));
>  
> @@ -429,8 +396,8 @@ gen8_create_blend_state(struct intel_batchbuffer *batch,
>  	int i;
>  	uint32_t offset;
>  
> -	blend = batch_alloc(batch, sizeof(*blend), 64);
> -	offset = batch_offset(batch, blend);
> +	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, blend);
>  	annotation_add_state(aub, AUB_TRACE_BLEND_STATE,
>  			     offset, sizeof(*blend));
>  
> @@ -452,8 +419,8 @@ gen6_create_cc_viewport(struct intel_batchbuffer *batch,
>  	struct gen6_cc_viewport *vp;
>  	uint32_t offset;
>  
> -	vp = batch_alloc(batch, sizeof(*vp), 32);
> -	offset = batch_offset(batch, vp);
> +	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
> +	offset = intel_batchbuffer_subdata_offset(batch, vp);
>  	annotation_add_state(aub, AUB_TRACE_CC_VP_STATE,
>  			     offset, sizeof(*vp));
>  
> @@ -472,8 +439,9 @@ gen7_create_sf_clip_viewport(struct intel_batchbuffer *batch,
>  	struct gen7_sf_clip_viewport *scv_state;
>  	uint32_t offset;
>  
> -	scv_state = batch_alloc(batch, sizeof(*scv_state), 64);
> -	offset = batch_offset(batch, scv_state);
> +	scv_state = intel_batchbuffer_subdata_alloc(batch,
> +						    sizeof(*scv_state), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, scv_state);
>  	annotation_add_state(aub, AUB_TRACE_CLIP_VP_STATE,
>  			     offset, sizeof(*scv_state));
>  
> @@ -492,8 +460,8 @@ gen6_create_scissor_rect(struct intel_batchbuffer *batch,
>  	struct gen6_scissor_rect *scissor;
>  	uint32_t offset;
>  
> -	scissor = batch_alloc(batch, sizeof(*scissor), 64);
> -	offset = batch_offset(batch, scissor);
> +	scissor = intel_batchbuffer_subdata_alloc(batch, sizeof(*scissor), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, scissor);
>  	annotation_add_state(aub, AUB_TRACE_SCISSOR_STATE,
>  			     offset, sizeof(*scissor));
>  
> @@ -934,7 +902,7 @@ void gen8_render_copyfunc(struct intel_batchbuffer *batch,
>  
>  	intel_batchbuffer_flush_with_context(batch, context);
>  
> -	batch_align(batch, 8);
> +	intel_batchbuffer_align(batch, 8);
>  
>  	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
>  
> @@ -1019,7 +987,7 @@ void gen8_render_copyfunc(struct intel_batchbuffer *batch,
>  
>  	OUT_BATCH(MI_BATCH_BUFFER_END);
>  
> -	batch_end = batch_align(batch, 8);
> +	batch_end = intel_batchbuffer_align(batch, 8);
>  	igt_assert(batch_end < BATCH_STATE_SPLIT);
>  	annotation_add_batch(&aub_annotations, batch_end);
>  
> diff --git a/lib/rendercopy_gen9.c b/lib/rendercopy_gen9.c
> index e646e97..9bd1cbb 100644
> --- a/lib/rendercopy_gen9.c
> +++ b/lib/rendercopy_gen9.c
> @@ -130,41 +130,6 @@ static void annotation_flush(struct annotations_context *ctx,
>  						 ctx->index);
>  }
>  
> -static uint32_t
> -batch_used(struct intel_batchbuffer *batch)
> -{
> -	return batch->ptr - batch->buffer;
> -}
> -
> -static uint32_t
> -batch_align(struct intel_batchbuffer *batch, uint32_t align)
> -{
> -	uint32_t offset = batch_used(batch);
> -	offset = ALIGN(offset, align);
> -	batch->ptr = batch->buffer + offset;
> -	return offset;
> -}
> -
> -static void *
> -batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
> -{
> -	uint32_t offset = batch_align(batch, align);
> -	batch->ptr += size;
> -	return memset(batch->buffer + offset, 0, size);
> -}
> -
> -static uint32_t
> -batch_offset(struct intel_batchbuffer *batch, void *ptr)
> -{
> -	return (uint8_t *)ptr - batch->buffer;
> -}
> -
> -static uint32_t
> -batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
> -{
> -	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
> -}
> -
>  static void
>  gen6_render_flush(struct intel_batchbuffer *batch,
>  		  drm_intel_context *context, uint32_t batch_end)
> @@ -193,8 +158,8 @@ gen8_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
>  		read_domain = I915_GEM_DOMAIN_SAMPLER;
>  	}
>  
> -	ss = batch_alloc(batch, sizeof(*ss), 64);
> -	offset = batch_offset(batch, ss);
> +	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, ss);
>  	annotation_add_state(&aub_annotations, AUB_TRACE_SURFACE_STATE,
>  			     offset, sizeof(*ss));
>  
> @@ -211,7 +176,8 @@ gen8_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
>  	ss->ss8.base_addr = buf->bo->offset;
>  
>  	ret = drm_intel_bo_emit_reloc(batch->bo,
> -				      batch_offset(batch, ss) + 8 * 4,
> +				      intel_batchbuffer_subdata_offset(batch,
> +				      ss) + 8 * 4,
>  				      buf->bo, 0,
>  				      read_domain, write_domain);
>  	assert(ret == 0);
> @@ -235,8 +201,8 @@ gen8_bind_surfaces(struct intel_batchbuffer *batch,
>  {
>  	uint32_t *binding_table, offset;
>  
> -	binding_table = batch_alloc(batch, 8, 32);
> -	offset = batch_offset(batch, binding_table);
> +	binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32);
> +	offset = intel_batchbuffer_subdata_offset(batch, binding_table);
>  	annotation_add_state(&aub_annotations, AUB_TRACE_BINDING_TABLE,
>  			     offset, 8);
>  
> @@ -254,8 +220,8 @@ gen8_create_sampler(struct intel_batchbuffer *batch) {
>  	struct gen8_sampler_state *ss;
>  	uint32_t offset;
>  
> -	ss = batch_alloc(batch, sizeof(*ss), 64);
> -	offset = batch_offset(batch, ss);
> +	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, ss);
>  	annotation_add_state(&aub_annotations, AUB_TRACE_SAMPLER_STATE,
>  			     offset, sizeof(*ss));
>  
> @@ -279,7 +245,7 @@ gen8_fill_ps(struct intel_batchbuffer *batch,
>  {
>  	uint32_t offset;
>  
> -	offset = batch_copy(batch, kernel, size, 64);
> +	offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
>  	annotation_add_state(&aub_annotations, AUB_TRACE_KERNEL_INSTRUCTIONS,
>  			     offset, size);
>  
> @@ -306,7 +272,7 @@ gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
>  	void *start;
>  	uint32_t offset;
>  
> -	batch_align(batch, 8);
> +	intel_batchbuffer_align(batch, 8);
>  	start = batch->ptr;
>  
>  	emit_vertex_2s(batch, dst_x + width, dst_y + height);
> @@ -321,7 +287,7 @@ gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
>  	emit_vertex_normalized(batch, src_x, igt_buf_width(src));
>  	emit_vertex_normalized(batch, src_y, igt_buf_height(src));
>  
> -	offset = batch_offset(batch, start);
> +	offset = intel_batchbuffer_subdata_offset(batch, start);
>  	annotation_add_state(&aub_annotations, AUB_TRACE_VERTEX_BUFFER,
>  			     offset, 3 * VERTEX_SIZE);
>  	return offset;
> @@ -406,8 +372,9 @@ gen6_create_cc_state(struct intel_batchbuffer *batch)
>  	struct gen6_color_calc_state *cc_state;
>  	uint32_t offset;
>  
> -	cc_state = batch_alloc(batch, sizeof(*cc_state), 64);
> -	offset = batch_offset(batch, cc_state);
> +	cc_state = intel_batchbuffer_subdata_alloc(batch,
> +						   sizeof(*cc_state), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, cc_state);
>  	annotation_add_state(&aub_annotations, AUB_TRACE_CC_STATE,
>  			     offset, sizeof(*cc_state));
>  
> @@ -421,8 +388,8 @@ gen8_create_blend_state(struct intel_batchbuffer *batch)
>  	int i;
>  	uint32_t offset;
>  
> -	blend = batch_alloc(batch, sizeof(*blend), 64);
> -	offset = batch_offset(batch, blend);
> +	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, blend);
>  	annotation_add_state(&aub_annotations, AUB_TRACE_BLEND_STATE,
>  			     offset, sizeof(*blend));
>  
> @@ -443,8 +410,8 @@ gen6_create_cc_viewport(struct intel_batchbuffer *batch)
>  	struct gen6_cc_viewport *vp;
>  	uint32_t offset;
>  
> -	vp = batch_alloc(batch, sizeof(*vp), 32);
> -	offset = batch_offset(batch, vp);
> +	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
> +	offset = intel_batchbuffer_subdata_offset(batch, vp);
>  	annotation_add_state(&aub_annotations, AUB_TRACE_CC_VP_STATE,
>  			     offset, sizeof(*vp));
>  
> @@ -461,8 +428,9 @@ gen7_create_sf_clip_viewport(struct intel_batchbuffer *batch) {
>  	struct gen7_sf_clip_viewport *scv_state;
>  	uint32_t offset;
>  
> -	scv_state = batch_alloc(batch, sizeof(*scv_state), 64);
> -	offset = batch_offset(batch, scv_state);
> +	scv_state = intel_batchbuffer_subdata_alloc(batch,
> +						    sizeof(*scv_state), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, scv_state);
>  	annotation_add_state(&aub_annotations, AUB_TRACE_CLIP_VP_STATE,
>  			     offset, sizeof(*scv_state));
>  
> @@ -480,8 +448,8 @@ gen6_create_scissor_rect(struct intel_batchbuffer *batch)
>  	struct gen6_scissor_rect *scissor;
>  	uint32_t offset;
>  
> -	scissor = batch_alloc(batch, sizeof(*scissor), 64);
> -	offset = batch_offset(batch, scissor);
> +	scissor = intel_batchbuffer_subdata_alloc(batch, sizeof(*scissor), 64);
> +	offset = intel_batchbuffer_subdata_offset(batch, scissor);
>  	annotation_add_state(&aub_annotations, AUB_TRACE_SCISSOR_STATE,
>  			     offset, sizeof(*scissor));
>  
> @@ -940,7 +908,7 @@ void gen9_render_copyfunc(struct intel_batchbuffer *batch,
>  
>  	intel_batchbuffer_flush_with_context(batch, context);
>  
> -	batch_align(batch, 8);
> +	intel_batchbuffer_align(batch, 8);
>  
>  	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
>  
> @@ -1023,7 +991,7 @@ void gen9_render_copyfunc(struct intel_batchbuffer *batch,
>  
>  	OUT_BATCH(MI_BATCH_BUFFER_END);
>  
> -	batch_end = batch_align(batch, 8);
> +	batch_end = intel_batchbuffer_align(batch, 8);
>  	assert(batch_end < BATCH_STATE_SPLIT);
>  	annotation_add_batch(&aub_annotations, batch_end);
>  
> -- 
> 2.9.5
> 
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [igt-dev] [PATCH i-g-t v2 1/2] lib/rendercopy_gen7: Reorganizing batch allocations
  2018-04-13 15:13 [igt-dev] [PATCH i-g-t] lib/intel_batchbuffer: Move batch functions from media/render/gpgpu libs Lukasz Kalamarz
  2018-04-13 15:33 ` [igt-dev] ✗ Fi.CI.BAT: failure for " Patchwork
  2018-04-16  8:13 ` [igt-dev] [PATCH i-g-t] " Katarzyna Dec
@ 2018-04-16 15:22 ` Lukasz Kalamarz
  2018-04-16 15:22   ` [igt-dev] [PATCH i-g-t v2 2/2] lib/intel_batchbuffer: Move batch functions from media/render/gpgpu libs Lukasz Kalamarz
  2018-04-16 20:50   ` [igt-dev] [PATCH i-g-t v2 1/2] lib/rendercopy_gen7: Reorganizing batch allocations Daniele Ceraolo Spurio
  2 siblings, 2 replies; 7+ messages in thread
From: Lukasz Kalamarz @ 2018-04-16 15:22 UTC (permalink / raw)
  To: igt-dev

This lib was written in a different manner than all the other libs,
which was causing some issues during refactoring. The previous
implementation was allocating data only in the state part of the
batchbuffer. The new implementation takes advantage of splitting the
batch into two parts, one for batch commands and the second for
various states.

Signed-off-by: Lukasz Kalamarz <lukasz.kalamarz@intel.com>
Cc: Katarzyna Dec <katarzyna.dec@intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
---
 lib/rendercopy_gen7.c | 75 +++++++++++++++++++++++++++++----------------------
 1 file changed, 43 insertions(+), 32 deletions(-)

diff --git a/lib/rendercopy_gen7.c b/lib/rendercopy_gen7.c
index 3b92406..4a90efa 100644
--- a/lib/rendercopy_gen7.c
+++ b/lib/rendercopy_gen7.c
@@ -35,7 +35,7 @@ static const uint32_t ps_kernel[][4] = {
 static uint32_t
 batch_used(struct intel_batchbuffer *batch)
 {
-	return batch->state - batch->buffer;
+	return batch->ptr - batch->buffer;
 }
 
 static uint32_t
@@ -43,7 +43,7 @@ batch_align(struct intel_batchbuffer *batch, uint32_t align)
 {
 	uint32_t offset = batch_used(batch);
 	offset = ALIGN(offset, align);
-	batch->state = batch->buffer + offset;
+	batch->ptr = batch->buffer + offset;
 	return offset;
 }
 
@@ -51,7 +51,7 @@ static void *
 batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
 {
 	uint32_t offset = batch_align(batch, align);
-	batch->state += size;
+	batch->ptr += size;
 	return memset(batch->buffer + offset, 0, size);
 }
 
@@ -198,15 +198,9 @@ gen7_create_vertex_buffer(struct intel_batchbuffer *batch,
 static void gen7_emit_vertex_buffer(struct intel_batchbuffer *batch,
 				    int src_x, int src_y,
 				    int dst_x, int dst_y,
-				    int width, int height)
+				    int width, int height,
+				    uint32_t offset)
 {
-	uint32_t offset;
-
-	offset = gen7_create_vertex_buffer(batch,
-					   src_x, src_y,
-					   dst_x, dst_y,
-					   width, height);
-
 	OUT_BATCH(GEN7_3DSTATE_VERTEX_BUFFERS | (5 - 2));
 	OUT_BATCH(0 << GEN7_VB0_BUFFER_INDEX_SHIFT |
 		  GEN7_VB0_VERTEXDATA |
@@ -238,10 +232,11 @@ gen7_bind_surfaces(struct intel_batchbuffer *batch,
 static void
 gen7_emit_binding_table(struct intel_batchbuffer *batch,
 			struct igt_buf *src,
-			struct igt_buf *dst)
+			struct igt_buf *dst,
+			uint32_t bind_surf_off)
 {
 	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS | (2 - 2));
-	OUT_BATCH(gen7_bind_surfaces(batch, src, dst));
+	OUT_BATCH(bind_surf_off);
 }
 
 static void
@@ -298,13 +293,14 @@ gen7_create_cc_viewport(struct intel_batchbuffer *batch)
 }
 
 static void
-gen7_emit_cc(struct intel_batchbuffer *batch)
+gen7_emit_cc(struct intel_batchbuffer *batch, uint32_t blend_state,
+	     uint32_t cc_viewport)
 {
-        OUT_BATCH(GEN7_3DSTATE_BLEND_STATE_POINTERS | (2 - 2));
-        OUT_BATCH(gen7_create_blend_state(batch));
+	OUT_BATCH(GEN7_3DSTATE_BLEND_STATE_POINTERS | (2 - 2));
+	OUT_BATCH(blend_state);
 
-        OUT_BATCH(GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC | (2 - 2));
-	OUT_BATCH(gen7_create_cc_viewport(batch));
+	OUT_BATCH(GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC | (2 - 2));
+	OUT_BATCH(cc_viewport);
 }
 
 static uint32_t
@@ -327,10 +323,10 @@ gen7_create_sampler(struct intel_batchbuffer *batch)
 }
 
 static void
-gen7_emit_sampler(struct intel_batchbuffer *batch)
+gen7_emit_sampler(struct intel_batchbuffer *batch, uint32_t sampler_off)
 {
-        OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
-        OUT_BATCH(gen7_create_sampler(batch));
+	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
+	OUT_BATCH(sampler_off);
 }
 
 static void
@@ -468,7 +464,7 @@ gen7_emit_sbe(struct intel_batchbuffer *batch)
 }
 
 static void
-gen7_emit_ps(struct intel_batchbuffer *batch)
+gen7_emit_ps(struct intel_batchbuffer *batch, uint32_t kernel_off)
 {
 	int threads;
 
@@ -478,7 +474,7 @@ gen7_emit_ps(struct intel_batchbuffer *batch)
 		threads = 40 << IVB_PS_MAX_THREADS_SHIFT;
 
 	OUT_BATCH(GEN7_3DSTATE_PS | (8 - 2));
-	OUT_BATCH(batch_copy(batch, ps_kernel, sizeof(ps_kernel), 64));
+	OUT_BATCH(kernel_off);
 	OUT_BATCH(1 << GEN7_PS_SAMPLER_COUNT_SHIFT |
 		  2 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT);
 	OUT_BATCH(0); /* scratch address */
@@ -535,12 +531,27 @@ void gen7_render_copyfunc(struct intel_batchbuffer *batch,
 			  unsigned width, unsigned height,
 			  struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
 {
+	uint32_t ps_binding_table, ps_sampler_off, ps_kernel_off;
+	uint32_t blend_state, cc_viewport;
+	uint32_t vertex_buffer;
 	uint32_t batch_end;
 
 	intel_batchbuffer_flush_with_context(batch, context);
 
-	batch->state = &batch->buffer[BATCH_STATE_SPLIT];
+	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
 
+
+	blend_state = gen7_create_blend_state(batch);
+	cc_viewport = gen7_create_cc_viewport(batch);
+	ps_sampler_off = gen7_create_sampler(batch);
+	ps_kernel_off = batch_copy(batch, ps_kernel, sizeof(ps_kernel), 64);
+	vertex_buffer = gen7_create_vertex_buffer(batch,
+						  src_x, src_y,
+						  dst_x, dst_y,
+						  width, height);
+	ps_binding_table = gen7_bind_surfaces(batch, src, dst);
+
+	batch->ptr = batch->buffer;
 	OUT_BATCH(GEN7_PIPELINE_SELECT | PIPELINE_SELECT_3D);
 
 	gen7_emit_state_base_address(batch);
@@ -556,18 +567,18 @@ void gen7_render_copyfunc(struct intel_batchbuffer *batch,
 	gen7_emit_wm(batch);
 	gen7_emit_streamout(batch);
 	gen7_emit_null_depth_buffer(batch);
-
-	gen7_emit_cc(batch);
-        gen7_emit_sampler(batch);
+	gen7_emit_cc(batch, blend_state, cc_viewport);
+	gen7_emit_sampler(batch, ps_sampler_off);
         gen7_emit_sbe(batch);
-        gen7_emit_ps(batch);
+	gen7_emit_ps(batch, ps_kernel_off);
         gen7_emit_vertex_elements(batch);
-        gen7_emit_vertex_buffer(batch,
-				src_x, src_y, dst_x, dst_y, width, height);
-	gen7_emit_binding_table(batch, src, dst);
+	gen7_emit_vertex_buffer(batch, src_x, src_y,
+				dst_x, dst_y, width,
+				height, vertex_buffer);
+	gen7_emit_binding_table(batch, src, dst, ps_binding_table);
 	gen7_emit_drawing_rectangle(batch, dst);
 
-        OUT_BATCH(GEN7_3DPRIMITIVE | (7- 2));
+	OUT_BATCH(GEN7_3DPRIMITIVE | (7 - 2));
         OUT_BATCH(GEN7_3DPRIMITIVE_VERTEX_SEQUENTIAL | _3DPRIM_RECTLIST);
         OUT_BATCH(3);
         OUT_BATCH(0);
-- 
2.9.5

_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [igt-dev] [PATCH i-g-t v2 2/2] lib/intel_batchbuffer: Move batch functions from media/render/gpgpu libs
  2018-04-16 15:22 ` [igt-dev] [PATCH i-g-t v2 1/2] lib/rendercopy_gen7: Reorganizing batch allocations Lukasz Kalamarz
@ 2018-04-16 15:22   ` Lukasz Kalamarz
  2018-04-16 20:50   ` [igt-dev] [PATCH i-g-t v2 1/2] lib/rendercopy_gen7: Reorganizing batch allocations Daniele Ceraolo Spurio
  1 sibling, 0 replies; 7+ messages in thread
From: Lukasz Kalamarz @ 2018-04-16 15:22 UTC (permalink / raw)
  To: igt-dev

Batch functions were copy/pasted across several libs.
By moving them into the intel_batchbuffer lib, tests can now be
easily maintained without worrying that we forgot to modify
an older version of the lib.

v2: Added documentation into lib and rebased patch

Signed-off-by: Lukasz Kalamarz <lukasz.kalamarz@intel.com>
Cc: Katarzyna Dec <katarzyna.dec@intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
---
 lib/gpgpu_fill.c        |  6 ++--
 lib/gpu_fill.c          | 68 ++++++++++---------------------------
 lib/gpu_fill.h          | 15 ---------
 lib/intel_batchbuffer.c | 89 +++++++++++++++++++++++++++++++++++++++++++++----
 lib/intel_batchbuffer.h | 18 ++++++++--
 lib/media_fill_gen7.c   |  2 +-
 lib/media_fill_gen8.c   |  2 +-
 lib/media_fill_gen9.c   |  2 +-
 lib/media_spin.c        | 63 ++++++++--------------------------
 lib/rendercopy_gen6.c   | 77 ++++++++++--------------------------------
 lib/rendercopy_gen7.c   | 65 +++++++++---------------------------
 lib/rendercopy_gen8.c   | 82 ++++++++++++++-------------------------------
 lib/rendercopy_gen9.c   | 82 ++++++++++++++-------------------------------
 13 files changed, 219 insertions(+), 352 deletions(-)

diff --git a/lib/gpgpu_fill.c b/lib/gpgpu_fill.c
index 72a1445..010dde0 100644
--- a/lib/gpgpu_fill.c
+++ b/lib/gpgpu_fill.c
@@ -137,7 +137,7 @@ gen7_gpgpu_fillfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen7_render_flush(batch, batch_end);
@@ -185,7 +185,7 @@ gen8_gpgpu_fillfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen7_render_flush(batch, batch_end);
@@ -234,7 +234,7 @@ gen9_gpgpu_fillfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen7_render_flush(batch, batch_end);
diff --git a/lib/gpu_fill.c b/lib/gpu_fill.c
index f1fe5b3..5c1e217 100644
--- a/lib/gpu_fill.c
+++ b/lib/gpu_fill.c
@@ -24,41 +24,6 @@
 
 #include "gpu_fill.h"
 
-uint32_t
-batch_used(struct intel_batchbuffer *batch)
-{
-	return batch->ptr - batch->buffer;
-}
-
-uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align)
-{
-	uint32_t offset = batch_used(batch);
-	offset = ALIGN(offset, align);
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
-{
-	uint32_t offset = batch_align(batch, align);
-	batch->ptr += size;
-	return memset(batch->buffer + offset, 0, size);
-}
-
-uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr)
-{
-	return (uint8_t *)ptr - batch->buffer;
-}
-
-uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
-{
-	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
-}
-
 void
 gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
 {
@@ -78,8 +43,10 @@ gen7_fill_curbe_buffer_data(struct intel_batchbuffer *batch,
 	uint8_t *curbe_buffer;
 	uint32_t offset;
 
-	curbe_buffer = batch_alloc(batch, sizeof(uint32_t) * 8, 64);
-	offset = batch_offset(batch, curbe_buffer);
+	curbe_buffer = intel_batchbuffer_subdata_alloc(batch,
+						       sizeof(uint32_t) * 8,
+						       64);
+	offset = intel_batchbuffer_subdata_offset(batch, curbe_buffer);
 	*curbe_buffer = color;
 
 	return offset;
@@ -102,8 +69,8 @@ gen7_fill_surface_state(struct intel_batchbuffer *batch,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 
 	ss->ss0.surface_type = GEN7_SURFACE_2D;
 	ss->ss0.surface_format = format;
@@ -116,7 +83,7 @@ gen7_fill_surface_state(struct intel_batchbuffer *batch,
 
 	ss->ss1.base_addr = buf->bo->offset;
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				batch_offset(batch, ss) + 4,
+				intel_batchbuffer_subdata_offset(batch, ss) + 4,
 				buf->bo, 0,
 				read_domain, write_domain);
 	igt_assert(ret == 0);
@@ -140,8 +107,8 @@ gen7_fill_binding_table(struct intel_batchbuffer *batch,
 {
 	uint32_t *binding_table, offset;
 
-	binding_table = batch_alloc(batch, 32, 64);
-	offset = batch_offset(batch, binding_table);
+	binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 64);
+	offset = intel_batchbuffer_subdata_offset(batch, binding_table);
 	if (IS_GEN7(batch->devid))
 		binding_table[0] = gen7_fill_surface_state(batch, dst,
 						GEN7_SURFACEFORMAT_R8_UNORM, 1);
@@ -159,7 +126,7 @@ gen7_fill_kernel(struct intel_batchbuffer *batch,
 {
 	uint32_t offset;
 
-	offset = batch_copy(batch, kernel, size, 64);
+	offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
 
 	return offset;
 }
@@ -175,8 +142,8 @@ gen7_fill_interface_descriptor(struct intel_batchbuffer *batch, struct igt_buf *
 	binding_table_offset = gen7_fill_binding_table(batch, dst);
 	kernel_offset = gen7_fill_kernel(batch, kernel, size);
 
-	idd = batch_alloc(batch, sizeof(*idd), 64);
-	offset = batch_offset(batch, idd);
+	idd = intel_batchbuffer_subdata_alloc(batch, sizeof(*idd), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, idd);
 
 	idd->desc0.kernel_start_pointer = (kernel_offset >> 6);
 
@@ -401,8 +368,8 @@ gen8_fill_surface_state(struct intel_batchbuffer *batch,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 
 	ss->ss0.surface_type = GEN8_SURFACE_2D;
 	ss->ss0.surface_format = format;
@@ -418,7 +385,8 @@ gen8_fill_surface_state(struct intel_batchbuffer *batch,
 	ss->ss8.base_addr = buf->bo->offset;
 
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				batch_offset(batch, ss) + 8 * 4,
+				intel_batchbuffer_subdata_offset(batch,
+				ss) + 8 * 4,
 				buf->bo, 0,
 				read_domain, write_domain);
 	igt_assert(ret == 0);
@@ -445,8 +413,8 @@ gen8_fill_interface_descriptor(struct intel_batchbuffer *batch, struct igt_buf *
 	binding_table_offset = gen7_fill_binding_table(batch, dst);
 	kernel_offset = gen7_fill_kernel(batch, kernel, size);
 
-	idd = batch_alloc(batch, sizeof(*idd), 64);
-	offset = batch_offset(batch, idd);
+	idd = intel_batchbuffer_subdata_alloc(batch, sizeof(*idd), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, idd);
 
 	idd->desc0.kernel_start_pointer = (kernel_offset >> 6);
 
diff --git a/lib/gpu_fill.h b/lib/gpu_fill.h
index 072e9f7..067d498 100644
--- a/lib/gpu_fill.h
+++ b/lib/gpu_fill.h
@@ -37,21 +37,6 @@
 #include "intel_chipset.h"
 #include <assert.h>
 
-uint32_t
-batch_used(struct intel_batchbuffer *batch);
-
-uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align);
-
-void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align);
-
-uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr);
-
-uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align);
-
 void
 gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end);
 
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index 10d4dce..23cad0d 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -66,6 +66,73 @@
  */
 
 /**
+ * intel_batchbuffer_align:
+ * @batch: batchbuffer object
+ * @align: value in bytes to which we want to align
+ *
+ * Align batchbuffer offset to given value and then return it
+ *
+ * Returns: Batchbuffer offset aligned to given value.
+ */
+uint32_t
+intel_batchbuffer_align(struct intel_batchbuffer *batch, uint32_t align)
+{
+	uint32_t offset = batch->ptr - batch->buffer;
+
+	offset = ALIGN(offset, align);
+	batch->ptr = batch->buffer + offset;
+	return offset;
+}
+
+/**
+ * intel_batchbuffer_round_upto:
+ * @batch: batchbuffer object
+ * @divisor: value in bytes to which the batchbuffer offset is rounded up
+ *
+ * Returns: Calculated offset rounded up to the given divisor.
+ */
+uint32_t
+intel_batchbuffer_round_upto(struct intel_batchbuffer *batch, uint32_t divisor)
+{
+	uint32_t offset = batch->ptr - batch->buffer;
+
+	offset = (offset + divisor-1) / divisor * divisor;
+	batch->ptr = batch->buffer + offset;
+	return offset;
+}
+
+/**
+ * intel_batchbuffer_subdata_alloc:
+ * @batch: batchbuffer object
+ * @size: number of bytes to allocate
+ * @align: value in bytes to which we want to align
+ *
+ * Allocate @size bytes within @batch.
+ */
+void *
+intel_batchbuffer_subdata_alloc(struct intel_batchbuffer *batch, uint32_t size,
+				uint32_t align)
+{
+	uint32_t offset = intel_batchbuffer_align(batch, align);
+
+	batch->ptr += size;
+	return memset(batch->buffer + offset, 0, size);
+}
+
+/**
+ * intel_batchbuffer_subdata_offset:
+ * @batch: batchbuffer object
+ * @ptr: pointer to given data
+ *
+ * Returns: Offset between given pointer and batchbuffer.
+ */
+uint32_t
+intel_batchbuffer_subdata_offset(struct intel_batchbuffer *batch, void *ptr)
+{
+	return (uint8_t *)ptr - batch->buffer;
+}
+
+/**
  * intel_batchbuffer_reset:
  * @batch: batchbuffer object
  *
@@ -288,22 +355,30 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
 }
 
 /**
- * intel_batchbuffer_data:
+ * intel_batchbuffer_copy_data:
  * @batch: batchbuffer object
- * @data: pointer to the data to write into the batchbuffer
+ * @data: pointer to data, which will be copied
  * @bytes: number of bytes to write into the batchbuffer
+ * @align: value in bytes to which we want to align
  *
  * This transfers the given @data into the batchbuffer. Note that the length
  * must be DWORD aligned, i.e. multiples of 32bits.
+ *
+ * Returns: Offset of copied data.
  */
-void
-intel_batchbuffer_data(struct intel_batchbuffer *batch,
-                       const void *data, unsigned int bytes)
+uint32_t
+intel_batchbuffer_copy_data(struct intel_batchbuffer *batch,
+			    const void *data, unsigned int bytes,
+			    uint32_t align)
 {
+	uint32_t *subdata, *copied_data;
+
 	igt_assert((bytes & 3) == 0);
 	intel_batchbuffer_require_space(batch, bytes);
-	memcpy(batch->ptr, data, bytes);
-	batch->ptr += bytes;
+	subdata = intel_batchbuffer_subdata_alloc(batch, bytes, align);
+	copied_data = memcpy(subdata, data, bytes);
+
+	return intel_batchbuffer_subdata_offset(batch, copied_data);
 }
 
 /**
diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
index 2c262d7..7468eaf 100644
--- a/lib/intel_batchbuffer.h
+++ b/lib/intel_batchbuffer.h
@@ -41,8 +41,9 @@ void intel_batchbuffer_flush_with_context(struct intel_batchbuffer *batch,
 
 void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
 
-void intel_batchbuffer_data(struct intel_batchbuffer *batch,
-                            const void *data, unsigned int bytes);
+uint32_t intel_batchbuffer_copy_data(struct intel_batchbuffer *batch,
+				const void *data, unsigned int bytes,
+				uint32_t align);
 
 void intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
 				  drm_intel_bo *buffer,
@@ -51,6 +52,19 @@ void intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
 				  uint32_t write_domain,
 				  int fenced);
 
+uint32_t
+intel_batchbuffer_align(struct intel_batchbuffer *batch, uint32_t align);
+
+uint32_t
+intel_batchbuffer_round_upto(struct intel_batchbuffer *batch, uint32_t divisor);
+
+void *
+intel_batchbuffer_subdata_alloc(struct intel_batchbuffer *batch,
+				uint32_t size, uint32_t align);
+
+uint32_t
+intel_batchbuffer_subdata_offset(struct intel_batchbuffer *batch, void *ptr);
+
 /* Inline functions - might actually be better off with these
  * non-inlined.  Certainly better off switching all command packets to
  * be passed as structs rather than dwords, but that's a little bit of
diff --git a/lib/media_fill_gen7.c b/lib/media_fill_gen7.c
index 5a8c32f..3dc5617 100644
--- a/lib/media_fill_gen7.c
+++ b/lib/media_fill_gen7.c
@@ -79,7 +79,7 @@ gen7_media_fillfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen7_render_flush(batch, batch_end);
diff --git a/lib/media_fill_gen8.c b/lib/media_fill_gen8.c
index d6dd741..63fe72e 100644
--- a/lib/media_fill_gen8.c
+++ b/lib/media_fill_gen8.c
@@ -82,7 +82,7 @@ gen8_media_fillfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen7_render_flush(batch, batch_end);
diff --git a/lib/media_fill_gen9.c b/lib/media_fill_gen9.c
index a9a829f..78e892f 100644
--- a/lib/media_fill_gen9.c
+++ b/lib/media_fill_gen9.c
@@ -91,7 +91,7 @@ gen9_media_fillfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen7_render_flush(batch, batch_end);
diff --git a/lib/media_spin.c b/lib/media_spin.c
index 580c109..20af549 100644
--- a/lib/media_spin.c
+++ b/lib/media_spin.c
@@ -45,42 +45,6 @@ static const uint32_t spin_kernel[][4] = {
 	{ 0x07800031, 0x20000a40, 0x0e000e00, 0x82000010 }, /* send.ts (16)null<1> r112<0;1;0>:d 0x82000010 */
 };
 
-static uint32_t
-batch_used(struct intel_batchbuffer *batch)
-{
-	return batch->ptr - batch->buffer;
-}
-
-static uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align)
-{
-	uint32_t offset = batch_used(batch);
-	offset = ALIGN(offset, align);
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-static void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
-{
-	uint32_t offset = batch_align(batch, align);
-	batch->ptr += size;
-	return memset(batch->buffer + offset, 0, size);
-}
-
-static uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr)
-{
-	return (uint8_t *)ptr - batch->buffer;
-}
-
-static uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size,
-	   uint32_t align)
-{
-	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
-}
-
 static void
 gen8_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
 {
@@ -100,8 +64,8 @@ gen8_spin_curbe_buffer_data(struct intel_batchbuffer *batch,
 	uint32_t *curbe_buffer;
 	uint32_t offset;
 
-	curbe_buffer = batch_alloc(batch, 64, 64);
-	offset = batch_offset(batch, curbe_buffer);
+	curbe_buffer = intel_batchbuffer_subdata_alloc(batch, 64, 64);
+	offset = intel_batchbuffer_subdata_offset(batch, curbe_buffer);
 	*curbe_buffer = iters;
 
 	return offset;
@@ -124,8 +88,8 @@ gen8_spin_surface_state(struct intel_batchbuffer *batch,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 
 	ss->ss0.surface_type = GEN8_SURFACE_2D;
 	ss->ss0.surface_format = format;
@@ -141,7 +105,8 @@ gen8_spin_surface_state(struct intel_batchbuffer *batch,
 	ss->ss8.base_addr = buf->bo->offset;
 
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				batch_offset(batch, ss) + 8 * 4,
+				intel_batchbuffer_subdata_offset(batch,
+				ss) + 8 * 4,
 				buf->bo, 0,
 				read_domain, write_domain);
 	igt_assert_eq(ret, 0);
@@ -164,8 +129,8 @@ gen8_spin_binding_table(struct intel_batchbuffer *batch,
 {
 	uint32_t *binding_table, offset;
 
-	binding_table = batch_alloc(batch, 32, 64);
-	offset = batch_offset(batch, binding_table);
+	binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 64);
+	offset = intel_batchbuffer_subdata_offset(batch, binding_table);
 
 	binding_table[0] = gen8_spin_surface_state(batch, dst,
 					GEN8_SURFACEFORMAT_R8_UNORM, 1);
@@ -180,7 +145,7 @@ gen8_spin_media_kernel(struct intel_batchbuffer *batch,
 {
 	uint32_t offset;
 
-	offset = batch_copy(batch, kernel, size, 64);
+	offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
 
 	return offset;
 }
@@ -197,8 +162,8 @@ gen8_spin_interface_descriptor(struct intel_batchbuffer *batch,
 	kernel_offset = gen8_spin_media_kernel(batch, spin_kernel,
 					       sizeof(spin_kernel));
 
-	idd = batch_alloc(batch, sizeof(*idd), 64);
-	offset = batch_offset(batch, idd);
+	idd = intel_batchbuffer_subdata_alloc(batch, sizeof(*idd), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, idd);
 
 	idd->desc0.kernel_start_pointer = (kernel_offset >> 6);
 
@@ -444,7 +409,7 @@ gen8_media_spinfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen8_render_flush(batch, batch_end);
@@ -482,7 +447,7 @@ gen8lp_media_spinfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen8_render_flush(batch, batch_end);
@@ -532,7 +497,7 @@ gen9_media_spinfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 
 	gen8_render_flush(batch, batch_end);
diff --git a/lib/rendercopy_gen6.c b/lib/rendercopy_gen6.c
index 8c24cf8..9dcfb86 100644
--- a/lib/rendercopy_gen6.c
+++ b/lib/rendercopy_gen6.c
@@ -48,50 +48,6 @@ static const uint32_t ps_kernel_nomask_affine[][4] = {
 	{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
 };
 
-static uint32_t
-batch_used(struct intel_batchbuffer *batch)
-{
-	return batch->ptr - batch->buffer;
-}
-
-static uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align)
-{
-	uint32_t offset = batch_used(batch);
-	offset = ALIGN(offset, align);
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-static uint32_t
-batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor)
-{
-	uint32_t offset = batch_used(batch);
-	offset = (offset + divisor-1) / divisor * divisor;
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-static void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
-{
-	uint32_t offset = batch_align(batch, align);
-	batch->ptr += size;
-	return memset(batch->buffer + offset, 0, size);
-}
-
-static uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr)
-{
-	return (uint8_t *)ptr - batch->buffer;
-}
-
-static uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
-{
-	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
-}
-
 static void
 gen6_render_flush(struct intel_batchbuffer *batch,
 		  drm_intel_context *context, uint32_t batch_end)
@@ -120,7 +76,7 @@ gen6_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, sizeof(*ss), 32);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
 	ss->ss0.surface_type = GEN6_SURFACE_2D;
 	ss->ss0.surface_format = format;
 
@@ -129,7 +85,8 @@ gen6_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
 	ss->ss1.base_addr = buf->bo->offset;
 
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				      batch_offset(batch, ss) + 4,
+				      intel_batchbuffer_subdata_offset(batch,
+				      ss) + 4,
 				      buf->bo, 0,
 				      read_domain, write_domain);
 	igt_assert(ret == 0);
@@ -140,7 +97,7 @@ gen6_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
 	ss->ss3.tiled_surface = buf->tiling != I915_TILING_NONE;
 	ss->ss3.tile_walk     = buf->tiling == I915_TILING_Y;
 
-	return batch_offset(batch, ss);
+	return intel_batchbuffer_subdata_offset(batch, ss);
 }
 
 static uint32_t
@@ -150,14 +107,14 @@ gen6_bind_surfaces(struct intel_batchbuffer *batch,
 {
 	uint32_t *binding_table;
 
-	binding_table = batch_alloc(batch, 32, 32);
+	binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 32);
 
 	binding_table[0] =
 		gen6_bind_buf(batch, dst, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 1);
 	binding_table[1] =
 		gen6_bind_buf(batch, src, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, 0);
 
-	return batch_offset(batch, binding_table);
+	return intel_batchbuffer_subdata_offset(batch, binding_table);
 }
 
 static void
@@ -427,12 +384,12 @@ gen6_create_cc_viewport(struct intel_batchbuffer *batch)
 {
 	struct gen6_cc_viewport *vp;
 
-	vp = batch_alloc(batch, sizeof(*vp), 32);
+	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
 
 	vp->min_depth = -1.e35;
 	vp->max_depth = 1.e35;
 
-	return batch_offset(batch, vp);
+	return intel_batchbuffer_subdata_offset(batch, vp);
 }
 
 static uint32_t
@@ -440,7 +397,7 @@ gen6_create_cc_blend(struct intel_batchbuffer *batch)
 {
 	struct gen6_blend_state *blend;
 
-	blend = batch_alloc(batch, sizeof(*blend), 64);
+	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
 
 	blend->blend0.dest_blend_factor = GEN6_BLENDFACTOR_ZERO;
 	blend->blend0.source_blend_factor = GEN6_BLENDFACTOR_ONE;
@@ -450,13 +407,13 @@ gen6_create_cc_blend(struct intel_batchbuffer *batch)
 	blend->blend1.post_blend_clamp_enable = 1;
 	blend->blend1.pre_blend_clamp_enable = 1;
 
-	return batch_offset(batch, blend);
+	return intel_batchbuffer_subdata_offset(batch, blend);
 }
 
 static uint32_t
 gen6_create_kernel(struct intel_batchbuffer *batch)
 {
-	return batch_copy(batch, ps_kernel_nomask_affine,
+	return intel_batchbuffer_copy_data(batch, ps_kernel_nomask_affine,
 			  sizeof(ps_kernel_nomask_affine),
 			  64);
 }
@@ -468,7 +425,7 @@ gen6_create_sampler(struct intel_batchbuffer *batch,
 {
 	struct gen6_sampler_state *ss;
 
-	ss = batch_alloc(batch, sizeof(*ss), 32);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
 	ss->ss0.lod_preclamp = 1;	/* GL mode */
 
 	/* We use the legacy mode to get the semantics specified by
@@ -511,7 +468,7 @@ gen6_create_sampler(struct intel_batchbuffer *batch,
 		break;
 	}
 
-	return batch_offset(batch, ss);
+	return intel_batchbuffer_subdata_offset(batch, ss);
 }
 
 static void gen6_emit_vertex_buffer(struct intel_batchbuffer *batch)
@@ -535,7 +492,7 @@ static uint32_t gen6_emit_primitive(struct intel_batchbuffer *batch)
 		  0 << 9 |
 		  4);
 	OUT_BATCH(3);	/* vertex count */
-	offset = batch_used(batch);
+	offset = batch->ptr - batch->buffer;
 	OUT_BATCH(0);	/* vertex_index */
 	OUT_BATCH(1);	/* single instance */
 	OUT_BATCH(0);	/* start instance location */
@@ -557,7 +514,7 @@ void gen6_render_copyfunc(struct intel_batchbuffer *batch,
 	intel_batchbuffer_flush_with_context(batch, context);
 
 	batch->ptr = batch->buffer + 1024;
-	batch_alloc(batch, 64, 64);
+	intel_batchbuffer_subdata_alloc(batch, 64, 64);
 	wm_table  = gen6_bind_surfaces(batch, src, dst);
 	wm_kernel = gen6_create_kernel(batch);
 	wm_state  = gen6_create_sampler(batch,
@@ -594,10 +551,10 @@ void gen6_render_copyfunc(struct intel_batchbuffer *batch,
 	offset = gen6_emit_primitive(batch);
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 
 	*(uint32_t*)(batch->buffer + offset) =
-		batch_round_upto(batch, VERTEX_SIZE)/VERTEX_SIZE;
+		intel_batchbuffer_round_upto(batch, VERTEX_SIZE)/VERTEX_SIZE;
 
 	emit_vertex_2s(batch, dst_x + width, dst_y + height);
 	emit_vertex_normalized(batch, src_x + width, igt_buf_width(src));
diff --git a/lib/rendercopy_gen7.c b/lib/rendercopy_gen7.c
index 4a90efa..df1824f 100644
--- a/lib/rendercopy_gen7.c
+++ b/lib/rendercopy_gen7.c
@@ -32,41 +32,6 @@ static const uint32_t ps_kernel[][4] = {
 	{ 0x05800031, 0x20001fa8, 0x008d0e20, 0x90031000 },
 };
 
-static uint32_t
-batch_used(struct intel_batchbuffer *batch)
-{
-	return batch->ptr - batch->buffer;
-}
-
-static uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align)
-{
-	uint32_t offset = batch_used(batch);
-	offset = ALIGN(offset, align);
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-static void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
-{
-	uint32_t offset = batch_align(batch, align);
-	batch->ptr += size;
-	return memset(batch->buffer + offset, 0, size);
-}
-
-static uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr)
-{
-	return (uint8_t *)ptr - batch->buffer;
-}
-
-static uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
-{
-	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
-}
-
 static void
 gen7_render_flush(struct intel_batchbuffer *batch,
 		  drm_intel_context *context, uint32_t batch_end)
@@ -108,7 +73,7 @@ gen7_bind_buf(struct intel_batchbuffer *batch,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, 8 * sizeof(*ss), 32);
+	ss = intel_batchbuffer_subdata_alloc(batch, 8 * sizeof(*ss), 32);
 
 	ss[0] = (GEN7_SURFACE_2D << GEN7_SURFACE_TYPE_SHIFT |
 		 gen7_tiling_bits(buf->tiling) |
@@ -125,12 +90,13 @@ gen7_bind_buf(struct intel_batchbuffer *batch,
 		ss[7] |= HSW_SURFACE_SWIZZLE(RED, GREEN, BLUE, ALPHA);
 
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				      batch_offset(batch, ss) + 4,
+				      intel_batchbuffer_subdata_offset(batch,
+				      ss) + 4,
 				      buf->bo, 0,
 				      read_domain, write_domain);
 	igt_assert(ret == 0);
 
-	return batch_offset(batch, ss);
+	return intel_batchbuffer_subdata_offset(batch, ss);
 }
 
 static void
@@ -175,7 +141,7 @@ gen7_create_vertex_buffer(struct intel_batchbuffer *batch,
 {
 	uint16_t *v;
 
-	v = batch_alloc(batch, 12*sizeof(*v), 8);
+	v = intel_batchbuffer_subdata_alloc(batch, 12*sizeof(*v), 8);
 
 	v[0] = dst_x + width;
 	v[1] = dst_y + height;
@@ -192,7 +158,7 @@ gen7_create_vertex_buffer(struct intel_batchbuffer *batch,
 	v[10] = src_x;
 	v[11] = src_y;
 
-	return batch_offset(batch, v);
+	return intel_batchbuffer_subdata_offset(batch, v);
 }
 
 static void gen7_emit_vertex_buffer(struct intel_batchbuffer *batch,
@@ -219,14 +185,14 @@ gen7_bind_surfaces(struct intel_batchbuffer *batch,
 {
 	uint32_t *binding_table;
 
-	binding_table = batch_alloc(batch, 8, 32);
+	binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32);
 
 	binding_table[0] =
 		gen7_bind_buf(batch, dst, GEN7_SURFACEFORMAT_B8G8R8A8_UNORM, 1);
 	binding_table[1] =
 		gen7_bind_buf(batch, src, GEN7_SURFACEFORMAT_B8G8R8A8_UNORM, 0);
 
-	return batch_offset(batch, binding_table);
+	return intel_batchbuffer_subdata_offset(batch, binding_table);
 }
 
 static void
@@ -253,7 +219,7 @@ gen7_create_blend_state(struct intel_batchbuffer *batch)
 {
 	struct gen7_blend_state *blend;
 
-	blend = batch_alloc(batch, sizeof(*blend), 64);
+	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
 
 	blend->blend0.dest_blend_factor = GEN7_BLENDFACTOR_ZERO;
 	blend->blend0.source_blend_factor = GEN7_BLENDFACTOR_ONE;
@@ -261,7 +227,7 @@ gen7_create_blend_state(struct intel_batchbuffer *batch)
 	blend->blend1.post_blend_clamp_enable = 1;
 	blend->blend1.pre_blend_clamp_enable = 1;
 
-	return batch_offset(batch, blend);
+	return intel_batchbuffer_subdata_offset(batch, blend);
 }
 
 static void
@@ -285,11 +251,11 @@ gen7_create_cc_viewport(struct intel_batchbuffer *batch)
 {
 	struct gen7_cc_viewport *vp;
 
-	vp = batch_alloc(batch, sizeof(*vp), 32);
+	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
 	vp->min_depth = -1.e35;
 	vp->max_depth = 1.e35;
 
-	return batch_offset(batch, vp);
+	return intel_batchbuffer_subdata_offset(batch, vp);
 }
 
 static void
@@ -308,7 +274,7 @@ gen7_create_sampler(struct intel_batchbuffer *batch)
 {
 	struct gen7_sampler_state *ss;
 
-	ss = batch_alloc(batch, sizeof(*ss), 32);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
 
 	ss->ss0.min_filter = GEN7_MAPFILTER_NEAREST;
 	ss->ss0.mag_filter = GEN7_MAPFILTER_NEAREST;
@@ -319,7 +285,7 @@ gen7_create_sampler(struct intel_batchbuffer *batch)
 
 	ss->ss3.non_normalized_coord = 1;
 
-	return batch_offset(batch, ss);
+	return intel_batchbuffer_subdata_offset(batch, ss);
 }
 
 static void
@@ -544,7 +510,8 @@ void gen7_render_copyfunc(struct intel_batchbuffer *batch,
 	blend_state = gen7_create_blend_state(batch);
 	cc_viewport = gen7_create_cc_viewport(batch);
 	ps_sampler_off = gen7_create_sampler(batch);
-	ps_kernel_off = batch_copy(batch, ps_kernel, sizeof(ps_kernel), 64);
+	ps_kernel_off = intel_batchbuffer_copy_data(batch, ps_kernel,
+						    sizeof(ps_kernel), 64);
 	vertex_buffer = gen7_create_vertex_buffer(batch,
 						  src_x, src_y,
 						  dst_x, dst_y,
diff --git a/lib/rendercopy_gen8.c b/lib/rendercopy_gen8.c
index fe3fedf..fbf049f 100644
--- a/lib/rendercopy_gen8.c
+++ b/lib/rendercopy_gen8.c
@@ -129,41 +129,6 @@ static void annotation_flush(struct annotations_context *aub,
 						 aub->index);
 }
 
-static uint32_t
-batch_used(struct intel_batchbuffer *batch)
-{
-	return batch->ptr - batch->buffer;
-}
-
-static uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align)
-{
-	uint32_t offset = batch_used(batch);
-	offset = ALIGN(offset, align);
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-static void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
-{
-	uint32_t offset = batch_align(batch, align);
-	batch->ptr += size;
-	return memset(batch->buffer + offset, 0, size);
-}
-
-static uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr)
-{
-	return (uint8_t *)ptr - batch->buffer;
-}
-
-static uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
-{
-	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
-}
-
 static void
 gen6_render_flush(struct intel_batchbuffer *batch,
 		  drm_intel_context *context, uint32_t batch_end)
@@ -195,8 +160,8 @@ gen8_bind_buf(struct intel_batchbuffer *batch,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 	annotation_add_state(aub, AUB_TRACE_SURFACE_STATE, offset, sizeof(*ss));
 
 	ss->ss0.surface_type = GEN6_SURFACE_2D;
@@ -212,7 +177,8 @@ gen8_bind_buf(struct intel_batchbuffer *batch,
 	ss->ss8.base_addr = buf->bo->offset;
 
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				      batch_offset(batch, ss) + 8 * 4,
+				      intel_batchbuffer_subdata_offset(batch,
+				      ss) + 8 * 4,
 				      buf->bo, 0,
 				      read_domain, write_domain);
 	igt_assert(ret == 0);
@@ -237,8 +203,8 @@ gen8_bind_surfaces(struct intel_batchbuffer *batch,
 {
 	uint32_t *binding_table, offset;
 
-	binding_table = batch_alloc(batch, 8, 32);
-	offset = batch_offset(batch, binding_table);
+	binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32);
+	offset = intel_batchbuffer_subdata_offset(batch, binding_table);
 	annotation_add_state(aub, AUB_TRACE_BINDING_TABLE, offset, 8);
 
 	binding_table[0] =
@@ -259,8 +225,8 @@ gen8_create_sampler(struct intel_batchbuffer *batch,
 	struct gen8_sampler_state *ss;
 	uint32_t offset;
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 	annotation_add_state(aub, AUB_TRACE_SAMPLER_STATE,
 			     offset, sizeof(*ss));
 
@@ -285,7 +251,7 @@ gen8_fill_ps(struct intel_batchbuffer *batch,
 {
 	uint32_t offset;
 
-	offset = batch_copy(batch, kernel, size, 64);
+	offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
 	annotation_add_state(aub, AUB_TRACE_KERNEL_INSTRUCTIONS, offset, size);
 
 	return offset;
@@ -312,7 +278,7 @@ gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
 	void *start;
 	uint32_t offset;
 
-	batch_align(batch, 8);
+	intel_batchbuffer_align(batch, 8);
 	start = batch->ptr;
 
 	emit_vertex_2s(batch, dst_x + width, dst_y + height);
@@ -327,7 +293,7 @@ gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
 	emit_vertex_normalized(batch, src_x, igt_buf_width(src));
 	emit_vertex_normalized(batch, src_y, igt_buf_height(src));
 
-	offset = batch_offset(batch, start);
+	offset = intel_batchbuffer_subdata_offset(batch, start);
 	annotation_add_state(aub, AUB_TRACE_VERTEX_BUFFER,
 			     offset, 3 * VERTEX_SIZE);
 	return offset;
@@ -413,8 +379,9 @@ gen6_create_cc_state(struct intel_batchbuffer *batch,
 	struct gen6_color_calc_state *cc_state;
 	uint32_t offset;
 
-	cc_state = batch_alloc(batch, sizeof(*cc_state), 64);
-	offset = batch_offset(batch, cc_state);
+	cc_state = intel_batchbuffer_subdata_alloc(batch,
+						   sizeof(*cc_state), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, cc_state);
 	annotation_add_state(aub, AUB_TRACE_CC_STATE,
 			     offset, sizeof(*cc_state));
 
@@ -429,8 +396,8 @@ gen8_create_blend_state(struct intel_batchbuffer *batch,
 	int i;
 	uint32_t offset;
 
-	blend = batch_alloc(batch, sizeof(*blend), 64);
-	offset = batch_offset(batch, blend);
+	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, blend);
 	annotation_add_state(aub, AUB_TRACE_BLEND_STATE,
 			     offset, sizeof(*blend));
 
@@ -452,8 +419,8 @@ gen6_create_cc_viewport(struct intel_batchbuffer *batch,
 	struct gen6_cc_viewport *vp;
 	uint32_t offset;
 
-	vp = batch_alloc(batch, sizeof(*vp), 32);
-	offset = batch_offset(batch, vp);
+	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
+	offset = intel_batchbuffer_subdata_offset(batch, vp);
 	annotation_add_state(aub, AUB_TRACE_CC_VP_STATE,
 			     offset, sizeof(*vp));
 
@@ -472,8 +439,9 @@ gen7_create_sf_clip_viewport(struct intel_batchbuffer *batch,
 	struct gen7_sf_clip_viewport *scv_state;
 	uint32_t offset;
 
-	scv_state = batch_alloc(batch, sizeof(*scv_state), 64);
-	offset = batch_offset(batch, scv_state);
+	scv_state = intel_batchbuffer_subdata_alloc(batch,
+						    sizeof(*scv_state), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, scv_state);
 	annotation_add_state(aub, AUB_TRACE_CLIP_VP_STATE,
 			     offset, sizeof(*scv_state));
 
@@ -492,8 +460,8 @@ gen6_create_scissor_rect(struct intel_batchbuffer *batch,
 	struct gen6_scissor_rect *scissor;
 	uint32_t offset;
 
-	scissor = batch_alloc(batch, sizeof(*scissor), 64);
-	offset = batch_offset(batch, scissor);
+	scissor = intel_batchbuffer_subdata_alloc(batch, sizeof(*scissor), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, scissor);
 	annotation_add_state(aub, AUB_TRACE_SCISSOR_STATE,
 			     offset, sizeof(*scissor));
 
@@ -934,7 +902,7 @@ void gen8_render_copyfunc(struct intel_batchbuffer *batch,
 
 	intel_batchbuffer_flush_with_context(batch, context);
 
-	batch_align(batch, 8);
+	intel_batchbuffer_align(batch, 8);
 
 	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
 
@@ -1019,7 +987,7 @@ void gen8_render_copyfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	igt_assert(batch_end < BATCH_STATE_SPLIT);
 	annotation_add_batch(&aub_annotations, batch_end);
 
diff --git a/lib/rendercopy_gen9.c b/lib/rendercopy_gen9.c
index e646e97..9bd1cbb 100644
--- a/lib/rendercopy_gen9.c
+++ b/lib/rendercopy_gen9.c
@@ -130,41 +130,6 @@ static void annotation_flush(struct annotations_context *ctx,
 						 ctx->index);
 }
 
-static uint32_t
-batch_used(struct intel_batchbuffer *batch)
-{
-	return batch->ptr - batch->buffer;
-}
-
-static uint32_t
-batch_align(struct intel_batchbuffer *batch, uint32_t align)
-{
-	uint32_t offset = batch_used(batch);
-	offset = ALIGN(offset, align);
-	batch->ptr = batch->buffer + offset;
-	return offset;
-}
-
-static void *
-batch_alloc(struct intel_batchbuffer *batch, uint32_t size, uint32_t align)
-{
-	uint32_t offset = batch_align(batch, align);
-	batch->ptr += size;
-	return memset(batch->buffer + offset, 0, size);
-}
-
-static uint32_t
-batch_offset(struct intel_batchbuffer *batch, void *ptr)
-{
-	return (uint8_t *)ptr - batch->buffer;
-}
-
-static uint32_t
-batch_copy(struct intel_batchbuffer *batch, const void *ptr, uint32_t size, uint32_t align)
-{
-	return batch_offset(batch, memcpy(batch_alloc(batch, size, align), ptr, size));
-}
-
 static void
 gen6_render_flush(struct intel_batchbuffer *batch,
 		  drm_intel_context *context, uint32_t batch_end)
@@ -193,8 +158,8 @@ gen8_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
 		read_domain = I915_GEM_DOMAIN_SAMPLER;
 	}
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 	annotation_add_state(&aub_annotations, AUB_TRACE_SURFACE_STATE,
 			     offset, sizeof(*ss));
 
@@ -211,7 +176,8 @@ gen8_bind_buf(struct intel_batchbuffer *batch, struct igt_buf *buf,
 	ss->ss8.base_addr = buf->bo->offset;
 
 	ret = drm_intel_bo_emit_reloc(batch->bo,
-				      batch_offset(batch, ss) + 8 * 4,
+				      intel_batchbuffer_subdata_offset(batch,
+				      ss) + 8 * 4,
 				      buf->bo, 0,
 				      read_domain, write_domain);
 	assert(ret == 0);
@@ -235,8 +201,8 @@ gen8_bind_surfaces(struct intel_batchbuffer *batch,
 {
 	uint32_t *binding_table, offset;
 
-	binding_table = batch_alloc(batch, 8, 32);
-	offset = batch_offset(batch, binding_table);
+	binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32);
+	offset = intel_batchbuffer_subdata_offset(batch, binding_table);
 	annotation_add_state(&aub_annotations, AUB_TRACE_BINDING_TABLE,
 			     offset, 8);
 
@@ -254,8 +220,8 @@ gen8_create_sampler(struct intel_batchbuffer *batch) {
 	struct gen8_sampler_state *ss;
 	uint32_t offset;
 
-	ss = batch_alloc(batch, sizeof(*ss), 64);
-	offset = batch_offset(batch, ss);
+	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, ss);
 	annotation_add_state(&aub_annotations, AUB_TRACE_SAMPLER_STATE,
 			     offset, sizeof(*ss));
 
@@ -279,7 +245,7 @@ gen8_fill_ps(struct intel_batchbuffer *batch,
 {
 	uint32_t offset;
 
-	offset = batch_copy(batch, kernel, size, 64);
+	offset = intel_batchbuffer_copy_data(batch, kernel, size, 64);
 	annotation_add_state(&aub_annotations, AUB_TRACE_KERNEL_INSTRUCTIONS,
 			     offset, size);
 
@@ -306,7 +272,7 @@ gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
 	void *start;
 	uint32_t offset;
 
-	batch_align(batch, 8);
+	intel_batchbuffer_align(batch, 8);
 	start = batch->ptr;
 
 	emit_vertex_2s(batch, dst_x + width, dst_y + height);
@@ -321,7 +287,7 @@ gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch,
 	emit_vertex_normalized(batch, src_x, igt_buf_width(src));
 	emit_vertex_normalized(batch, src_y, igt_buf_height(src));
 
-	offset = batch_offset(batch, start);
+	offset = intel_batchbuffer_subdata_offset(batch, start);
 	annotation_add_state(&aub_annotations, AUB_TRACE_VERTEX_BUFFER,
 			     offset, 3 * VERTEX_SIZE);
 	return offset;
@@ -406,8 +372,9 @@ gen6_create_cc_state(struct intel_batchbuffer *batch)
 	struct gen6_color_calc_state *cc_state;
 	uint32_t offset;
 
-	cc_state = batch_alloc(batch, sizeof(*cc_state), 64);
-	offset = batch_offset(batch, cc_state);
+	cc_state = intel_batchbuffer_subdata_alloc(batch,
+						   sizeof(*cc_state), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, cc_state);
 	annotation_add_state(&aub_annotations, AUB_TRACE_CC_STATE,
 			     offset, sizeof(*cc_state));
 
@@ -421,8 +388,8 @@ gen8_create_blend_state(struct intel_batchbuffer *batch)
 	int i;
 	uint32_t offset;
 
-	blend = batch_alloc(batch, sizeof(*blend), 64);
-	offset = batch_offset(batch, blend);
+	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, blend);
 	annotation_add_state(&aub_annotations, AUB_TRACE_BLEND_STATE,
 			     offset, sizeof(*blend));
 
@@ -443,8 +410,8 @@ gen6_create_cc_viewport(struct intel_batchbuffer *batch)
 	struct gen6_cc_viewport *vp;
 	uint32_t offset;
 
-	vp = batch_alloc(batch, sizeof(*vp), 32);
-	offset = batch_offset(batch, vp);
+	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
+	offset = intel_batchbuffer_subdata_offset(batch, vp);
 	annotation_add_state(&aub_annotations, AUB_TRACE_CC_VP_STATE,
 			     offset, sizeof(*vp));
 
@@ -461,8 +428,9 @@ gen7_create_sf_clip_viewport(struct intel_batchbuffer *batch) {
 	struct gen7_sf_clip_viewport *scv_state;
 	uint32_t offset;
 
-	scv_state = batch_alloc(batch, sizeof(*scv_state), 64);
-	offset = batch_offset(batch, scv_state);
+	scv_state = intel_batchbuffer_subdata_alloc(batch,
+						    sizeof(*scv_state), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, scv_state);
 	annotation_add_state(&aub_annotations, AUB_TRACE_CLIP_VP_STATE,
 			     offset, sizeof(*scv_state));
 
@@ -480,8 +448,8 @@ gen6_create_scissor_rect(struct intel_batchbuffer *batch)
 	struct gen6_scissor_rect *scissor;
 	uint32_t offset;
 
-	scissor = batch_alloc(batch, sizeof(*scissor), 64);
-	offset = batch_offset(batch, scissor);
+	scissor = intel_batchbuffer_subdata_alloc(batch, sizeof(*scissor), 64);
+	offset = intel_batchbuffer_subdata_offset(batch, scissor);
 	annotation_add_state(&aub_annotations, AUB_TRACE_SCISSOR_STATE,
 			     offset, sizeof(*scissor));
 
@@ -940,7 +908,7 @@ void gen9_render_copyfunc(struct intel_batchbuffer *batch,
 
 	intel_batchbuffer_flush_with_context(batch, context);
 
-	batch_align(batch, 8);
+	intel_batchbuffer_align(batch, 8);
 
 	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
 
@@ -1023,7 +991,7 @@ void gen9_render_copyfunc(struct intel_batchbuffer *batch,
 
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 
-	batch_end = batch_align(batch, 8);
+	batch_end = intel_batchbuffer_align(batch, 8);
 	assert(batch_end < BATCH_STATE_SPLIT);
 	annotation_add_batch(&aub_annotations, batch_end);
 
-- 
2.9.5

_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [igt-dev] [PATCH i-g-t v2 1/2] lib/rendercopy_gen7: Reorganizing batch allocations
  2018-04-16 15:22 ` [igt-dev] [PATCH i-g-t v2 1/2] lib/rendercopy_gen7: Reorganizing batch allocations Lukasz Kalamarz
  2018-04-16 15:22   ` [igt-dev] [PATCH i-g-t v2 2/2] lib/intel_batchbuffer: Move batch functions from media/render/gpgpu libs Lukasz Kalamarz
@ 2018-04-16 20:50   ` Daniele Ceraolo Spurio
  2018-04-17  9:13     ` Kalamarz, Lukasz
  1 sibling, 1 reply; 7+ messages in thread
From: Daniele Ceraolo Spurio @ 2018-04-16 20:50 UTC (permalink / raw)
  To: Lukasz Kalamarz, igt-dev



On 16/04/18 08:22, Lukasz Kalamarz wrote:
> This lib was written in a different manner than all other libs,
> which was causing some issues during refactoring. The previous
> implementation was allocating data only in the state part of the
> batchbuffer. The new implementation takes advantage of splitting the
> batch into two parts, one for batch commands and a second for various
> states.
> 

This commit message is a bit unclear. There are no changes in how data 
is allocated/written, as both the old and the new implementations split 
the batch; the difference is that the old style used 2 different 
pointers (ptr and state) to access the 2 parts at the same time, while 
the new implementation re-uses the same pointer (ptr) and only accesses 
one section at a time.

Also, if I'm not missing anything after this patch there are no more 
users of batch->state (grep only returns results in null_state_gen, 
which has its own definition of intel_batchbuffer), so we can remove it 
from the structure.

> Signed-off-by: Lukasz Kalamarz <lukasz.kalamarz@intel.com>
> Cc: Katarzyna Dec <katarzyna.dec@intel.com>
> Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> ---

<snip>

> @@ -535,12 +531,27 @@ void gen7_render_copyfunc(struct intel_batchbuffer *batch,
>   			  unsigned width, unsigned height,
>   			  struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
>   {
> +	uint32_t ps_binding_table, ps_sampler_off, ps_kernel_off;
> +	uint32_t blend_state, cc_viewport;
> +	uint32_t vertex_buffer;
>   	uint32_t batch_end;
>   
>   	intel_batchbuffer_flush_with_context(batch, context);
>   
> -	batch->state = &batch->buffer[BATCH_STATE_SPLIT];
> +	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
>   
> +
> +	blend_state = gen7_create_blend_state(batch);
> +	cc_viewport = gen7_create_cc_viewport(batch);
> +	ps_sampler_off = gen7_create_sampler(batch);
> +	ps_kernel_off = batch_copy(batch, ps_kernel, sizeof(ps_kernel), 64);
> +	vertex_buffer = gen7_create_vertex_buffer(batch,
> +						  src_x, src_y,
> +						  dst_x, dst_y,
> +						  width, height);
> +	ps_binding_table = gen7_bind_surfaces(batch, src, dst);
> +

igt_assert(batch->ptr < &batch->buffer[4095]);

> +	batch->ptr = batch->buffer;
>   	OUT_BATCH(GEN7_PIPELINE_SELECT | PIPELINE_SELECT_3D);
>   
>   	gen7_emit_state_base_address(batch);
> @@ -556,18 +567,18 @@ void gen7_render_copyfunc(struct intel_batchbuffer *batch,
>   	gen7_emit_wm(batch);
>   	gen7_emit_streamout(batch);
>   	gen7_emit_null_depth_buffer(batch);
> -
> -	gen7_emit_cc(batch);
> -        gen7_emit_sampler(batch);
> +	gen7_emit_cc(batch, blend_state, cc_viewport);
> +	gen7_emit_sampler(batch, ps_sampler_off);
>           gen7_emit_sbe(batch);
> -        gen7_emit_ps(batch);
> +	gen7_emit_ps(batch, ps_kernel_off);
>           gen7_emit_vertex_elements(batch);
> -        gen7_emit_vertex_buffer(batch,
> -				src_x, src_y, dst_x, dst_y, width, height);
> -	gen7_emit_binding_table(batch, src, dst);
> +	gen7_emit_vertex_buffer(batch, src_x, src_y,
> +				dst_x, dst_y, width,
> +				height, vertex_buffer);
> +	gen7_emit_binding_table(batch, src, dst, ps_binding_table);
>   	gen7_emit_drawing_rectangle(batch, dst);
>   
> -        OUT_BATCH(GEN7_3DPRIMITIVE | (7- 2));
> +	OUT_BATCH(GEN7_3DPRIMITIVE | (7 - 2));
>           OUT_BATCH(GEN7_3DPRIMITIVE_VERTEX_SEQUENTIAL | _3DPRIM_RECTLIST);
>           OUT_BATCH(3);
>           OUT_BATCH(0);
> 

It might be worth adding a patch to change the spaces to tabs before 
this one to have a cleaner result. Not a blocker, since it isn't strictly 
related to this change.

Daniele
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [igt-dev] [PATCH i-g-t v2 1/2] lib/rendercopy_gen7: Reorganizing batch allocations
  2018-04-16 20:50   ` [igt-dev] [PATCH i-g-t v2 1/2] lib/rendercopy_gen7: Reorganizing batch allocations Daniele Ceraolo Spurio
@ 2018-04-17  9:13     ` Kalamarz, Lukasz
  0 siblings, 0 replies; 7+ messages in thread
From: Kalamarz, Lukasz @ 2018-04-17  9:13 UTC (permalink / raw)
  To: Ceraolo Spurio, Daniele, igt-dev

On Mon, 2018-04-16 at 13:50 -0700, Daniele Ceraolo Spurio wrote:
> 
> On 16/04/18 08:22, Lukasz Kalamarz wrote:
> > This lib was written in a different manner than all other libs,
> > which was causing some issues during refactoring. Previous
> > implementation was allocating data only in a state part of
> > batchbuffer.
> > New implementation take usage of splitting batch into two parts,
> > one for batch commands and second for various states.
> > 
> 
> This commit message is a bit unclear. There are no changes in how
> data 
> is allocated/written as both the old and the new implementations
> split 
> the batch; the difference is that the old style used 2 different 
> pointers (ptr and state) to access the 2 parts at the same time
> while 
> the new implementation re-uses the same pointer (ptr) and only
> access 
> one section at a time.
> 

Fixed that in new series

> Also, if I'm not missing anything after this patch there are no more 
> users of batch->state (grep only returns results in null_state_gen, 
> which has its own definition of intel_batchbuffer), so we can remove
> it 
> from the structure.
> 

Added new patch to remove state from intel_batchbuffer

> > Signed-off-by: Lukasz Kalamarz <lukasz.kalamarz@intel.com>
> > Cc: Katarzyna Dec <katarzyna.dec@intel.com>
> > Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
> > Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> > ---
> 
> <snip>
> 
> > @@ -535,12 +531,27 @@ void gen7_render_copyfunc(struct
> > intel_batchbuffer *batch,
> >   			  unsigned width, unsigned height,
> >   			  struct igt_buf *dst, unsigned dst_x,
> > unsigned dst_y)
> >   {
> > +	uint32_t ps_binding_table, ps_sampler_off, ps_kernel_off;
> > +	uint32_t blend_state, cc_viewport;
> > +	uint32_t vertex_buffer;
> >   	uint32_t batch_end;
> >   
> >   	intel_batchbuffer_flush_with_context(batch, context);
> >   
> > -	batch->state = &batch->buffer[BATCH_STATE_SPLIT];
> > +	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
> >   
> > +
> > +	blend_state = gen7_create_blend_state(batch);
> > +	cc_viewport = gen7_create_cc_viewport(batch);
> > +	ps_sampler_off = gen7_create_sampler(batch);
> > +	ps_kernel_off = batch_copy(batch, ps_kernel,
> > sizeof(ps_kernel), 64);
> > +	vertex_buffer = gen7_create_vertex_buffer(batch,
> > +						  src_x, src_y,
> > +						  dst_x, dst_y,
> > +						  width, height);
> > +	ps_binding_table = gen7_bind_surfaces(batch, src, dst);
> > +
> 
> igt_assert(batch->ptr < &batch->buffer[4095]);
> 

Missed that. Fixed in new version

> > +	batch->ptr = batch->buffer;
> >   	OUT_BATCH(GEN7_PIPELINE_SELECT | PIPELINE_SELECT_3D);
> >   
> >   	gen7_emit_state_base_address(batch);
> > @@ -556,18 +567,18 @@ void gen7_render_copyfunc(struct
> > intel_batchbuffer *batch,
> >   	gen7_emit_wm(batch);
> >   	gen7_emit_streamout(batch);
> >   	gen7_emit_null_depth_buffer(batch);
> > -
> > -	gen7_emit_cc(batch);
> > -        gen7_emit_sampler(batch);
> > +	gen7_emit_cc(batch, blend_state, cc_viewport);
> > +	gen7_emit_sampler(batch, ps_sampler_off);
> >           gen7_emit_sbe(batch);
> > -        gen7_emit_ps(batch);
> > +	gen7_emit_ps(batch, ps_kernel_off);
> >           gen7_emit_vertex_elements(batch);
> > -        gen7_emit_vertex_buffer(batch,
> > -				src_x, src_y, dst_x, dst_y, width,
> > height);
> > -	gen7_emit_binding_table(batch, src, dst);
> > +	gen7_emit_vertex_buffer(batch, src_x, src_y,
> > +				dst_x, dst_y, width,
> > +				height, vertex_buffer);
> > +	gen7_emit_binding_table(batch, src, dst,
> > ps_binding_table);
> >   	gen7_emit_drawing_rectangle(batch, dst);
> >   
> > -        OUT_BATCH(GEN7_3DPRIMITIVE | (7- 2));
> > +	OUT_BATCH(GEN7_3DPRIMITIVE | (7 - 2));
> >           OUT_BATCH(GEN7_3DPRIMITIVE_VERTEX_SEQUENTIAL |
> > _3DPRIM_RECTLIST);
> >           OUT_BATCH(3);
> >           OUT_BATCH(0);
> > 
> 
> It might be worth adding a patch to change the spaces to tabs before 
> this one to have cleaner result. Not a blocker since it isn't
> strictly 
> related to this change.
> Daniele

Created a new patch to fix that.
---
Lukasz
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2018-04-17  9:13 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-04-13 15:13 [igt-dev] [PATCH i-g-t] lib/intel_batchbuffer: Move batch functions from media/render/gpgpu libs Lukasz Kalamarz
2018-04-13 15:33 ` [igt-dev] ✗ Fi.CI.BAT: failure for " Patchwork
2018-04-16  8:13 ` [igt-dev] [PATCH i-g-t] " Katarzyna Dec
2018-04-16 15:22 ` [igt-dev] [PATCH i-g-t v2 1/2] lib/rendercopy_gen7: Reorganizing batch allocations Lukasz Kalamarz
2018-04-16 15:22   ` [igt-dev] [PATCH i-g-t v2 2/2] lib/intel_batchbuffer: Move batch functions from media/render/gpgpu libs Lukasz Kalamarz
2018-04-16 20:50   ` [igt-dev] [PATCH i-g-t v2 1/2] lib/rendercopy_gen7: Reorganizing batch allocations Daniele Ceraolo Spurio
2018-04-17  9:13     ` Kalamarz, Lukasz

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.