All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Zbigniew Kempczyński" <zbigniew.kempczynski@intel.com>
To: igt-dev@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [igt-dev] [PATCH i-g-t v23 14/18] tests/gem|kms: remove libdrm dependency (batch 2)
Date: Sun,  2 Aug 2020 18:29:58 +0200	[thread overview]
Message-ID: <20200802163002.5815-15-zbigniew.kempczynski@intel.com> (raw)
In-Reply-To: <20200802163002.5815-1-zbigniew.kempczynski@intel.com>

Use intel_bb / intel_buf to remove libdrm dependency.

Tests changed:
- gem_read_read_speed
- gem_render_copy
- gem_render_copy_redux
- gem_render_linear_blits
- gem_render_tiled_blits
- gem_stress
- kms_big_fb
- kms_cursor_crc

Remove the transitional rendercopy_bufmgr; we don't need it anymore.

Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Cc: Dominik Grzegorzek <dominik.grzegorzek@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 lib/Makefile.sources                 |   2 -
 lib/meson.build                      |   1 -
 lib/rendercopy_bufmgr.c              | 171 ---------------
 lib/rendercopy_bufmgr.h              |  28 ---
 tests/i915/gem_read_read_speed.c     | 161 +++++++-------
 tests/i915/gem_render_copy.c         | 313 +++++++++++----------------
 tests/i915/gem_render_copy_redux.c   |  67 +++---
 tests/i915/gem_render_linear_blits.c |  90 +++-----
 tests/i915/gem_render_tiled_blits.c  |  93 ++++----
 tests/i915/gem_stress.c              | 244 +++++++++++----------
 tests/kms_big_fb.c                   |  54 +++--
 tests/kms_cursor_crc.c               |  63 +++---
 12 files changed, 506 insertions(+), 781 deletions(-)
 delete mode 100644 lib/rendercopy_bufmgr.c
 delete mode 100644 lib/rendercopy_bufmgr.h

diff --git a/lib/Makefile.sources b/lib/Makefile.sources
index 464f5be2..67b38645 100644
--- a/lib/Makefile.sources
+++ b/lib/Makefile.sources
@@ -107,8 +107,6 @@ lib_source_list =	 	\
 	gen7_render.h		\
 	gen8_render.h		\
 	gen9_render.h		\
-	rendercopy_bufmgr.c	\
-	rendercopy_bufmgr.h	\
 	rendercopy_gen4.c	\
 	rendercopy_gen6.c	\
 	rendercopy_gen7.c	\
diff --git a/lib/meson.build b/lib/meson.build
index 341696e5..f1f5f7ca 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -45,7 +45,6 @@ lib_sources = [
 	'gpu_cmds.c',
 	'rendercopy_i915.c',
 	'rendercopy_i830.c',
-	'rendercopy_bufmgr.c',
 	'rendercopy_gen4.c',
 	'rendercopy_gen6.c',
 	'rendercopy_gen7.c',
diff --git a/lib/rendercopy_bufmgr.c b/lib/rendercopy_bufmgr.c
deleted file mode 100644
index 5cb588fa..00000000
--- a/lib/rendercopy_bufmgr.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright © 2020 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include <sys/ioctl.h>
-#include "igt.h"
-#include "igt_x86.h"
-#include "rendercopy_bufmgr.h"
-
-/**
- * SECTION:rendercopy_bufmgr
- * @short_description: Render copy buffer manager
- * @title: Render copy bufmgr
- * @include: igt.h
- *
- * # Rendercopy buffer manager
- *
- * Rendercopy depends on libdrm and igt_buf, so some middle layer to intel_buf
- * and buf_ops is required.
- *
- * |[<!-- language="c" -->
- * struct rendercopy_bufmgr *bmgr;
- * ...
- * bmgr = rendercopy_bufmgr_create(fd, bufmgr);
- * ...
- * igt_buf_init(bmgr, &buf, 512, 512, 32, I915_TILING_X, false);
- * ...
- * linear_to_igt_buf(bmgr, &buf, linear);
- * ...
- * igt_buf_to_linear(bmgr, &buf, linear);
- * ...
- * rendercopy_bufmgr_destroy(bmgr);
- * ]|
- */
-
-struct rendercopy_bufmgr {
-	int fd;
-	drm_intel_bufmgr *bufmgr;
-	struct buf_ops *bops;
-};
-
-static void __igt_buf_to_intel_buf(struct igt_buf *buf, struct intel_buf *ibuf)
-{
-	ibuf->handle = buf->bo->handle;
-	ibuf->surface[0].stride = buf->surface[0].stride;
-	ibuf->tiling = buf->tiling;
-	ibuf->bpp = buf->bpp;
-	ibuf->surface[0].size = buf->surface[0].size;
-	ibuf->compression = buf->compression;
-	ibuf->ccs[0].offset = buf->ccs[0].offset;
-	ibuf->ccs[0].stride = buf->ccs[0].stride;
-}
-
-void igt_buf_to_linear(struct rendercopy_bufmgr *bmgr, struct igt_buf *buf,
-		       uint32_t *linear)
-{
-	struct intel_buf ibuf;
-
-	__igt_buf_to_intel_buf(buf, &ibuf);
-
-	intel_buf_to_linear(bmgr->bops, &ibuf, linear);
-}
-
-void linear_to_igt_buf(struct rendercopy_bufmgr *bmgr, struct igt_buf *buf,
-		       uint32_t *linear)
-{
-	struct intel_buf ibuf;
-
-	__igt_buf_to_intel_buf(buf, &ibuf);
-
-	linear_to_intel_buf(bmgr->bops, &ibuf, linear);
-}
-
-struct rendercopy_bufmgr *
-rendercopy_bufmgr_create(int fd, drm_intel_bufmgr *bufmgr)
-{
-	struct buf_ops *bops;
-	struct rendercopy_bufmgr *bmgr;
-
-	igt_assert(bufmgr);
-
-	bops = buf_ops_create(fd);
-	igt_assert(bops);
-
-	bmgr = calloc(1, sizeof(*bmgr));
-	igt_assert(bmgr);
-
-	bmgr->fd = fd;
-	bmgr->bufmgr = bufmgr;
-	bmgr->bops = bops;
-
-	return bmgr;
-}
-
-void rendercopy_bufmgr_destroy(struct rendercopy_bufmgr *bmgr)
-{
-	igt_assert(bmgr);
-	igt_assert(bmgr->bops);
-
-	buf_ops_destroy(bmgr->bops);
-	free(bmgr);
-}
-
-bool rendercopy_bufmgr_set_software_tiling(struct rendercopy_bufmgr *bmgr,
-					   uint32_t tiling,
-					   bool use_software_tiling)
-{
-	return buf_ops_set_software_tiling(bmgr->bops, tiling,
-					   use_software_tiling);
-}
-
-void igt_buf_init(struct rendercopy_bufmgr *bmgr, struct igt_buf *buf,
-		  int width, int height, int bpp,
-		  uint32_t tiling, uint32_t compression)
-{
-	uint32_t devid = intel_get_drm_devid(bmgr->fd);
-	int generation= intel_gen(devid);
-	struct intel_buf ibuf;
-	int size;
-
-	memset(buf, 0, sizeof(*buf));
-
-	buf->surface[0].stride = ALIGN(width * (bpp / 8), 128);
-	buf->surface[0].size = buf->surface[0].stride * height;
-	buf->tiling = tiling;
-	buf->bpp = bpp;
-	buf->compression = compression;
-
-	size = buf->surface[0].stride * ALIGN(height, 32);
-
-	if (compression) {
-		int ccs_width = igt_buf_intel_ccs_width(generation, buf);
-		int ccs_height = igt_buf_intel_ccs_height(generation, buf);
-
-		buf->ccs[0].offset = buf->surface[0].stride * ALIGN(height, 32);
-		buf->ccs[0].stride = ccs_width;
-
-		size = buf->ccs[0].offset + ccs_width * ccs_height;
-	}
-
-	buf->bo = drm_intel_bo_alloc(bmgr->bufmgr, "", size, 4096);
-
-	intel_buf_init_using_handle(bmgr->bops,
-				    buf->bo->handle,
-				    &ibuf,
-				    width, height, bpp, 0,
-				    tiling, compression);
-
-	buf->ccs[0].offset = ibuf.ccs[0].offset;
-	buf->ccs[0].stride = ibuf.ccs[0].stride;
-}
diff --git a/lib/rendercopy_bufmgr.h b/lib/rendercopy_bufmgr.h
deleted file mode 100644
index f4e5118c..00000000
--- a/lib/rendercopy_bufmgr.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __RENDERCOPY_BUFMGR_H__
-#define __RENDERCOPY_BUFMGR_H__
-
-#include <stdint.h>
-#include "intel_bufops.h"
-#include "intel_batchbuffer.h"
-
-struct rendercopy_bufmgr;
-
-struct rendercopy_bufmgr *rendercopy_bufmgr_create(int fd,
-						   drm_intel_bufmgr *bufmgr);
-void rendercopy_bufmgr_destroy(struct rendercopy_bufmgr *bmgr);
-
-bool rendercopy_bufmgr_set_software_tiling(struct rendercopy_bufmgr *bmgr,
-					   uint32_t tiling,
-					   bool use_software_tiling);
-
-void igt_buf_to_linear(struct rendercopy_bufmgr *bmgr, struct igt_buf *buf,
-		       uint32_t *linear);
-
-void linear_to_igt_buf(struct rendercopy_bufmgr *bmgr, struct igt_buf *buf,
-		       uint32_t *linear);
-
-void igt_buf_init(struct rendercopy_bufmgr *bmgr, struct igt_buf *buf,
-		  int width, int height, int bpp,
-		  uint32_t tiling, uint32_t compression);
-
-#endif
diff --git a/tests/i915/gem_read_read_speed.c b/tests/i915/gem_read_read_speed.c
index 06b66935..40494566 100644
--- a/tests/i915/gem_read_read_speed.c
+++ b/tests/i915/gem_read_read_speed.c
@@ -42,70 +42,63 @@
 #include "i915/gem.h"
 #include "igt.h"
 #include "igt_sysfs.h"
-#include "intel_bufmgr.h"
 
 IGT_TEST_DESCRIPTION("Test speed of concurrent reads between engines.");
 
+#define BBSIZE 4096
 igt_render_copyfunc_t rendercopy;
-struct intel_batchbuffer *batch;
 int width, height;
 
-static drm_intel_bo *rcs_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
+static void set_to_gtt_domain(struct intel_buf *buf, int writing)
 {
-	struct igt_buf d = {
-		.bo = dst,
-		.num_tiles = width * height * 4,
-		.surface[0] = {
-			.size = width * height * 4, .stride = width * 4,
-		},
-		.bpp = 32,
-	}, s = {
-		.bo = src,
-		.num_tiles = width * height * 4,
-		.surface[0] = {
-			.size = width * height * 4, .stride = width * 4,
-		},
-		.bpp = 32,
-	};
-	uint32_t swizzle;
-	drm_intel_bo *bo = batch->bo;
-	drm_intel_bo_reference(bo);
-
-	drm_intel_bo_get_tiling(dst, &d.tiling, &swizzle);
-	drm_intel_bo_get_tiling(src, &s.tiling, &swizzle);
-
-	rendercopy(batch, NULL,
-		   &s, 0, 0,
+	int i915 = buf_ops_get_fd(buf->bops);
+
+	gem_set_domain(i915, buf->handle, I915_GEM_DOMAIN_GTT,
+		       writing ? I915_GEM_DOMAIN_GTT : 0);
+}
+
+static struct intel_bb *rcs_copy_bo(struct intel_buf *dst,
+				    struct intel_buf *src)
+{
+	int i915 = buf_ops_get_fd(dst->bops);
+	struct intel_bb *ibb = intel_bb_create(i915, BBSIZE);
+
+	/* enforce batch won't be recreated after execution */
+	intel_bb_ref(ibb);
+
+	rendercopy(ibb, 0,
+		   src, 0, 0,
 		   width, height,
-		   &d, 0, 0);
+		   dst, 0, 0);
 
-	return bo;
+	return ibb;
 }
 
-static drm_intel_bo *bcs_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
+static struct intel_bb *bcs_copy_bo(struct intel_buf *dst,
+				    struct intel_buf *src)
 {
-	drm_intel_bo *bo = batch->bo;
-	drm_intel_bo_reference(bo);
+	int i915 = buf_ops_get_fd(dst->bops);
+	struct intel_bb *ibb = intel_bb_create(i915, BBSIZE);
 
-	intel_blt_copy(batch,
-		       src, 0, 0, 4*width,
-		       dst, 0, 0, 4*width,
-		       width, height, 32);
+	intel_bb_ref(ibb);
 
-	return bo;
+	intel_bb_blt_copy(ibb,
+			  src, 0, 0, 4*width,
+			  dst, 0, 0, 4*width,
+			  width, height, 32);
+
+	return ibb;
 }
 
-static void
-set_bo(drm_intel_bo *bo, uint32_t val)
+static void set_bo(struct intel_buf *buf, uint32_t val)
 {
 	int size = width * height;
 	uint32_t *vaddr;
 
-	do_or_die(drm_intel_bo_map(bo, 1));
-	vaddr = bo->virtual;
+	vaddr = intel_buf_device_map(buf, true);
 	while (size--)
 		*vaddr++ = val;
-	drm_intel_bo_unmap(bo);
+	intel_buf_unmap(buf);
 }
 
 static double elapsed(const struct timespec *start,
@@ -115,54 +108,59 @@ static double elapsed(const struct timespec *start,
 	return (1e6*(end->tv_sec - start->tv_sec) + (end->tv_nsec - start->tv_nsec)/1000)/loop;
 }
 
-static drm_intel_bo *create_bo(drm_intel_bufmgr *bufmgr,
-			       const char *name)
+static struct intel_buf *create_bo(struct buf_ops *bops, const char *name)
 {
 	uint32_t tiling_mode = I915_TILING_X;
-	unsigned long pitch;
-	return drm_intel_bo_alloc_tiled(bufmgr, name,
-					width, height, 4,
-					&tiling_mode, &pitch, 0);
+	struct intel_buf *buf;
+
+	buf = intel_buf_create(bops, width, height, 32, 0, tiling_mode,
+			       I915_COMPRESSION_NONE);
+	intel_buf_set_name(buf, name);
+
+	return buf;
 }
 
-static void run(drm_intel_bufmgr *bufmgr, int _width, int _height,
+static void run(struct buf_ops *bops, int _width, int _height,
 		bool write_bcs, bool write_rcs)
 {
-	drm_intel_bo *src = NULL, *bcs = NULL, *rcs = NULL;
-	drm_intel_bo *bcs_batch, *rcs_batch;
+	struct intel_buf *src = NULL, *bcs = NULL, *rcs = NULL;
+	struct intel_bb *bcs_ibb = NULL, *rcs_ibb = NULL;
 	struct timespec start, end;
-	int loops = 1000;
+	int loops = 1;
 
 	width = _width;
 	height = _height;
 
-	src = create_bo(bufmgr, "src");
-	bcs = create_bo(bufmgr, "bcs");
-	rcs = create_bo(bufmgr, "rcs");
+	igt_info("width: %d, height: %d\n", width, height);
+
+	src = create_bo(bops, "src");
+	bcs = create_bo(bops, "bcs");
+	rcs = create_bo(bops, "rcs");
 
 	set_bo(src, 0xdeadbeef);
 
 	if (write_bcs) {
-		bcs_batch = bcs_copy_bo(src, bcs);
+		bcs_ibb = bcs_copy_bo(src, bcs);
 	} else {
-		bcs_batch = bcs_copy_bo(bcs, src);
+		bcs_ibb = bcs_copy_bo(bcs, src);
 	}
 	if (write_rcs) {
-		rcs_batch = rcs_copy_bo(src, rcs);
+		rcs_ibb = rcs_copy_bo(src, rcs);
 	} else {
-		rcs_batch = rcs_copy_bo(rcs, src);
+		rcs_ibb = rcs_copy_bo(rcs, src);
 	}
 
-	drm_intel_bo_unreference(rcs);
-	drm_intel_bo_unreference(bcs);
+	set_to_gtt_domain(src, true);
 
-	drm_intel_gem_bo_start_gtt_access(src, true);
 	clock_gettime(CLOCK_MONOTONIC, &start);
 	for (int i = 0; i < loops; i++) {
-		drm_intel_gem_bo_context_exec(rcs_batch, NULL, 4096, I915_EXEC_RENDER);
-		drm_intel_gem_bo_context_exec(bcs_batch, NULL, 4096, I915_EXEC_BLT);
+		intel_bb_exec(rcs_ibb, intel_bb_offset(rcs_ibb),
+			      I915_EXEC_RENDER, false);
+		intel_bb_exec(bcs_ibb, intel_bb_offset(bcs_ibb),
+			      I915_EXEC_BLT, false);
 	}
-	drm_intel_gem_bo_start_gtt_access(src, true);
+
+	set_to_gtt_domain(src, true);
 	clock_gettime(CLOCK_MONOTONIC, &end);
 
 	igt_info("Time to %s-%s %dx%d [%dk]:		%7.3fµs\n",
@@ -171,16 +169,19 @@ static void run(drm_intel_bufmgr *bufmgr, int _width, int _height,
 		 width, height, 4*width*height/1024,
 		 elapsed(&start, &end, loops));
 
-	drm_intel_bo_unreference(rcs_batch);
-	drm_intel_bo_unreference(bcs_batch);
-
-	drm_intel_bo_unreference(src);
+	intel_bb_unref(rcs_ibb);
+	intel_bb_destroy(rcs_ibb);
+	intel_bb_unref(bcs_ibb);
+	intel_bb_destroy(bcs_ibb);
+	intel_buf_destroy(src);
+	intel_buf_destroy(rcs);
+	intel_buf_destroy(bcs);
 }
 
 igt_main
 {
-	const int sizes[] = {1, 128, 256, 512, 1024, 2048, 4096, 8192, 0};
-	drm_intel_bufmgr *bufmgr = NULL;
+	const int sizes[] = {128, 256, 512, 1024, 2048, 4096, 8192, 0};
+	struct buf_ops *bops = NULL;
 	int fd, i;
 
 	igt_fixture {
@@ -195,22 +196,24 @@ igt_main
 		rendercopy = igt_get_render_copyfunc(devid);
 		igt_require(rendercopy);
 
-		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
-		igt_assert(bufmgr);
-
-		batch =  intel_batchbuffer_alloc(bufmgr, devid);
+		bops = buf_ops_create(fd);
 
 		gem_submission_print_method(fd);
 	}
 
 	for (i = 0; sizes[i] != 0; i++) {
 		igt_subtest_f("read-read-%dx%d", sizes[i], sizes[i])
-			run(bufmgr, sizes[i], sizes[i], false, false);
+			run(bops, sizes[i], sizes[i], false, false);
 		igt_subtest_f("read-write-%dx%d", sizes[i], sizes[i])
-			run(bufmgr, sizes[i], sizes[i], false, true);
+			run(bops, sizes[i], sizes[i], false, true);
 		igt_subtest_f("write-read-%dx%d", sizes[i], sizes[i])
-			run(bufmgr, sizes[i], sizes[i], true, false);
+			run(bops, sizes[i], sizes[i], true, false);
 		igt_subtest_f("write-write-%dx%d", sizes[i], sizes[i])
-			run(bufmgr, sizes[i], sizes[i], true, true);
+			run(bops, sizes[i], sizes[i], true, true);
+	}
+
+	igt_fixture {
+		buf_ops_destroy(bops);
+		close(fd);
 	}
 }
diff --git a/tests/i915/gem_render_copy.c b/tests/i915/gem_render_copy.c
index 1e1e79b9..7f425dcd 100644
--- a/tests/i915/gem_render_copy.c
+++ b/tests/i915/gem_render_copy.c
@@ -47,8 +47,7 @@
 #include "i915/gem.h"
 #include "igt.h"
 #include "igt_x86.h"
-#include "intel_bufmgr.h"
-#include "rendercopy_bufmgr.h"
+#include "intel_bufops.h"
 
 IGT_TEST_DESCRIPTION("Basic test for the render_copy() function.");
 
@@ -58,11 +57,10 @@ IGT_TEST_DESCRIPTION("Basic test for the render_copy() function.");
 typedef struct {
 	int drm_fd;
 	uint32_t devid;
-	drm_intel_bufmgr *bufmgr;
-	struct intel_batchbuffer *batch;
+	struct buf_ops *bops;
+	struct intel_bb *ibb;
 	igt_render_copyfunc_t render_copy;
 	igt_vebox_copyfunc_t vebox_copy;
-	struct rendercopy_bufmgr *bmgr;
 } data_t;
 static int opt_dump_png = false;
 static int check_all_pixels = false;
@@ -87,59 +85,32 @@ static void *alloc_aligned(uint64_t size)
 }
 
 static void
-copy_from_linear_buf(data_t *data, struct igt_buf *src, struct igt_buf *dst)
+copy_from_linear_buf(data_t *data, struct intel_buf *src, struct intel_buf *dst)
 {
-	void *linear;
-
 	igt_assert(src->tiling == I915_TILING_NONE);
 
-	gem_set_domain(data->drm_fd, src->bo->handle,
+	gem_set_domain(data->drm_fd, src->handle,
 		       I915_GEM_DOMAIN_CPU, 0);
-	linear = __gem_mmap_offset__cpu(data->drm_fd, src->bo->handle, 0,
-					src->bo->size, PROT_READ);
-	if (!linear)
-		linear = gem_mmap__cpu(data->drm_fd, src->bo->handle, 0,
-				       src->bo->size, PROT_READ);
-
-	linear_to_igt_buf(data->bmgr, dst, linear);
-
-	munmap(linear, src->bo->size);
-}
-
-static void scratch_buf_write_to_png(data_t *data, struct igt_buf *buf,
-				     const char *filename)
-{
-	cairo_surface_t *surface;
-	cairo_status_t ret;
-	void *linear;
-
-	linear = alloc_aligned(buf->bo->size);
-	igt_buf_to_linear(data->bmgr, buf, linear);
+	intel_buf_cpu_map(src, false);
 
-	surface = cairo_image_surface_create_for_data(linear,
-						      CAIRO_FORMAT_RGB24,
-						      igt_buf_width(buf),
-						      igt_buf_height(buf),
-						      buf->surface[0].stride);
-	ret = cairo_surface_write_to_png(surface, make_filename(filename));
-	igt_assert(ret == CAIRO_STATUS_SUCCESS);
-	cairo_surface_destroy(surface);
+	linear_to_intel_buf(data->bops, dst, src->ptr);
 
-	free(linear);
+	intel_buf_unmap(src);
 }
 
-static void *linear_copy_ccs(data_t *data, struct igt_buf *buf)
+static void *linear_copy_ccs(data_t *data, struct intel_buf *buf)
 {
 	void *ccs_data, *linear;
 	int gen = intel_gen(data->devid);
-	int ccs_size = igt_buf_intel_ccs_width(gen, buf) *
-		igt_buf_intel_ccs_height(gen, buf);
+	int ccs_size = intel_buf_ccs_width(gen, buf) *
+		intel_buf_ccs_height(gen, buf);
+	int bo_size = intel_buf_bo_size(buf);
 
 	ccs_data = alloc_aligned(ccs_size);
-	linear = alloc_aligned(buf->bo->size);
-	memset(linear, 0, buf->bo->size);
+	linear = alloc_aligned(bo_size);
+	memset(linear, 0, bo_size);
 
-	igt_buf_to_linear(data->bmgr, buf, linear);
+	intel_buf_to_linear(data->bops, buf, linear);
 	igt_memcpy_from_wc(ccs_data, linear + buf->ccs[0].offset, ccs_size);
 
 	free(linear);
@@ -147,31 +118,7 @@ static void *linear_copy_ccs(data_t *data, struct igt_buf *buf)
 	return ccs_data;
 }
 
-static void scratch_buf_ccs_write_to_png(data_t *data,
-					 struct igt_buf *buf,
-					 const char *filename)
-{
-	cairo_surface_t *surface;
-	cairo_status_t ret;
-	void *linear;
-	int gen = intel_gen(data->devid);
-	unsigned int ccs_width = igt_buf_intel_ccs_width(gen, buf);
-	unsigned int ccs_height = igt_buf_intel_ccs_height(gen, buf);
-
-	linear = linear_copy_ccs(data, buf);
-
-	surface = cairo_image_surface_create_for_data(linear,
-						      CAIRO_FORMAT_A8,
-						      ccs_width, ccs_height,
-						      buf->ccs[0].stride);
-	ret = cairo_surface_write_to_png(surface, make_filename(filename));
-	igt_assert(ret == CAIRO_STATUS_SUCCESS);
-	cairo_surface_destroy(surface);
-
-	free(linear);
-}
-
-static void scratch_buf_draw_pattern(data_t *data, struct igt_buf *buf,
+static void scratch_buf_draw_pattern(data_t *data, struct intel_buf *buf,
 				     int x, int y, int w, int h,
 				     int cx, int cy, int cw, int ch,
 				     bool use_alternate_colors)
@@ -181,12 +128,12 @@ static void scratch_buf_draw_pattern(data_t *data, struct igt_buf *buf,
 	cairo_t *cr;
 	void *linear;
 
-	linear = alloc_aligned(buf->bo->size);
+	linear = alloc_aligned(buf->surface[0].size);
 
 	surface = cairo_image_surface_create_for_data(linear,
 						      CAIRO_FORMAT_RGB24,
-						      igt_buf_width(buf),
-						      igt_buf_height(buf),
+						      intel_buf_width(buf),
+						      intel_buf_height(buf),
 						      buf->surface[0].stride);
 
 	cr = cairo_create(surface);
@@ -222,24 +169,24 @@ static void scratch_buf_draw_pattern(data_t *data, struct igt_buf *buf,
 
 	cairo_surface_destroy(surface);
 
-	linear_to_igt_buf(data->bmgr, buf, linear);
+	linear_to_intel_buf(data->bops, buf, linear);
 
 	free(linear);
 }
 
 static void
 scratch_buf_copy(data_t *data,
-		 struct igt_buf *src, int sx, int sy, int w, int h,
-		 struct igt_buf *dst, int dx, int dy)
+		 struct intel_buf *src, int sx, int sy, int w, int h,
+		 struct intel_buf *dst, int dx, int dy)
 {
-	int width = igt_buf_width(dst);
-	int height  = igt_buf_height(dst);
+	int width = intel_buf_width(dst);
+	int height = intel_buf_height(dst);
 	uint32_t *linear_dst;
 	uint32_t *linear_src;
 
-	igt_assert_eq(igt_buf_width(dst), igt_buf_width(src));
-	igt_assert_eq(igt_buf_height(dst), igt_buf_height(src));
-	igt_assert_eq(dst->bo->size, src->bo->size);
+	igt_assert_eq(intel_buf_width(dst), intel_buf_width(src));
+	igt_assert_eq(intel_buf_height(dst), intel_buf_height(src));
+	igt_assert_eq(intel_buf_bo_size(dst), intel_buf_bo_size(src));
 	igt_assert_eq(dst->bpp, src->bpp);
 
 	w = min(w, width - sx);
@@ -248,10 +195,10 @@ scratch_buf_copy(data_t *data,
 	h = min(h, height - sy);
 	h = min(h, height - dy);
 
-	linear_dst = alloc_aligned(dst->bo->size);
-	linear_src = alloc_aligned(src->bo->size);
-	igt_buf_to_linear(data->bmgr, src, linear_src);
-	igt_buf_to_linear(data->bmgr, dst, linear_dst);
+	linear_dst = alloc_aligned(intel_buf_bo_size(dst));
+	linear_src = alloc_aligned(intel_buf_bo_size(src));
+	intel_buf_to_linear(data->bops, src, linear_src);
+	intel_buf_to_linear(data->bops, dst, linear_dst);
 
 	for (int y = 0; y < h; y++) {
 		memcpy(&linear_dst[(dy+y) * width + dx],
@@ -260,50 +207,50 @@ scratch_buf_copy(data_t *data,
 	}
 	free(linear_src);
 
-	linear_to_igt_buf(data->bmgr, dst, linear_dst);
+	linear_to_intel_buf(data->bops, dst, linear_dst);
 	free(linear_dst);
 }
 
-static void scratch_buf_init(data_t *data, struct igt_buf *buf,
+static void scratch_buf_init(data_t *data, struct intel_buf *buf,
 			     int width, int height,
 			     uint32_t req_tiling,
 			     enum i915_compression compression)
 {
 	int bpp = 32;
 
-	igt_buf_init(data->bmgr, buf, width, height, bpp, req_tiling,
-		     compression);
+	intel_buf_init(data->bops, buf, width, height, bpp, 0,
+		       req_tiling, compression);
 
-	igt_assert(igt_buf_width(buf) == width);
-	igt_assert(igt_buf_height(buf) == height);
+	igt_assert(intel_buf_width(buf) == width);
+	igt_assert(intel_buf_height(buf) == height);
 }
 
-static void scratch_buf_fini(struct igt_buf *buf)
+static void scratch_buf_fini(struct intel_buf *buf)
 {
-	drm_intel_bo_unreference(buf->bo);
+	intel_buf_close(buf->bops, buf);
 }
 
 static void
 scratch_buf_check(data_t *data,
-		  struct igt_buf *buf,
-		  struct igt_buf *ref,
+		  struct intel_buf *buf,
+		  struct intel_buf *ref,
 		  int x, int y)
 {
-	int width = igt_buf_width(buf);
+	int width = intel_buf_width(buf);
 	uint32_t buf_val, ref_val;
 	uint32_t *linear;
 
-	igt_assert_eq(igt_buf_width(buf), igt_buf_width(ref));
-	igt_assert_eq(igt_buf_height(buf), igt_buf_height(ref));
-	igt_assert_eq(buf->bo->size, ref->bo->size);
+	igt_assert_eq(intel_buf_width(buf), intel_buf_width(ref));
+	igt_assert_eq(intel_buf_height(buf), intel_buf_height(ref));
+	igt_assert_eq(buf->surface[0].size, ref->surface[0].size);
 
-	linear = alloc_aligned(buf->bo->size);
-	igt_buf_to_linear(data->bmgr, buf, linear);
+	linear = alloc_aligned(buf->surface[0].size);
+	intel_buf_to_linear(data->bops, buf, linear);
 	buf_val = linear[y * width + x];
 	free(linear);
 
-	linear = alloc_aligned(ref->bo->size);
-	igt_buf_to_linear(data->bmgr, buf, linear);
+	linear = alloc_aligned(ref->surface[0].size);
+	intel_buf_to_linear(data->bops, buf, linear);
 	ref_val = linear[y * width + x];
 	free(linear);
 
@@ -314,21 +261,21 @@ scratch_buf_check(data_t *data,
 
 static void
 scratch_buf_check_all(data_t *data,
-		      struct igt_buf *buf,
-		      struct igt_buf *ref)
+		      struct intel_buf *buf,
+		      struct intel_buf *ref)
 {
-	int width = igt_buf_width(buf);
-	int height  = igt_buf_height(buf);
+	int width = intel_buf_width(buf);
+	int height = intel_buf_height(buf);
 	uint32_t *linear_buf, *linear_ref;
 
-	igt_assert_eq(igt_buf_width(buf), igt_buf_width(ref));
-	igt_assert_eq(igt_buf_height(buf), igt_buf_height(ref));
-	igt_assert_eq(buf->bo->size, ref->bo->size);
+	igt_assert_eq(intel_buf_width(buf), intel_buf_width(ref));
+	igt_assert_eq(intel_buf_height(buf), intel_buf_height(ref));
+	igt_assert_eq(buf->surface[0].size, ref->surface[0].size);
 
-	linear_buf = alloc_aligned(buf->bo->size);
-	linear_ref = alloc_aligned(ref->bo->size);
-	igt_buf_to_linear(data->bmgr, buf, linear_buf);
-	igt_buf_to_linear(data->bmgr, ref, linear_ref);
+	linear_buf = alloc_aligned(buf->surface[0].size);
+	linear_ref = alloc_aligned(ref->surface[0].size);
+	intel_buf_to_linear(data->bops, buf, linear_buf);
+	intel_buf_to_linear(data->bops, ref, linear_ref);
 
 	for (int y = 0; y < height; y++) {
 		for (int x = 0; x < width; x++) {
@@ -346,11 +293,11 @@ scratch_buf_check_all(data_t *data,
 }
 
 static void scratch_buf_ccs_check(data_t *data,
-				  struct igt_buf *buf)
+				  struct intel_buf *buf)
 {
 	int gen = intel_gen(data->devid);
-	int ccs_size = igt_buf_intel_ccs_width(gen, buf) *
-		igt_buf_intel_ccs_height(gen, buf);
+	int ccs_size = intel_buf_ccs_width(gen, buf) *
+		intel_buf_ccs_height(gen, buf);
 	uint8_t *linear;
 	int i;
 
@@ -368,26 +315,22 @@ static void scratch_buf_ccs_check(data_t *data,
 }
 
 static void
-dump_igt_buf_to_file(data_t *data, struct igt_buf *buf, const char *filename)
+dump_intel_buf_to_file(data_t *data, struct intel_buf *buf, const char *filename)
 {
 	FILE *out;
-	void *linear;
+	void *ptr;
+	uint32_t size = intel_buf_bo_size(buf);
 
-	gem_set_domain(data->drm_fd, buf->bo->handle,
+	gem_set_domain(data->drm_fd, buf->handle,
 		       I915_GEM_DOMAIN_CPU, 0);
+	ptr = gem_mmap__cpu_coherent(data->drm_fd, buf->handle, 0, size, PROT_READ);
 
-	linear = __gem_mmap_offset__cpu(data->drm_fd, buf->bo->handle, 0,
-					buf->bo->size, PROT_READ);
-	if (!linear)
-		linear = gem_mmap__cpu(data->drm_fd, buf->bo->handle, 0,
-				       buf->bo->size, PROT_READ);
 	out = fopen(filename, "wb");
 	igt_assert(out);
-	fwrite(linear, buf->bo->size, 1, out);
+	fwrite(ptr, size, 1, out);
 	fclose(out);
 
-	munmap(linear, buf->bo->size);
-
+	munmap(ptr, size);
 }
 
 #define SOURCE_MIXED_TILED	1
@@ -398,9 +341,9 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
 		 enum i915_compression dst_compression,
 		 int flags)
 {
-	struct igt_buf ref, src_tiled, src_ccs, dst_ccs, dst;
+	struct intel_buf ref, src_tiled, src_ccs, dst_ccs, dst;
 	struct {
-		struct igt_buf buf;
+		struct intel_buf buf;
 		const char *filename;
 		uint32_t tiling;
 		int x, y;
@@ -426,7 +369,6 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
 			.x = 1, .y = 1,
 		},
 	};
-	int opt_dump_aub = igt_aub_dump_enabled();
 	int num_src = ARRAY_SIZE(src);
 	const bool src_mixed_tiled = flags & SOURCE_MIXED_TILED;
 	const bool src_compressed = src_compression != I915_COMPRESSION_NONE;
@@ -495,18 +437,13 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
 
 	if (opt_dump_png) {
 		for (int i = 0; i < num_src; i++)
-			scratch_buf_write_to_png(data, &src[i].buf, src[i].filename);
+			intel_buf_write_to_png(&src[i].buf,
+					       make_filename(src[i].filename));
 		if (!src_mixed_tiled)
-			scratch_buf_write_to_png(data, &src_tiled,
-						 "source-tiled.png");
-		scratch_buf_write_to_png(data, &dst, "destination.png");
-		scratch_buf_write_to_png(data, &ref, "reference.png");
-	}
-
-	if (opt_dump_aub) {
-		drm_intel_bufmgr_gem_set_aub_filename(data->bufmgr,
-						      "rendercopy.aub");
-		drm_intel_bufmgr_gem_set_aub_dump(data->bufmgr, true);
+			intel_buf_write_to_png(&src_tiled,
+					       make_filename("source-tiled.png"));
+		intel_buf_write_to_png(&dst, make_filename("destination.png"));
+		intel_buf_write_to_png(&ref, make_filename("reference.png"));
 	}
 
 	/* This will copy the src to the mid point of the dst buffer. Presumably
@@ -519,75 +456,76 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
 	 */
 	if (src_mixed_tiled) {
 		if (dst_compressed)
-			data->render_copy(data->batch, NULL,
+			data->render_copy(data->ibb, 0,
 					  &dst, 0, 0, WIDTH, HEIGHT,
 					  &dst_ccs, 0, 0);
 
-		for (int i = 0; i < num_src; i++)
-			data->render_copy(data->batch, NULL,
+		for (int i = 0; i < num_src; i++) {
+			data->render_copy(data->ibb, 0,
 					  &src[i].buf,
 					  WIDTH/4, HEIGHT/4, WIDTH/2-2, HEIGHT/2-2,
 					  dst_compressed ? &dst_ccs : &dst,
 					  src[i].x, src[i].y);
+		}
 
 		if (dst_compressed)
-			data->render_copy(data->batch, NULL,
+			data->render_copy(data->ibb, 0,
 					  &dst_ccs, 0, 0, WIDTH, HEIGHT,
 					  &dst, 0, 0);
 
 	} else {
 		if (src_compression == I915_COMPRESSION_RENDER) {
-			data->render_copy(data->batch, NULL,
+			data->render_copy(data->ibb, 0,
 					  &src_tiled, 0, 0, WIDTH, HEIGHT,
 					  &src_ccs,
 					  0, 0);
 			if (dump_compressed_src_buf) {
-				dump_igt_buf_to_file(data, &src_tiled,
-						     "render-src_tiled.bin");
-				dump_igt_buf_to_file(data, &src_ccs,
-						     "render-src_ccs.bin");
+				dump_intel_buf_to_file(data, &src_tiled,
+						       "render-src_tiled.bin");
+				dump_intel_buf_to_file(data, &src_ccs,
+						       "render-src_ccs.bin");
 			}
 		} else if (src_compression == I915_COMPRESSION_MEDIA) {
-			data->vebox_copy(data->batch,
+			data->vebox_copy(data->ibb,
 					 &src_tiled, WIDTH, HEIGHT,
 					 &src_ccs);
 			if (dump_compressed_src_buf) {
-				dump_igt_buf_to_file(data, &src_tiled,
-						     "vebox-src_tiled.bin");
-				dump_igt_buf_to_file(data, &src_ccs,
-						     "vebox-src_ccs.bin");
+				dump_intel_buf_to_file(data, &src_tiled,
+						       "vebox-src_tiled.bin");
+				dump_intel_buf_to_file(data, &src_ccs,
+						       "vebox-src_ccs.bin");
 			}
 		}
 
 		if (dst_compression == I915_COMPRESSION_RENDER) {
-			data->render_copy(data->batch, NULL,
+			data->render_copy(data->ibb, 0,
 					  src_compressed ? &src_ccs : &src_tiled,
 					  0, 0, WIDTH, HEIGHT,
 					  &dst_ccs,
 					  0, 0);
 
-			data->render_copy(data->batch, NULL,
+			data->render_copy(data->ibb, 0,
 					  &dst_ccs,
 					  0, 0, WIDTH, HEIGHT,
 					  &dst,
 					  0, 0);
 		} else if (dst_compression == I915_COMPRESSION_MEDIA) {
-			data->vebox_copy(data->batch,
+			data->vebox_copy(data->ibb,
 					 src_compressed ? &src_ccs : &src_tiled,
 					 WIDTH, HEIGHT,
 					 &dst_ccs);
 
-			data->vebox_copy(data->batch,
+			data->vebox_copy(data->ibb,
 					 &dst_ccs,
 					 WIDTH, HEIGHT,
 					 &dst);
 		} else if (force_vebox_dst_copy) {
-			data->vebox_copy(data->batch,
+			data->vebox_copy(data->ibb,
 					 src_compressed ? &src_ccs : &src_tiled,
 					 WIDTH, HEIGHT,
 					 &dst);
 		} else {
-			data->render_copy(data->batch, NULL,
+			data->render_copy(data->ibb, 0,
 					  src_compressed ? &src_ccs : &src_tiled,
 					  0, 0, WIDTH, HEIGHT,
 					  &dst,
@@ -596,29 +534,22 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
 	}
 
 	if (opt_dump_png){
-		scratch_buf_write_to_png(data, &dst, "result.png");
+		intel_buf_write_to_png(&dst, make_filename("result.png"));
 		if (src_compressed) {
-			scratch_buf_write_to_png(data, &src_ccs,
-						 "compressed-src.png");
-			scratch_buf_ccs_write_to_png(data, &src_ccs,
-						     "compressed-src-ccs.png");
+			intel_buf_write_to_png(&src_ccs,
+					       make_filename("compressed-src.png"));
+			intel_buf_write_aux_to_png(&src_ccs,
+						   "compressed-src-ccs.png");
 		}
 		if (dst_compressed) {
-			scratch_buf_write_to_png(data, &dst_ccs,
-						 "compressed-dst.png");
-			scratch_buf_ccs_write_to_png(data, &dst_ccs,
-						     "compressed-dst-ccs.png");
+			intel_buf_write_to_png(&dst_ccs,
+					       make_filename("compressed-dst.png"));
+			intel_buf_write_aux_to_png(&dst_ccs,
+						   "compressed-dst-ccs.png");
 		}
 	}
 
-	if (opt_dump_aub) {
-		drm_intel_gem_bo_aub_dump_bmp(dst.bo,
-					      0, 0, igt_buf_width(&dst),
-					      igt_buf_height(&dst),
-					      AUB_DUMP_BMP_FORMAT_ARGB_8888,
-					      dst.surface[0].stride, 0);
-		drm_intel_bufmgr_gem_set_aub_dump(data->bufmgr, false);
-	} else if (check_all_pixels) {
+	if (check_all_pixels) {
 		scratch_buf_check_all(data, &dst, &ref);
 	} else {
 		scratch_buf_check(data, &dst, &ref, 10, 10);
@@ -631,6 +562,8 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
 		scratch_buf_ccs_check(data, &dst_ccs);
 
 	scratch_buf_fini(&ref);
+	if (!src_mixed_tiled)
+		scratch_buf_fini(&src_tiled);
 	if (dst_compressed)
 		scratch_buf_fini(&dst_ccs);
 	if (src_compressed)
@@ -638,6 +571,9 @@ static void test(data_t *data, uint32_t src_tiling, uint32_t dst_tiling,
 	scratch_buf_fini(&dst);
 	for (int i = 0; i < num_src; i++)
 		scratch_buf_fini(&src[i].buf);
+
+	/* handles gone, need to clean the objects cache within intel_bb */
+	intel_bb_reset(data->ibb, true);
 }
 
 static int opt_handler(int opt, int opt_index, void *data)
@@ -649,6 +585,9 @@ static int opt_handler(int opt, int opt_index, void *data)
 	case 'a':
 		check_all_pixels = true;
 		break;
+	case 'c':
+		dump_compressed_src_buf = true;
+		break;
 	default:
 		return IGT_OPT_HANDLER_ERROR;
 	}
@@ -659,6 +598,7 @@ static int opt_handler(int opt, int opt_index, void *data)
 const char *help_str =
 	"  -d\tDump PNG\n"
 	"  -a\tCheck all pixels\n"
+	"  -c\tDump compressed src surface\n"
 	;
 
 static void buf_mode_to_str(uint32_t tiling, bool mixed_tiled,
@@ -705,7 +645,7 @@ static void buf_mode_to_str(uint32_t tiling, bool mixed_tiled,
 		 tiling_str, compression_str[0] ? "-" : "", compression_str);
 }
 
-igt_main_args("da", NULL, help_str, opt_handler, NULL)
+igt_main_args("dac", NULL, help_str, opt_handler, NULL)
 {
 	static const struct test_desc {
 		int src_tiling;
@@ -849,20 +789,14 @@ igt_main_args("da", NULL, help_str, opt_handler, NULL)
 		data.devid = intel_get_drm_devid(data.drm_fd);
 		igt_require_gem(data.drm_fd);
 
-		data.bufmgr = drm_intel_bufmgr_gem_init(data.drm_fd, 4096);
-		igt_assert(data.bufmgr);
-
 		data.render_copy = igt_get_render_copyfunc(data.devid);
 		igt_require_f(data.render_copy,
 			      "no render-copy function\n");
 
 		data.vebox_copy = igt_get_vebox_copyfunc(data.devid);
 
-		data.batch = intel_batchbuffer_alloc(data.bufmgr, data.devid);
-		igt_assert(data.batch);
-
-		data.bmgr = rendercopy_bufmgr_create(data.drm_fd, data.bufmgr);
-		igt_assert(data.bmgr);
+		data.bops = buf_ops_create(data.drm_fd);
+		data.ibb = intel_bb_create(data.drm_fd, 4096);
 
 		igt_fork_hang_detector(data.drm_fd);
 	}
@@ -915,8 +849,7 @@ igt_main_args("da", NULL, help_str, opt_handler, NULL)
 
 	igt_fixture {
 		igt_stop_hang_detector();
-		intel_batchbuffer_free(data.batch);
-		drm_intel_bufmgr_destroy(data.bufmgr);
-		rendercopy_bufmgr_destroy(data.bmgr);
+		intel_bb_destroy(data.ibb);
+		buf_ops_destroy(data.bops);
 	}
 }
diff --git a/tests/i915/gem_render_copy_redux.c b/tests/i915/gem_render_copy_redux.c
index a3f77e84..3f9f926c 100644
--- a/tests/i915/gem_render_copy_redux.c
+++ b/tests/i915/gem_render_copy_redux.c
@@ -48,7 +48,6 @@
 
 #include "i915/gem.h"
 #include "igt.h"
-#include "intel_bufmgr.h"
 
 IGT_TEST_DESCRIPTION("Advanced test for the render_copy() function.");
 
@@ -63,8 +62,8 @@ IGT_TEST_DESCRIPTION("Advanced test for the render_copy() function.");
 typedef struct {
 	int fd;
 	uint32_t devid;
-	drm_intel_bufmgr *bufmgr;
-	struct intel_batchbuffer *batch;
+	struct buf_ops *bops;
+	struct intel_bb *ibb;
 	igt_render_copyfunc_t render_copy;
 	uint32_t linear[WIDTH * HEIGHT];
 } data_t;
@@ -74,58 +73,50 @@ static void data_init(data_t *data)
 	data->fd = drm_open_driver(DRIVER_INTEL);
 	data->devid = intel_get_drm_devid(data->fd);
 
-	data->bufmgr = drm_intel_bufmgr_gem_init(data->fd, 4096);
-	igt_assert(data->bufmgr);
-
+	data->bops = buf_ops_create(data->fd);
 	data->render_copy = igt_get_render_copyfunc(data->devid);
 	igt_require_f(data->render_copy,
 		      "no render-copy function\n");
 
-	data->batch = intel_batchbuffer_alloc(data->bufmgr, data->devid);
-	igt_assert(data->batch);
+	data->ibb = intel_bb_create(data->fd, 4096);
 }
 
 static void data_fini(data_t *data)
 {
-	 intel_batchbuffer_free(data->batch);
-	 drm_intel_bufmgr_destroy(data->bufmgr);
-	 close(data->fd);
+	intel_bb_destroy(data->ibb);
+	buf_ops_destroy(data->bops);
+	close(data->fd);
 }
 
-static void scratch_buf_init(data_t *data, struct igt_buf *buf,
+static void scratch_buf_init(data_t *data, struct intel_buf *buf,
 			     int width, int height, int stride, uint32_t color)
 {
-	drm_intel_bo *bo;
 	int i;
 
-	bo = drm_intel_bo_alloc(data->bufmgr, "", SIZE, 4096);
+	intel_buf_init(data->bops, buf, width, height, 32, 0,
+		       I915_TILING_NONE, I915_COMPRESSION_NONE);
+	igt_assert(buf->surface[0].size == SIZE);
+	igt_assert(buf->surface[0].stride == stride);
+
 	for (i = 0; i < width * height; i++)
 		data->linear[i] = color;
-	gem_write(data->fd, bo->handle, 0, data->linear,
+	gem_write(data->fd, buf->handle, 0, data->linear,
 		  sizeof(data->linear));
-
-	memset(buf, 0, sizeof(*buf));
-
-	buf->bo = bo;
-	buf->surface[0].stride = stride;
-	buf->tiling = I915_TILING_NONE;
-	buf->surface[0].size = SIZE;
-	buf->bpp = 32;
 }
 
-static void scratch_buf_fini(data_t *data, struct igt_buf *buf)
+static void scratch_buf_fini(data_t *data, struct intel_buf *buf)
 {
-	dri_bo_unreference(buf->bo);
+	intel_buf_close(data->bops, buf);
 	memset(buf, 0, sizeof(*buf));
 }
 
 static void
-scratch_buf_check(data_t *data, struct igt_buf *buf, int x, int y,
+scratch_buf_check(data_t *data, struct intel_buf *buf, int x, int y,
 		  uint32_t color)
 {
 	uint32_t val;
 
-	gem_read(data->fd, buf->bo->handle, 0,
+	gem_read(data->fd, buf->handle, 0,
 		 data->linear, sizeof(data->linear));
 	val = data->linear[y * WIDTH + x];
 	igt_assert_f(val == color,
@@ -135,7 +126,7 @@ scratch_buf_check(data_t *data, struct igt_buf *buf, int x, int y,
 
 static void copy(data_t *data)
 {
-	struct igt_buf src, dst;
+	struct intel_buf src, dst;
 
 	scratch_buf_init(data, &src, WIDTH, HEIGHT, STRIDE, SRC_COLOR);
 	scratch_buf_init(data, &dst, WIDTH, HEIGHT, STRIDE, DST_COLOR);
@@ -143,7 +134,7 @@ static void copy(data_t *data)
 	scratch_buf_check(data, &src, WIDTH / 2, HEIGHT / 2, SRC_COLOR);
 	scratch_buf_check(data, &dst, WIDTH / 2, HEIGHT / 2, DST_COLOR);
 
-	data->render_copy(data->batch, NULL,
+	data->render_copy(data->ibb, 0,
 			  &src, 0, 0, WIDTH, HEIGHT,
 			  &dst, WIDTH / 2, HEIGHT / 2);
 
@@ -157,9 +148,9 @@ static void copy(data_t *data)
 static void copy_flink(data_t *data)
 {
 	data_t local;
-	struct igt_buf src, dst;
-	struct igt_buf local_src, local_dst;
-	struct igt_buf flink;
+	struct intel_buf src, dst;
+	struct intel_buf local_src, local_dst;
+	struct intel_buf flink;
 	uint32_t name;
 
 	data_init(&local);
@@ -167,23 +158,22 @@ static void copy_flink(data_t *data)
 	scratch_buf_init(data, &src, WIDTH, HEIGHT, STRIDE, 0);
 	scratch_buf_init(data, &dst, WIDTH, HEIGHT, STRIDE, DST_COLOR);
 
-	data->render_copy(data->batch, NULL,
+	data->render_copy(data->ibb, 0,
 			  &src, 0, 0, WIDTH, HEIGHT,
 			  &dst, WIDTH, HEIGHT);
 
 	scratch_buf_init(&local, &local_src, WIDTH, HEIGHT, STRIDE, 0);
 	scratch_buf_init(&local, &local_dst, WIDTH, HEIGHT, STRIDE, SRC_COLOR);
 
-	local.render_copy(local.batch, NULL,
+	local.render_copy(local.ibb, 0,
 			  &local_src, 0, 0, WIDTH, HEIGHT,
 			  &local_dst, WIDTH, HEIGHT);
 
-
-	drm_intel_bo_flink(local_dst.bo, &name);
+	name = gem_flink(local.fd, local_dst.handle);
 	flink = local_dst;
-	flink.bo = drm_intel_bo_gem_create_from_name(data->bufmgr, "flink", name);
+	flink.handle = gem_open(data->fd, name);
 
-	data->render_copy(data->batch, NULL,
+	data->render_copy(data->ibb, 0,
 			  &flink, 0, 0, WIDTH, HEIGHT,
 			  &dst, WIDTH / 2, HEIGHT / 2);
 
@@ -193,6 +183,7 @@ static void copy_flink(data_t *data)
 	scratch_buf_check(data, &dst, 10, 10, DST_COLOR);
 	scratch_buf_check(data, &dst, WIDTH - 10, HEIGHT - 10, SRC_COLOR);
 
+	intel_bb_reset(data->ibb, true);
 	scratch_buf_fini(data, &src);
 	scratch_buf_fini(data, &flink);
 	scratch_buf_fini(data, &dst);
diff --git a/tests/i915/gem_render_linear_blits.c b/tests/i915/gem_render_linear_blits.c
index 55a9e6be..8d044e9e 100644
--- a/tests/i915/gem_render_linear_blits.c
+++ b/tests/i915/gem_render_linear_blits.c
@@ -49,7 +49,6 @@
 
 #include "i915/gem.h"
 #include "igt.h"
-#include "intel_bufmgr.h"
 
 #define WIDTH 512
 #define STRIDE (WIDTH*4)
@@ -60,7 +59,7 @@ static uint32_t linear[WIDTH*HEIGHT];
 static igt_render_copyfunc_t render_copy;
 
 static void
-check_bo(int fd, uint32_t handle, uint32_t val)
+check_buf(int fd, uint32_t handle, uint32_t val)
 {
 	int i;
 
@@ -76,114 +75,89 @@ check_bo(int fd, uint32_t handle, uint32_t val)
 
 static void run_test (int fd, int count)
 {
-	drm_intel_bufmgr *bufmgr;
-	struct intel_batchbuffer *batch;
+	struct buf_ops *bops;
+	struct intel_bb *ibb;
 	uint32_t *start_val;
-	drm_intel_bo **bo;
+	struct intel_buf *bufs;
 	uint32_t start = 0;
 	int i, j;
 
 	render_copy = igt_get_render_copyfunc(intel_get_drm_devid(fd));
 	igt_require(render_copy);
 
-	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
-	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
+	bops = buf_ops_create(fd);
+	ibb = intel_bb_create(fd, 4096);
 
-	bo = malloc(sizeof(*bo)*count);
+	bufs = malloc(sizeof(*bufs)*count);
 	start_val = malloc(sizeof(*start_val)*count);
 
 	for (i = 0; i < count; i++) {
-		bo[i] = drm_intel_bo_alloc(bufmgr, "", SIZE, 4096);
+		intel_buf_init(bops, &bufs[i], WIDTH, HEIGHT, 32, 0,
+			       I915_TILING_NONE, I915_COMPRESSION_NONE);
 		start_val[i] = start;
 		for (j = 0; j < WIDTH*HEIGHT; j++)
 			linear[j] = start++;
-		gem_write(fd, bo[i]->handle, 0, linear, sizeof(linear));
+		gem_write(fd, bufs[i].handle, 0, linear, sizeof(linear));
 	}
 
 	igt_info("Verifying initialisation - %d buffers of %d bytes\n", count, SIZE);
 	for (i = 0; i < count; i++)
-		check_bo(fd, bo[i]->handle, start_val[i]);
+		check_buf(fd, bufs[i].handle, start_val[i]);
 
 	igt_info("Cyclic blits, forward...\n");
 	for (i = 0; i < count * 4; i++) {
-		struct igt_buf src = {}, dst = {};
+		struct intel_buf *src, *dst;
 
-		src.bo = bo[i % count];
-		src.surface[0].stride = STRIDE;
-		src.tiling = I915_TILING_NONE;
-		src.surface[0].size = SIZE;
-		src.bpp = 32;
+		src = &bufs[i % count];
+		dst = &bufs[(i + 1) % count];
 
-		dst.bo = bo[(i + 1) % count];
-		dst.surface[0].stride = STRIDE;
-		dst.tiling = I915_TILING_NONE;
-		dst.surface[0].size = SIZE;
-		dst.bpp = 32;
-
-		render_copy(batch, NULL, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
+		render_copy(ibb, 0, src, 0, 0, WIDTH, HEIGHT, dst, 0, 0);
 		start_val[(i + 1) % count] = start_val[i % count];
 	}
+
 	for (i = 0; i < count; i++)
-		check_bo(fd, bo[i]->handle, start_val[i]);
+		check_buf(fd, bufs[i].handle, start_val[i]);
 
 	if (igt_run_in_simulation())
 		return;
 
 	igt_info("Cyclic blits, backward...\n");
 	for (i = 0; i < count * 4; i++) {
-		struct igt_buf src = {}, dst = {};
+		struct intel_buf *src, *dst;
 
-		src.bo = bo[(i + 1) % count];
-		src.surface[0].stride = STRIDE;
-		src.tiling = I915_TILING_NONE;
-		src.surface[0].size = SIZE;
-		src.bpp = 32;
+		src = &bufs[(i + 1) % count];
+		dst = &bufs[i % count];
 
-		dst.bo = bo[i % count];
-		dst.surface[0].stride = STRIDE;
-		dst.tiling = I915_TILING_NONE;
-		dst.surface[0].size = SIZE;
-		dst.bpp = 32;
-
-		render_copy(batch, NULL, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
+		render_copy(ibb, 0, src, 0, 0, WIDTH, HEIGHT, dst, 0, 0);
 		start_val[i % count] = start_val[(i + 1) % count];
 	}
 	for (i = 0; i < count; i++)
-		check_bo(fd, bo[i]->handle, start_val[i]);
+		check_buf(fd, bufs[i].handle, start_val[i]);
 
 	igt_info("Random blits...\n");
 	for (i = 0; i < count * 4; i++) {
-		struct igt_buf src = {}, dst = {};
+		struct intel_buf *src, *dst;
 		int s = random() % count;
 		int d = random() % count;
 
 		if (s == d)
 			continue;
 
-		src.bo = bo[s];
-		src.surface[0].stride = STRIDE;
-		src.tiling = I915_TILING_NONE;
-		src.surface[0].size = SIZE;
-		src.bpp = 32;
-
-		dst.bo = bo[d];
-		dst.surface[0].stride = STRIDE;
-		dst.tiling = I915_TILING_NONE;
-		dst.surface[0].size = SIZE;
-		dst.bpp = 32;
+		src = &bufs[s];
+		dst = &bufs[d];
 
-		render_copy(batch, NULL, &src, 0, 0, WIDTH, HEIGHT, &dst, 0, 0);
+		render_copy(ibb, 0, src, 0, 0, WIDTH, HEIGHT, dst, 0, 0);
 		start_val[d] = start_val[s];
 	}
 	for (i = 0; i < count; i++)
-		check_bo(fd, bo[i]->handle, start_val[i]);
+		check_buf(fd, bufs[i].handle, start_val[i]);
 
 	/* release resources */
-	for (i = 0; i < count; i++) {
-		drm_intel_bo_unreference(bo[i]);
-	}
-	intel_batchbuffer_free(batch);
-	drm_intel_bufmgr_destroy(bufmgr);
+	for (i = 0; i < count; i++)
+		intel_buf_close(bops, &bufs[i]);
+	free(bufs);
+	intel_bb_destroy(ibb);
+	buf_ops_destroy(bops);
 }
 
 igt_main
diff --git a/tests/i915/gem_render_tiled_blits.c b/tests/i915/gem_render_tiled_blits.c
index bd76066a..0d83a43e 100644
--- a/tests/i915/gem_render_tiled_blits.c
+++ b/tests/i915/gem_render_tiled_blits.c
@@ -42,12 +42,12 @@
 #include <errno.h>
 #include <sys/stat.h>
 #include <sys/time.h>
+#include <sys/mman.h>
 
 #include <drm.h>
 
 #include "i915/gem.h"
 #include "igt.h"
-#include "intel_bufmgr.h"
 
 #define WIDTH 512
 #define STRIDE (WIDTH*4)
@@ -55,29 +55,26 @@
 #define SIZE (HEIGHT*STRIDE)
 
 static igt_render_copyfunc_t render_copy;
-static drm_intel_bo *linear;
+static struct intel_buf linear;
 static uint32_t data[WIDTH*HEIGHT];
 static int snoop;
 
 static void
-check_bo(struct intel_batchbuffer *batch, struct igt_buf *buf, uint32_t val)
+check_buf(struct intel_bb *ibb, struct intel_buf *buf, uint32_t val)
 {
-	struct igt_buf tmp = {};
+	int i915 = buf_ops_get_fd(linear.bops);
 	uint32_t *ptr;
 	int i;
 
-	tmp.bo = linear;
-	tmp.surface[0].stride = STRIDE;
-	tmp.tiling = I915_TILING_NONE;
-	tmp.surface[0].size = SIZE;
-	tmp.bpp = 32;
+	render_copy(ibb, 0, buf, 0, 0, WIDTH, HEIGHT, &linear, 0, 0);
+	intel_bb_sync(ibb);
 
-	render_copy(batch, NULL, buf, 0, 0, WIDTH, HEIGHT, &tmp, 0, 0);
 	if (snoop) {
-		do_or_die(drm_intel_bo_map(linear, 0));
-		ptr = linear->virtual;
+		ptr = gem_mmap__cpu_coherent(i915, linear.handle, 0,
+					     linear.surface[0].size, PROT_READ);
+		gem_set_domain(i915, linear.handle, I915_GEM_DOMAIN_CPU, 0);
 	} else {
-		do_or_die(drm_intel_bo_get_subdata(linear, 0, sizeof(data), data));
+		gem_read(i915, linear.handle, 0, data, sizeof(data));
 		ptr = data;
 	}
 	for (i = 0; i < WIDTH*HEIGHT; i++) {
@@ -88,15 +85,15 @@ check_bo(struct intel_batchbuffer *batch, struct igt_buf *buf, uint32_t val)
 		val++;
 	}
 	if (ptr != data)
-		drm_intel_bo_unmap(linear);
+		munmap(ptr, linear.surface[0].size);
 }
 
 static void run_test (int fd, int count)
 {
-	drm_intel_bufmgr *bufmgr;
-	struct intel_batchbuffer *batch;
+	struct buf_ops *bops;
+	struct intel_bb *ibb;
 	uint32_t *start_val;
-	struct igt_buf *buf;
+	struct intel_buf *bufs;
 	uint32_t start = 0;
 	int i, j;
 	uint32_t devid;
@@ -112,66 +109,67 @@ static void run_test (int fd, int count)
 	if (IS_BROADWATER(devid) || IS_CRESTLINE(devid)) /* snafu */
 		snoop = 0;
 
-	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
-	drm_intel_bufmgr_gem_set_vma_cache_size(bufmgr, 32);
-	batch = intel_batchbuffer_alloc(bufmgr, devid);
+	bops = buf_ops_create(fd);
+	ibb = intel_bb_create(fd, 4096);
 
-	linear = drm_intel_bo_alloc(bufmgr, "linear", WIDTH*HEIGHT*4, 0);
+	intel_buf_init(bops, &linear, WIDTH, HEIGHT, 32, 0,
+		       I915_TILING_NONE, I915_COMPRESSION_NONE);
 	if (snoop) {
-		gem_set_caching(fd, linear->handle, 1);
+		gem_set_caching(fd, linear.handle, 1);
 		igt_info("Using a snoop linear buffer for comparisons\n");
 	}
 
-	buf = calloc(sizeof(*buf), count);
+	bufs = calloc(sizeof(*bufs), count);
 	start_val = malloc(sizeof(*start_val)*count);
 
 	for (i = 0; i < count; i++) {
 		uint32_t tiling = I915_TILING_X + (random() & 1);
-		unsigned long pitch = STRIDE;
 		uint32_t *ptr;
 
-		buf[i].bo = drm_intel_bo_alloc_tiled(bufmgr, "",
-						     WIDTH, HEIGHT, 4,
-						     &tiling, &pitch, 0);
-		buf[i].surface[0].stride = pitch;
-		buf[i].tiling = tiling;
-		buf[i].surface[0].size = SIZE;
-		buf[i].bpp = 32;
-
+		intel_buf_init(bops, &bufs[i], WIDTH, HEIGHT, 32, 0,
+			       tiling, I915_COMPRESSION_NONE);
 		start_val[i] = start;
 
-		do_or_die(drm_intel_gem_bo_map_gtt(buf[i].bo));
-		ptr = buf[i].bo->virtual;
+		ptr = gem_mmap__gtt(fd, bufs[i].handle,
+				    bufs[i].surface[0].size, PROT_WRITE);
+		gem_set_domain(fd, bufs[i].handle,
+			       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
 		for (j = 0; j < WIDTH*HEIGHT; j++)
 			ptr[j] = start++;
-		drm_intel_gem_bo_unmap_gtt(buf[i].bo);
+
+		munmap(ptr, bufs[i].surface[0].size);
 	}
 
 	igt_info("Verifying initialisation...\n");
 	for (i = 0; i < count; i++)
-		check_bo(batch, &buf[i], start_val[i]);
+		check_buf(ibb, &bufs[i], start_val[i]);
 
 	igt_info("Cyclic blits, forward...\n");
 	for (i = 0; i < count * 4; i++) {
 		int src = i % count;
 		int dst = (i + 1) % count;
 
-		render_copy(batch, NULL, buf+src, 0, 0, WIDTH, HEIGHT, buf+dst, 0, 0);
+		render_copy(ibb, 0, &bufs[src], 0, 0, WIDTH, HEIGHT,
+			    &bufs[dst], 0, 0);
 		start_val[dst] = start_val[src];
 	}
+	intel_bb_sync(ibb);
+
 	for (i = 0; i < count; i++)
-		check_bo(batch, &buf[i], start_val[i]);
+		check_buf(ibb, &bufs[i], start_val[i]);
 
 	igt_info("Cyclic blits, backward...\n");
 	for (i = 0; i < count * 4; i++) {
 		int src = (i + 1) % count;
 		int dst = i % count;
 
-		render_copy(batch, NULL, buf+src, 0, 0, WIDTH, HEIGHT, buf+dst, 0, 0);
+		render_copy(ibb, 0, &bufs[src], 0, 0, WIDTH, HEIGHT,
+			    &bufs[dst], 0, 0);
 		start_val[dst] = start_val[src];
 	}
+	intel_bb_sync(ibb);
 	for (i = 0; i < count; i++)
-		check_bo(batch, &buf[i], start_val[i]);
+		check_buf(ibb, &bufs[i], start_val[i]);
 
 	igt_info("Random blits...\n");
 	for (i = 0; i < count * 4; i++) {
@@ -181,19 +179,22 @@ static void run_test (int fd, int count)
 		if (src == dst)
 			continue;
 
-		render_copy(batch, NULL, buf+src, 0, 0, WIDTH, HEIGHT, buf+dst, 0, 0);
+		render_copy(ibb, 0, &bufs[src], 0, 0, WIDTH, HEIGHT,
+			    &bufs[dst], 0, 0);
 		start_val[dst] = start_val[src];
 	}
+	intel_bb_sync(ibb);
+
 	for (i = 0; i < count; i++)
-		check_bo(batch, &buf[i], start_val[i]);
+		check_buf(ibb, &bufs[i], start_val[i]);
 
 	/* release resources */
-	drm_intel_bo_unreference(linear);
+	intel_buf_close(bops, &linear);
 	for (i = 0; i < count; i++) {
-		drm_intel_bo_unreference(buf[i].bo);
+		intel_buf_close(bops, &bufs[i]);
 	}
-	intel_batchbuffer_free(batch);
-	drm_intel_bufmgr_destroy(bufmgr);
+	intel_bb_destroy(ibb);
+	buf_ops_destroy(bops);
 }
 
 
diff --git a/tests/i915/gem_stress.c b/tests/i915/gem_stress.c
index 50245b93..22d64749 100644
--- a/tests/i915/gem_stress.c
+++ b/tests/i915/gem_stress.c
@@ -62,8 +62,6 @@
 
 #include <drm.h>
 
-#include "intel_bufmgr.h"
-
 IGT_TEST_DESCRIPTION("General gem coherency test.");
 
 #define CMD_POLY_STIPPLE_OFFSET       0x7906
@@ -84,13 +82,13 @@ IGT_TEST_DESCRIPTION("General gem coherency test.");
  *   first one (to check consistency of the kernel recovery paths)
  */
 
-drm_intel_bufmgr *bufmgr;
-struct intel_batchbuffer *batch;
+struct buf_ops *bops;
+struct intel_bb *ibb;
 int drm_fd;
 int devid;
 int num_fences;
 
-drm_intel_bo *busy_bo;
+struct intel_buf busy_bo;
 
 struct option_struct {
     unsigned scratch_buf_size;
@@ -136,7 +134,7 @@ struct option_struct options = {
 	.check_render_cpyfn = 0,
 };
 
-static struct igt_buf buffers[2][MAX_BUFS];
+static struct intel_buf buffers[2][MAX_BUFS];
 /* tile i is at logical position tile_permutation[i] */
 static unsigned *tile_permutation;
 static unsigned num_buffers = 0;
@@ -152,16 +150,16 @@ struct {
 	unsigned max_failed_reads;
 } stats;
 
-static void tile2xy(struct igt_buf *buf, unsigned tile, unsigned *x, unsigned *y)
+static void tile2xy(struct intel_buf *buf, unsigned tile, unsigned *x, unsigned *y)
 {
-	igt_assert(tile < buf->num_tiles);
+	igt_assert(tile < options.tiles_per_buf);
 	*x = (tile*options.tile_size) % (buf->surface[0].stride/sizeof(uint32_t));
 	*y = ((tile*options.tile_size) / (buf->surface[0].stride/sizeof(uint32_t))) * options.tile_size;
 }
 
-static void emit_blt(drm_intel_bo *src_bo, uint32_t src_tiling, unsigned src_pitch,
+static void emit_blt(struct intel_buf *src, uint32_t src_tiling, unsigned src_pitch,
 		     unsigned src_x, unsigned src_y, unsigned w, unsigned h,
-		     drm_intel_bo *dst_bo, uint32_t dst_tiling, unsigned dst_pitch,
+		     struct intel_buf *dst, uint32_t dst_tiling, unsigned dst_pitch,
 		     unsigned dst_x, unsigned dst_y)
 {
 	uint32_t cmd_bits = 0;
@@ -177,24 +175,26 @@ static void emit_blt(drm_intel_bo *src_bo, uint32_t src_tiling, unsigned src_pit
 	}
 
 	/* copy lower half to upper half */
-	BLIT_COPY_BATCH_START(cmd_bits);
-	OUT_BATCH((3 << 24) | /* 32 bits */
-		  (0xcc << 16) | /* copy ROP */
-		  dst_pitch);
-	OUT_BATCH(dst_y << 16 | dst_x);
-	OUT_BATCH((dst_y+h) << 16 | (dst_x+w));
-	OUT_RELOC_FENCED(dst_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
-	OUT_BATCH(src_y << 16 | src_x);
-	OUT_BATCH(src_pitch);
-	OUT_RELOC_FENCED(src_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
-	ADVANCE_BATCH();
-
-	if (batch->gen >= 6) {
-		BEGIN_BATCH(3, 0);
-		OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
-		OUT_BATCH(0);
-		OUT_BATCH(0);
-		ADVANCE_BATCH();
+	intel_bb_blit_start(ibb, cmd_bits);
+	intel_bb_out(ibb, (3 << 24) | /* 32 bits */
+		     (0xcc << 16) | /* copy ROP */
+		     dst_pitch);
+	intel_bb_out(ibb, dst_y << 16 | dst_x);
+	intel_bb_out(ibb, (dst_y+h) << 16 | (dst_x+w));
+	intel_bb_emit_reloc_fenced(ibb, dst->handle,
+				   I915_GEM_DOMAIN_RENDER,
+				   I915_GEM_DOMAIN_RENDER,
+				   0, dst->addr.offset);
+	intel_bb_out(ibb, src_y << 16 | src_x);
+	intel_bb_out(ibb, src_pitch);
+	intel_bb_emit_reloc_fenced(ibb, src->handle,
+				   I915_GEM_DOMAIN_RENDER, 0,
+				   0, src->addr.offset);
+
+	if (ibb->gen >= 6) {
+		intel_bb_out(ibb, XY_SETUP_CLIP_BLT_CMD);
+		intel_bb_out(ibb, 0);
+		intel_bb_out(ibb, 0);
 	}
 }
 
@@ -207,19 +207,25 @@ static void keep_gpu_busy(void)
 	tmp = 1 << gpu_busy_load;
 	igt_assert_lte(tmp, 1024);
 
-	emit_blt(busy_bo, 0, 4096, 0, 0, tmp, 128,
-		 busy_bo, 0, 4096, 0, 128);
+	emit_blt(&busy_bo, 0, 4096, 0, 0, tmp, 128,
+		 &busy_bo, 0, 4096, 0, 128);
 }
 
-static void set_to_cpu_domain(struct igt_buf *buf, int writing)
+static void set_to_cpu_domain(struct intel_buf *buf, int writing)
 {
-	gem_set_domain(drm_fd, buf->bo->handle, I915_GEM_DOMAIN_CPU,
+	gem_set_domain(drm_fd, buf->handle, I915_GEM_DOMAIN_CPU,
 		       writing ? I915_GEM_DOMAIN_CPU : 0);
 }
 
+static void set_to_gtt_domain(struct intel_buf *buf, int writing)
+{
+	gem_set_domain(drm_fd, buf->handle, I915_GEM_DOMAIN_GTT,
+		       writing ? I915_GEM_DOMAIN_GTT : 0);
+}
+
 static unsigned int copyfunc_seq = 0;
-static void (*copyfunc)(struct igt_buf *src, unsigned src_x, unsigned src_y,
-			struct igt_buf *dst, unsigned dst_x, unsigned dst_y,
+static void (*copyfunc)(struct intel_buf *src, unsigned src_x, unsigned src_y,
+			struct intel_buf *dst, unsigned dst_x, unsigned dst_y,
 			unsigned logical_tile_no);
 
 /* stride, x, y in units of uint32_t! */
@@ -254,51 +260,53 @@ static void cpucpy2d(uint32_t *src, unsigned src_stride, unsigned src_x, unsigne
 		stats.num_failed++;
 }
 
-static void cpu_copyfunc(struct igt_buf *src, unsigned src_x, unsigned src_y,
-			 struct igt_buf *dst, unsigned dst_x, unsigned dst_y,
+static void cpu_copyfunc(struct intel_buf *src, unsigned src_x, unsigned src_y,
+			 struct intel_buf *dst, unsigned dst_x, unsigned dst_y,
 			 unsigned logical_tile_no)
 {
-	igt_assert(batch->ptr == batch->buffer);
+	igt_assert(src->ptr);
+	igt_assert(dst->ptr);
 
 	if (options.ducttape)
-		drm_intel_bo_wait_rendering(dst->bo);
+		set_to_gtt_domain(dst, 1);
 
 	if (options.use_cpu_maps) {
 		set_to_cpu_domain(src, 0);
 		set_to_cpu_domain(dst, 1);
 	}
 
-	cpucpy2d(src->data, src->surface[0].stride/sizeof(uint32_t), src_x,
-		 src_y,
-		 dst->data, dst->surface[0].stride/sizeof(uint32_t), dst_x,
-		 dst_y,
+	cpucpy2d(src->ptr, src->surface[0].stride/sizeof(uint32_t), src_x, src_y,
+		 dst->ptr, dst->surface[0].stride/sizeof(uint32_t), dst_x, dst_y,
 		 logical_tile_no);
 }
 
-static void prw_copyfunc(struct igt_buf *src, unsigned src_x, unsigned src_y,
-			 struct igt_buf *dst, unsigned dst_x, unsigned dst_y,
+static void prw_copyfunc(struct intel_buf *src, unsigned src_x, unsigned src_y,
+			 struct intel_buf *dst, unsigned dst_x, unsigned dst_y,
 			 unsigned logical_tile_no)
 {
 	uint32_t tmp_tile[options.tile_size*options.tile_size];
 	int i;
 
-	igt_assert(batch->ptr == batch->buffer);
+	igt_assert(src->ptr);
+	igt_assert(dst->ptr);
+
+	igt_info("prw\n");
 
 	if (options.ducttape)
-		drm_intel_bo_wait_rendering(dst->bo);
+		set_to_gtt_domain(dst, 1);
 
 	if (src->tiling == I915_TILING_NONE) {
 		for (i = 0; i < options.tile_size; i++) {
 			unsigned ofs = src_x*sizeof(uint32_t) + src->surface[0].stride*(src_y + i);
-			drm_intel_bo_get_subdata(src->bo, ofs,
-						 options.tile_size*sizeof(uint32_t),
-						 tmp_tile + options.tile_size*i);
+			gem_read(drm_fd, src->handle, ofs,
+				 tmp_tile + options.tile_size*i,
+				 options.tile_size*sizeof(uint32_t));
 		}
 	} else {
 		if (options.use_cpu_maps)
 			set_to_cpu_domain(src, 0);
 
-		cpucpy2d(src->data, src->surface[0].stride/sizeof(uint32_t),
+		cpucpy2d(src->ptr, src->surface[0].stride/sizeof(uint32_t),
 			 src_x, src_y,
 			 tmp_tile, options.tile_size, 0, 0, logical_tile_no);
 	}
@@ -306,23 +314,23 @@ static void prw_copyfunc(struct igt_buf *src, unsigned src_x, unsigned src_y,
 	if (dst->tiling == I915_TILING_NONE) {
 		for (i = 0; i < options.tile_size; i++) {
 			unsigned ofs = dst_x*sizeof(uint32_t) + dst->surface[0].stride*(dst_y + i);
-			drm_intel_bo_subdata(dst->bo, ofs,
-					     options.tile_size*sizeof(uint32_t),
-					     tmp_tile + options.tile_size*i);
+			gem_write(drm_fd, dst->handle, ofs,
+				  tmp_tile + options.tile_size*i,
+				  options.tile_size*sizeof(uint32_t));
 		}
 	} else {
 		if (options.use_cpu_maps)
 			set_to_cpu_domain(dst, 1);
 
 		cpucpy2d(tmp_tile, options.tile_size, 0, 0,
-			 dst->data, dst->surface[0].stride/sizeof(uint32_t),
+			 dst->ptr, dst->surface[0].stride/sizeof(uint32_t),
 			 dst_x, dst_y,
 			 logical_tile_no);
 	}
 }
 
-static void blitter_copyfunc(struct igt_buf *src, unsigned src_x, unsigned src_y,
-			     struct igt_buf *dst, unsigned dst_x, unsigned dst_y,
+static void blitter_copyfunc(struct intel_buf *src, unsigned src_x, unsigned src_y,
+			     struct intel_buf *dst, unsigned dst_x, unsigned dst_y,
 			     unsigned logical_tile_no)
 {
 	static unsigned keep_gpu_busy_counter = 0;
@@ -331,9 +339,9 @@ static void blitter_copyfunc(struct igt_buf *src, unsigned src_x, unsigned src_y
 	if (keep_gpu_busy_counter & 1 && !fence_storm)
 		keep_gpu_busy();
 
-	emit_blt(src->bo, src->tiling, src->surface[0].stride, src_x, src_y,
+	emit_blt(src, src->tiling, src->surface[0].stride, src_x, src_y,
 		 options.tile_size, options.tile_size,
-		 dst->bo, dst->tiling, dst->surface[0].stride, dst_x, dst_y);
+		 dst, dst->tiling, dst->surface[0].stride, dst_x, dst_y);
 
 	if (!(keep_gpu_busy_counter & 1) && !fence_storm)
 		keep_gpu_busy();
@@ -347,12 +355,12 @@ static void blitter_copyfunc(struct igt_buf *src, unsigned src_x, unsigned src_y
 
 	if (fence_storm <= 1) {
 		fence_storm = 0;
-		intel_batchbuffer_flush(batch);
+		intel_bb_flush_blit(ibb);
 	}
 }
 
-static void render_copyfunc(struct igt_buf *src, unsigned src_x, unsigned src_y,
-			    struct igt_buf *dst, unsigned dst_x, unsigned dst_y,
+static void render_copyfunc(struct intel_buf *src, unsigned src_x, unsigned src_y,
+			    struct intel_buf *dst, unsigned dst_x, unsigned dst_y,
 			    unsigned logical_tile_no)
 {
 	static unsigned keep_gpu_busy_counter = 0;
@@ -367,8 +375,9 @@ static void render_copyfunc(struct igt_buf *src, unsigned src_x, unsigned src_y,
 		 * Flush outstanding blts so that they don't end up on
 		 * the render ring when that's not allowed (gen6+).
 		 */
-		intel_batchbuffer_flush(batch);
-		rendercopy(batch, NULL, src, src_x, src_y,
+		intel_bb_flush_blit(ibb);
+
+		rendercopy(ibb, 0, src, src_x, src_y,
 		     options.tile_size, options.tile_size,
 		     dst, dst_x, dst_y);
 	} else
@@ -379,7 +388,7 @@ static void render_copyfunc(struct igt_buf *src, unsigned src_x, unsigned src_y,
 		keep_gpu_busy();
 
 	keep_gpu_busy_counter++;
-	intel_batchbuffer_flush(batch);
+	intel_bb_flush_blit(ibb);
 }
 
 static void next_copyfunc(int tile)
@@ -444,7 +453,7 @@ static void fan_out(void)
 			set_to_cpu_domain(&buffers[current_set][buf_idx], 1);
 
 		cpucpy2d(tmp_tile, options.tile_size, 0, 0,
-			 buffers[current_set][buf_idx].data,
+			 buffers[current_set][buf_idx].ptr,
 			 buffers[current_set][buf_idx].surface[0].stride / sizeof(uint32_t),
 			 x, y, i);
 	}
@@ -468,7 +477,7 @@ static void fan_in_and_check(void)
 		if (options.use_cpu_maps)
 			set_to_cpu_domain(&buffers[current_set][buf_idx], 0);
 
-		cpucpy2d(buffers[current_set][buf_idx].data,
+		cpucpy2d(buffers[current_set][buf_idx].ptr,
 			 buffers[current_set][buf_idx].surface[0].stride / sizeof(uint32_t),
 			 x, y,
 			 tmp_tile, options.tile_size, 0, 0,
@@ -476,61 +485,59 @@ static void fan_in_and_check(void)
 	}
 }
 
-static void sanitize_stride(struct igt_buf *buf)
+static void sanitize_stride(struct intel_buf *buf)
 {
 
-	if (igt_buf_height(buf) > options.max_dimension)
+	if (intel_buf_height(buf) > options.max_dimension)
 		buf->surface[0].stride = buf->surface[0].size / options.max_dimension;
 
-	if (igt_buf_height(buf) < options.tile_size)
+	if (intel_buf_height(buf) < options.tile_size)
 		buf->surface[0].stride = buf->surface[0].size / options.tile_size;
 
-	if (igt_buf_width(buf) < options.tile_size)
+	if (intel_buf_width(buf) < options.tile_size)
 		buf->surface[0].stride = options.tile_size * sizeof(uint32_t);
 
 	igt_assert(buf->surface[0].stride <= 8192);
-	igt_assert(igt_buf_width(buf) <= options.max_dimension);
-	igt_assert(igt_buf_height(buf) <= options.max_dimension);
+	igt_assert(intel_buf_width(buf) <= options.max_dimension);
+	igt_assert(intel_buf_height(buf) <= options.max_dimension);
 
-	igt_assert(igt_buf_width(buf) >= options.tile_size);
-	igt_assert(igt_buf_height(buf) >= options.tile_size);
+	igt_assert(intel_buf_width(buf) >= options.tile_size);
+	igt_assert(intel_buf_height(buf) >= options.tile_size);
 
 }
 
-static void init_buffer(struct igt_buf *buf, unsigned size)
+static void init_buffer(struct intel_buf *buf, unsigned size)
 {
-	memset(buf, 0, sizeof(*buf));
+	uint32_t stride, width, height, bpp;
+
+	stride = 4096;
+	bpp = 32;
+	width = stride / (bpp / 8);
+	height = size / stride;
 
-	buf->bo = drm_intel_bo_alloc(bufmgr, "tiled bo", size, 4096);
-	buf->surface[0].size = size;
-	igt_assert(buf->bo);
-	buf->tiling = I915_TILING_NONE;
-	buf->surface[0].stride = 4096;
-	buf->bpp = 32;
+	intel_buf_init(bops, buf, width, height, bpp, 0,
+		       I915_TILING_NONE, I915_COMPRESSION_NONE);
 
 	sanitize_stride(buf);
 
 	if (options.no_hw)
-		buf->data = malloc(size);
+		buf->ptr = malloc(size);
 	else {
 		if (options.use_cpu_maps)
-			drm_intel_bo_map(buf->bo, 1);
+			intel_buf_cpu_map(buf, 1);
 		else
-			drm_intel_gem_bo_map_gtt(buf->bo);
-		buf->data = buf->bo->virtual;
+			intel_buf_device_map(buf, 1);
 	}
-
-	buf->num_tiles = options.tiles_per_buf;
 }
 
 static void exchange_buf(void *array, unsigned i, unsigned j)
 {
-	struct igt_buf *buf_arr, tmp;
+	struct intel_buf *buf_arr, tmp;
 	buf_arr = array;
 
-	memcpy(&tmp, &buf_arr[i], sizeof(struct igt_buf));
-	memcpy(&buf_arr[i], &buf_arr[j], sizeof(struct igt_buf));
-	memcpy(&buf_arr[j], &tmp, sizeof(struct igt_buf));
+	memcpy(&tmp, &buf_arr[i], sizeof(struct intel_buf));
+	memcpy(&buf_arr[i], &buf_arr[j], sizeof(struct intel_buf));
+	memcpy(&buf_arr[j], &tmp, sizeof(struct intel_buf));
 }
 
 
@@ -577,7 +584,7 @@ static void init_set(unsigned set)
 
 		sanitize_stride(&buffers[set][i]);
 
-		gem_set_tiling(drm_fd, buffers[set][i].bo->handle,
+		gem_set_tiling(drm_fd, buffers[set][i].handle,
 			       buffers[set][i].tiling,
 			       buffers[set][i].surface[0].stride);
 
@@ -598,8 +605,9 @@ static void copy_tiles(unsigned *permutation)
 {
 	unsigned src_tile, src_buf_idx, src_x, src_y;
 	unsigned dst_tile, dst_buf_idx, dst_x, dst_y;
-	struct igt_buf *src_buf, *dst_buf;
+	struct intel_buf *src_buf, *dst_buf;
 	int i, idx;
+
 	for (i = 0; i < num_total_tiles; i++) {
 		/* tile_permutation is independent of current_permutation, so
 		 * abuse it to randomize the order of the src bos */
@@ -620,10 +628,10 @@ static void copy_tiles(unsigned *permutation)
 			igt_info("copying tile %i from %i (%i, %i) to %i (%i, %i)", i, tile_permutation[i], src_buf_idx, src_tile, permutation[idx], dst_buf_idx, dst_tile);
 
 		if (options.no_hw) {
-			cpucpy2d(src_buf->data,
+			cpucpy2d(src_buf->ptr,
 				 src_buf->surface[0].stride / sizeof(uint32_t),
 				 src_x, src_y,
-				 dst_buf->data,
+				 dst_buf->ptr,
 				 dst_buf->surface[0].stride / sizeof(uint32_t),
 				 dst_x, dst_y,
 				 i);
@@ -635,7 +643,7 @@ static void copy_tiles(unsigned *permutation)
 		}
 	}
 
-	intel_batchbuffer_flush(batch);
+	intel_bb_flush_blit(ibb);
 }
 
 static void sanitize_tiles_per_buf(void)
@@ -757,6 +765,7 @@ static void init(void)
 {
 	int i;
 	unsigned tmp;
+	uint32_t stride, width, height, bpp;
 
 	if (options.num_buffers == 0) {
 		tmp = gem_aperture_size(drm_fd);
@@ -767,22 +776,25 @@ static void init(void)
 	} else
 		num_buffers = options.num_buffers;
 
-	bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
-	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
-	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
 	num_fences = gem_available_fences(drm_fd);
 	igt_assert_lt(4, num_fences);
-	batch = intel_batchbuffer_alloc(bufmgr, devid);
 
-	busy_bo = drm_intel_bo_alloc(bufmgr, "tiled bo", BUSY_BUF_SIZE, 4096);
-	if (options.forced_tiling >= 0)
-		gem_set_tiling(drm_fd, busy_bo->handle, options.forced_tiling, 4096);
+	bops = buf_ops_create(drm_fd);
+	ibb = intel_bb_create(drm_fd, 4096);
+
+	stride = 4096;
+	bpp = 32;
+	width = stride / (bpp / 8);
+	height = BUSY_BUF_SIZE / stride;
+	intel_buf_init(bops, &busy_bo,
+		       width, height, bpp, 0, options.forced_tiling,
+		       I915_COMPRESSION_NONE);
 
 	for (i = 0; i < num_buffers; i++) {
 		init_buffer(&buffers[0][i], options.scratch_buf_size);
 		init_buffer(&buffers[1][i], options.scratch_buf_size);
 
-		num_total_tiles += buffers[0][i].num_tiles;
+		num_total_tiles += options.tiles_per_buf;
 	}
 	current_set = 0;
 
@@ -792,7 +804,7 @@ static void init(void)
 
 static void check_render_copyfunc(void)
 {
-	struct igt_buf src, dst;
+	struct intel_buf src, dst;
 	uint32_t *ptr;
 	int i, j, pass;
 
@@ -803,17 +815,18 @@ static void check_render_copyfunc(void)
 	init_buffer(&dst, options.scratch_buf_size);
 
 	for (pass = 0; pass < 16; pass++) {
-		int sx = random() % (igt_buf_width(&src)-options.tile_size);
-		int sy = random() % (igt_buf_height(&src)-options.tile_size);
-		int dx = random() % (igt_buf_width(&dst)-options.tile_size);
-		int dy = random() % (igt_buf_height(&dst)-options.tile_size);
+		int sx = random() % (intel_buf_width(&src)-options.tile_size);
+		int sy = random() % (intel_buf_height(&src)-options.tile_size);
+		int dx = random() % (intel_buf_width(&dst)-options.tile_size);
+		int dy = random() % (intel_buf_height(&dst)-options.tile_size);
 
 		if (options.use_cpu_maps)
 			set_to_cpu_domain(&src, 1);
 
-		memset(src.data, 0xff, options.scratch_buf_size);
+		memset(src.ptr, 0xff, options.scratch_buf_size);
 		for (j = 0; j < options.tile_size; j++) {
-			ptr = (uint32_t*)((char *)src.data + sx*4 + (sy+j) * src.surface[0].stride);
+			ptr = (uint32_t*)((char *)src.ptr + sx*4 +
+					  (sy+j) * src.surface[0].stride);
 			for (i = 0; i < options.tile_size; i++)
 				ptr[i] = j * options.tile_size + i;
 		}
@@ -824,7 +837,8 @@ static void check_render_copyfunc(void)
 			set_to_cpu_domain(&dst, 0);
 
 		for (j = 0; j < options.tile_size; j++) {
-			ptr = (uint32_t*)((char *)dst.data + dx*4 + (dy+j) * dst.surface[0].stride);
+			ptr = (uint32_t*)((char *)dst.ptr + dx*4 +
+					  (dy+j) * dst.surface[0].stride);
 			for (i = 0; i < options.tile_size; i++)
 				if (ptr[i] != j * options.tile_size + i) {
 					igt_info("render copyfunc mismatch at (%d, %d): found %d, expected %d\n", i, j, ptr[i], j * options.tile_size + i);
@@ -909,8 +923,8 @@ igt_simple_main_args("ds:g:c:t:rbuxmo:fp:",
 
 	igt_info("num failed tiles %u, max incoherent bytes %zd\n", stats.num_failed, stats.max_failed_reads * sizeof(uint32_t));
 
-	intel_batchbuffer_free(batch);
-	drm_intel_bufmgr_destroy(bufmgr);
+	intel_bb_destroy(ibb);
+	buf_ops_destroy(bops);
 
 	close(drm_fd);
 
diff --git a/tests/kms_big_fb.c b/tests/kms_big_fb.c
index a754b299..a9907ea3 100644
--- a/tests/kms_big_fb.c
+++ b/tests/kms_big_fb.c
@@ -46,28 +46,36 @@ typedef struct {
 	int big_fb_width, big_fb_height;
 	uint64_t ram_size, aper_size, mappable_size;
 	igt_render_copyfunc_t render_copy;
-	drm_intel_bufmgr *bufmgr;
-	struct intel_batchbuffer *batch;
+	struct buf_ops *bops;
+	struct intel_bb *ibb;
 } data_t;
 
 static void init_buf(data_t *data,
-		     struct igt_buf *buf,
+		     struct intel_buf *buf,
 		     const struct igt_fb *fb,
-		     const char *name)
+		     const char *buf_name)
 {
+	uint32_t name, handle, tiling, stride, width, height, bpp, size;
+
 	igt_assert_eq(fb->offsets[0], 0);
 
-	buf->bo = gem_handle_to_libdrm_bo(data->bufmgr, data->drm_fd,
-					  name, fb->gem_handle);
-	buf->tiling = igt_fb_mod_to_tiling(fb->modifier);
-	buf->surface[0].stride = fb->strides[0];
-	buf->bpp = fb->plane_bpp[0];
-	buf->surface[0].size = fb->size;
+	tiling = igt_fb_mod_to_tiling(fb->modifier);
+	stride = fb->strides[0];
+	bpp = fb->plane_bpp[0];
+	size = fb->size;
+	width = stride / (bpp / 8);
+	height = size / stride;
+
+	name = gem_flink(data->drm_fd, fb->gem_handle);
+	handle = gem_open(data->drm_fd, name);
+	intel_buf_init_using_handle(data->bops, handle, buf, width, height, bpp,
+				    0, tiling, 0);
+	intel_buf_set_name(buf, buf_name);
 }
 
-static void fini_buf(struct igt_buf *buf)
+static void fini_buf(struct intel_buf *buf)
 {
-	drm_intel_bo_unreference(buf->bo);
+	intel_buf_close(buf->bops, buf);
 }
 
 static void copy_pattern(data_t *data,
@@ -75,7 +83,7 @@ static void copy_pattern(data_t *data,
 			 struct igt_fb *src_fb, int sx, int sy,
 			 int w, int h)
 {
-	struct igt_buf src = {}, dst = {};
+	struct intel_buf src = {}, dst = {};
 
 	init_buf(data, &src, src_fb, "big fb src");
 	init_buf(data, &dst, dst_fb, "big fb dst");
@@ -91,7 +99,7 @@ static void copy_pattern(data_t *data,
 	 * rendered with the blitter/render engine.
 	 */
 	if (data->render_copy) {
-		data->render_copy(data->batch, NULL, &src, sx, sy, w, h, &dst, dx, dy);
+		data->render_copy(data->ibb, 0, &src, sx, sy, w, h, &dst, dx, dy);
 	} else {
 		w = min(w, src_fb->width - sx);
 		w = min(w, dst_fb->width - dx);
@@ -99,14 +107,16 @@ static void copy_pattern(data_t *data,
 		h = min(h, src_fb->height - sy);
 		h = min(h, dst_fb->height - dy);
 
-		intel_blt_copy(data->batch, src.bo, sx, sy,
-			       src.surface[0].stride,
-			       dst.bo, dx, dy, dst.surface[0].stride, w, h,
-			       dst.bpp);
+		intel_bb_blt_copy(data->ibb, &src, sx, sy, src.surface[0].stride,
+				  &dst, dx, dy, dst.surface[0].stride, w, h, dst.bpp);
 	}
 
 	fini_buf(&dst);
 	fini_buf(&src);
+
+	/* intel_bb cache doesn't know when objects disappear, so
+	 * let's purge the cache */
+	intel_bb_reset(data->ibb, true);
 }
 
 static void generate_pattern(data_t *data,
@@ -648,8 +658,8 @@ igt_main
 		if (intel_gen(data.devid) >= 4)
 			data.render_copy = igt_get_render_copyfunc(data.devid);
 
-		data.bufmgr = drm_intel_bufmgr_gem_init(data.drm_fd, 4096);
-		data.batch = intel_batchbuffer_alloc(data.bufmgr, data.devid);
+		data.bops = buf_ops_create(data.drm_fd);
+		data.ibb = intel_bb_create(data.drm_fd, 4096);
 	}
 
 	/*
@@ -708,7 +718,7 @@ igt_main
 	igt_fixture {
 		igt_display_fini(&data.display);
 
-		intel_batchbuffer_free(data.batch);
-		drm_intel_bufmgr_destroy(data.bufmgr);
+		intel_bb_destroy(data.ibb);
+		buf_ops_destroy(data.bops);
 	}
 }
diff --git a/tests/kms_cursor_crc.c b/tests/kms_cursor_crc.c
index e9491847..be37a75d 100644
--- a/tests/kms_cursor_crc.c
+++ b/tests/kms_cursor_crc.c
@@ -64,11 +64,11 @@ typedef struct {
 	igt_plane_t *cursor;
 	cairo_surface_t *surface;
 	uint32_t devid;
-	drm_intel_bufmgr *bufmgr;
+	struct buf_ops *bops;
 	igt_render_copyfunc_t rendercopy;
-	drm_intel_bo * drmibo[2];
-	struct intel_batchbuffer *batch;
-	struct igt_buf igtbo[2];
+	uint32_t drm_handle[2];
+	struct intel_bb *ibb;
+	struct intel_buf igtbo[2];
 
 } data_t;
 
@@ -155,7 +155,7 @@ static void restore_image(data_t *data)
 
 	if (data->rendercopy != NULL) {
 		/* use rendercopy if available */
-		data->rendercopy(data->batch, NULL,
+		data->rendercopy(data->ibb, 0,
 				 &data->igtbo[RESTOREBUFFER], 0, 0,
 				 data->primary_fb[RESTOREBUFFER].width,
 				 data->primary_fb[RESTOREBUFFER].height,
@@ -381,16 +381,25 @@ static void cleanup_crtc(data_t *data)
 	igt_remove_fb(data->drm_fd, &data->primary_fb[FRONTBUFFER]);
 	igt_remove_fb(data->drm_fd, &data->primary_fb[RESTOREBUFFER]);
 
+	intel_bb_destroy(data->ibb);
+
 	igt_display_reset(display);
 }
 
 static void scratch_buf_init(data_t *data, int buffer)
 {
-	data->igtbo[buffer].bo = data->drmibo[buffer];
-	data->igtbo[buffer].surface[0].stride = data->primary_fb[buffer].strides[0];
-	data->igtbo[buffer].tiling = data->primary_fb[buffer].modifier;
-	data->igtbo[buffer].surface[0].size = data->primary_fb[buffer].size;
-	data->igtbo[buffer].bpp = data->primary_fb[buffer].plane_bpp[0];
+	uint32_t handle, tiling, stride, width, height, bpp, size;
+
+	handle = data->drm_handle[buffer];
+	size = data->primary_fb[buffer].size;
+	tiling = data->primary_fb[buffer].modifier;
+	stride = data->primary_fb[buffer].strides[0];
+	bpp = data->primary_fb[buffer].plane_bpp[0];
+	width = stride / (bpp / 8);
+	height = size / stride;
+
+	intel_buf_init_using_handle(data->bops, handle, &data->igtbo[buffer],
+				    width, height, bpp, 0, tiling, 0);
 }
 
 static void prepare_crtc(data_t *data, igt_output_t *output,
@@ -450,6 +459,8 @@ static void prepare_crtc(data_t *data, igt_output_t *output,
 		igt_paint_test_pattern(cr, data->screenw, data->screenh);
 		cairo_destroy(cr);
 	} else {
+		uint32_t name;
+
 		/* store test image as fb if rendercopy is available */
 		cr = igt_get_cairo_ctx(data->drm_fd,
 		                       &data->primary_fb[RESTOREBUFFER]);
@@ -457,22 +468,20 @@ static void prepare_crtc(data_t *data, igt_output_t *output,
 		igt_paint_test_pattern(cr, data->screenw, data->screenh);
 		igt_put_cairo_ctx(cr);
 
-		data->drmibo[FRONTBUFFER] = gem_handle_to_libdrm_bo(data->bufmgr,
-								    data->drm_fd,
-								    "", data->primary_fb[FRONTBUFFER].gem_handle);
-		igt_assert(data->drmibo[FRONTBUFFER]);
+		name = gem_flink(data->drm_fd,
+				 data->primary_fb[FRONTBUFFER].gem_handle);
+		data->drm_handle[FRONTBUFFER] = gem_open(data->drm_fd, name);
+		igt_assert(data->drm_handle[FRONTBUFFER]);
 
-		data->drmibo[RESTOREBUFFER] = gem_handle_to_libdrm_bo(data->bufmgr,
-								      data->drm_fd,
-								      "", data->primary_fb[RESTOREBUFFER].gem_handle);
-		igt_assert(data->drmibo[RESTOREBUFFER]);
+		name = gem_flink(data->drm_fd,
+				 data->primary_fb[RESTOREBUFFER].gem_handle);
+		data->drm_handle[RESTOREBUFFER] = gem_open(data->drm_fd, name);
+		igt_assert(data->drm_handle[RESTOREBUFFER]);
 
 		scratch_buf_init(data, RESTOREBUFFER);
 		scratch_buf_init(data, FRONTBUFFER);
 
-		data->batch = intel_batchbuffer_alloc(data->bufmgr,
-						      data->devid);
-		igt_assert(data->batch);
+		data->ibb = intel_bb_create(data->drm_fd, 4096);
 	}
 
 	igt_pipe_crc_start(data->pipe_crc);
@@ -832,10 +841,7 @@ igt_main
 		igt_display_require(&data.display, data.drm_fd);
 
 		if (is_i915_device(data.drm_fd)) {
-			data.bufmgr = drm_intel_bufmgr_gem_init(data.drm_fd, 4096);
-			igt_assert(data.bufmgr);
-			drm_intel_bufmgr_gem_enable_reuse(data.bufmgr);
-
+			data.bops = buf_ops_create(data.drm_fd);
 			data.devid = intel_get_drm_devid(data.drm_fd);
 			data.rendercopy = igt_get_render_copyfunc(data.devid);
 		}
@@ -854,12 +860,7 @@ igt_main
 			igt_pipe_crc_stop(data.pipe_crc);
 			igt_pipe_crc_free(data.pipe_crc);
 		}
-
-		if (data.bufmgr != NULL) {
-			intel_batchbuffer_free(data.batch);
-			drm_intel_bufmgr_destroy(data.bufmgr);
-		}
-
+		buf_ops_destroy(data.bops);
 		igt_display_fini(&data.display);
 	}
 }
-- 
2.26.0

_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

  parent reply	other threads:[~2020-08-02 16:30 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-08-02 16:29 [igt-dev] [PATCH i-g-t v23 00/18] Remove libdrm in rendercopy Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 01/18] lib/intel_bufops: add mapping on cpu / device Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 02/18] lib/intel_bufops: change in hw/sw tiling detection Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 03/18] lib/intel_bufops: change stride requirements for Grantsdale Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 04/18] lib/intel_batchbuffer: add new functions to support rendercopy Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 05/18] lib/intel_batchbuffer: dump bb to base64 Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 06/18] lib/intel_batchbuffer: use canonical addresses for 48bit ppgtt Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 07/18] tests/api_intel_bb: test flags are cleared on bb reset Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 08/18] tests/gem_caching|partial: adopt to batch flush function cleanup Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 09/18] lib/rendercopy: remove libdrm dependency Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 10/18] tests/api_intel_bb: add render tests Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 11/18] lib/igt_draw: remove libdrm dependency Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 12/18] lib/igt_fb: Removal of " Zbigniew Kempczyński
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 13/18] tests/gem|kms: remove libdrm dependency (batch 1) Zbigniew Kempczyński
2020-08-02 16:29 ` Zbigniew Kempczyński [this message]
2020-08-02 16:29 ` [igt-dev] [PATCH i-g-t v23 15/18] tools/intel_residency: adopt intel_residency to use bufops Zbigniew Kempczyński
2020-08-02 16:30 ` [igt-dev] [PATCH i-g-t v23 16/18] tests/perf: remove libdrm dependency for rendercopy Zbigniew Kempczyński
2020-08-02 16:30 ` [igt-dev] [PATCH i-g-t v23 17/18] fix: lib/intel_bufops: add 64bit bpp Zbigniew Kempczyński
2020-08-02 16:30 ` [igt-dev] [PATCH i-g-t v23 18/18] tests/kms_psr: trying to fix blt Zbigniew Kempczyński
2020-08-02 17:09 ` [igt-dev] ✓ Fi.CI.BAT: success for Remove libdrm in rendercopy (rev22) Patchwork
2020-08-02 20:36 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20200802163002.5815-15-zbigniew.kempczynski@intel.com \
    --to=zbigniew.kempczynski@intel.com \
    --cc=chris@chris-wilson.co.uk \
    --cc=igt-dev@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.