From: Thomas Hellstrom <thellstrom@vmware.com>
To: dri-devel@lists.freedesktop.org
Cc: Thomas Hellstrom <thellstrom@vmware.com>,
	linux-graphics-maintainer@vmware.com
Subject: [PATCH -next 02/15] drm/vmwgfx: Move buffer object related code to vmwgfx_bo.c
Date: Tue,  3 Jul 2018 21:14:47 +0200
Message-ID: <20180703191500.2374-3-thellstrom@vmware.com>
In-Reply-To: <20180703191500.2374-1-thellstrom@vmware.com>

It makes more sense to have all the buffer object related code in
a single file rather than splitting it up between the resource code
and buffer object pinning utilities.

Place all buffer object related code in vmwgfx_bo.c. Fix up headers
and export resource functionality when needed in the buffer object
code.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
---
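Note for reviewers (this section is dropped by git-am): the two downcast
helpers added to vmwgfx_bo.c are plain container_of() hops through the
embedded structs. A stand-alone sketch of that pattern, using reduced
stand-in structs rather than the real TTM types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-ins for the real ttm/vmw types; only the embedding matters. */
struct ttm_buffer_object { int placeholder; };
struct vmw_buffer_object { struct ttm_buffer_object base; };
struct vmw_user_buffer_object { long prime; struct vmw_buffer_object vbo; };

int main(void)
{
	struct vmw_user_buffer_object user_bo = { 0 };
	struct ttm_buffer_object *bo = &user_bo.vbo.base;

	/* TTM object -> embedding vmw buffer object (vmw_buffer_object()). */
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	/* vmw buffer object -> user object (vmw_user_buffer_object()). */
	struct vmw_user_buffer_object *ubo =
		container_of(vbo, struct vmw_user_buffer_object, vbo);

	printf("%d\n", ubo == &user_bo);	/* prints 1 */
	return 0;
}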
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c         | 816 ++++++++++++++++++++-
 drivers/gpu/drm/vmwgfx/vmwgfx_context.c    |   4 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c    |   4 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h        |  73 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c         |   2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c        |   2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c        |   6 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c   | 640 +---------------
 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c     |   4 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c    |   4 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c |   4 +-
 11 files changed, 851 insertions(+), 708 deletions(-)
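
Note: vmw_bo_map_and_cache()/vmw_bo_unmap(), moved below, keep a lazily
created kernel map cached until the buffer moves, is swapped out or is
destroyed. A user-space analogue of that lifetime, with malloc()/free()
standing in for ttm_bo_kmap()/ttm_bo_kunmap() (a sketch, not the kernel
API):

#include <stdio.h>
#include <stdlib.h>

struct bo {
	void *cached_map;	/* NULL until the first map request */
	size_t size;
};

/* Map on first use, then return the cached map (vmw_bo_map_and_cache). */
static void *bo_map_and_cache(struct bo *bo)
{
	if (!bo->cached_map)
		bo->cached_map = malloc(bo->size);	/* stand-in for ttm_bo_kmap() */
	return bo->cached_map;
}

/* Tear the cached map down (vmw_bo_unmap); a no-op when unmapped. */
static void bo_unmap(struct bo *bo)
{
	if (!bo->cached_map)
		return;
	free(bo->cached_map);				/* stand-in for ttm_bo_kunmap() */
	bo->cached_map = NULL;
}

int main(void)
{
	struct bo bo = { .cached_map = NULL, .size = 4096 };
	void *a = bo_map_and_cache(&bo);
	void *b = bo_map_and_cache(&bo);	/* cached: same mapping returned */

	printf("%d\n", a == b);			/* prints 1 */
	bo_unmap(&bo);		/* as done from the move/swap notify paths */
	return 0;
}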

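Note: for context on the synccpu paths below, this is roughly how user
space drives them: grab the buffer for CPU access, touch it, release it.
A sketch only; it assumes a libdrm build environment, and the fd and
buffer handle come from elsewhere. The argument layout follows the
vmwgfx uapi header; include paths may vary per distribution:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int cpu_write_window(int fd, uint32_t handle)
{
	struct drm_vmw_synccpu_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.op = drm_vmw_synccpu_grab;
	/* Idle GPU access and block further command submission. */
	arg.flags = drm_vmw_synccpu_write;

	ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
	if (ret)
		return ret;	/* -EBUSY possible with drm_vmw_synccpu_dontblock */

	/* ... CPU writes through a previously mmap()ed view go here ... */

	arg.op = drm_vmw_synccpu_release;
	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}
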
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index f26f658cccdb..d950244798fe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,6 +29,51 @@
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
+#include "drm/ttm/ttm_object.h"
+
+
+/**
+ * struct vmw_user_buffer_object - User-space-visible buffer object
+ *
+ * @prime: The prime object providing user visibility.
+ * @vbo: The struct vmw_buffer_object
+ */
+struct vmw_user_buffer_object {
+	struct ttm_prime_object prime;
+	struct vmw_buffer_object vbo;
+};
+
+
+/**
+ * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the
+ * TTM buffer object.
+ */
+static struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct vmw_buffer_object, base);
+}
+
+
+/**
+ * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_user_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
+ * buffer object.
+ */
+static struct vmw_user_buffer_object *
+vmw_user_buffer_object(struct ttm_buffer_object *bo)
+{
+	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
+}
 
 
 /**
@@ -38,9 +83,8 @@
  * @buf:  DMA buffer to move.
  * @placement:  The placement to pin it.
  * @interruptible:  Use interruptible wait.
- *
- * Returns
- *  -ERESTARTSYS if interrupted by a signal.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
  */
 int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
 			    struct vmw_buffer_object *buf,
@@ -78,6 +122,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
 	return ret;
 }
 
+
 /**
  * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
  *
@@ -88,9 +133,8 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
  * @buf:  DMA buffer to move.
  * @pin:  Pin buffer if true.
  * @interruptible:  Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
  */
 int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 			      struct vmw_buffer_object *buf,
@@ -133,6 +177,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 	return ret;
 }
 
+
 /**
  * vmw_bo_pin_in_vram - Move a buffer to vram.
  *
@@ -142,9 +187,8 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
  * @interruptible:  Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
  */
 int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
 		       struct vmw_buffer_object *buf,
@@ -154,6 +198,7 @@ int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
 				       interruptible);
 }
 
+
 /**
  * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
  *
@@ -163,9 +208,8 @@ int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to pin.
  * @interruptible:  Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
  */
 int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 				struct vmw_buffer_object *buf,
@@ -225,6 +269,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 	return ret;
 }
 
+
 /**
  * vmw_bo_unpin - Unpin the given buffer without moving it.
  *
@@ -233,9 +278,8 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to unpin.
  * @interruptible:  Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
+ * Return: Zero on success, negative error code on failure. In particular,
+ * -ERESTARTSYS if interrupted by a signal.
  */
 int vmw_bo_unpin(struct vmw_private *dev_priv,
 		 struct vmw_buffer_object *buf,
@@ -325,25 +369,8 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 }
 
 
-/*
- * vmw_buffer_object_unmap - Tear down a cached buffer object map.
- *
- * @vbo: The buffer object whose map we are tearing down.
- *
- * This function tears down a cached map set up using
- * vmw_buffer_object_map_and_cache().
- */
-void vmw_buffer_object_unmap(struct vmw_buffer_object *vbo)
-{
-	if (vbo->map.bo == NULL)
-		return;
-
-	ttm_bo_kunmap(&vbo->map);
-}
-
-
-/*
- * vmw_buffer_object_map_and_cache - Map a buffer object and cache the map
+/**
+ * vmw_bo_map_and_cache - Map a buffer object and cache the map
  *
  * @vbo: The buffer object to map
  * Return: A kernel virtual address or NULL if mapping failed.
@@ -357,7 +384,7 @@ void vmw_buffer_object_unmap(struct vmw_buffer_object *vbo)
  * 3) Buffer object destruction
  *
  */
-void *vmw_buffer_object_map_and_cache(struct vmw_buffer_object *vbo)
+void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
 {
 	struct ttm_buffer_object *bo = &vbo->base;
 	bool not_used;
@@ -374,3 +401,720 @@ void *vmw_buffer_object_map_and_cache(struct vmw_buffer_object *vbo)
 
 	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
 }
+
+
+/**
+ * vmw_bo_unmap - Tear down a cached buffer object map.
+ *
+ * @vbo: The buffer object whose map we are tearing down.
+ *
+ * This function tears down a cached map set up using
+ * vmw_bo_map_and_cache().
+ */
+void vmw_bo_unmap(struct vmw_buffer_object *vbo)
+{
+	if (vbo->map.bo == NULL)
+		return;
+
+	ttm_bo_kunmap(&vbo->map);
+}
+
+
+/**
+ * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
+			      bool user)
+{
+	static size_t struct_size, user_struct_size;
+	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+	if (unlikely(struct_size == 0)) {
+		size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+		struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_buffer_object));
+		user_struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+	}
+
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+		page_array_size +=
+			ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+	return ((user) ? user_struct_size : struct_size) +
+		page_array_size;
+}
+
+
+/**
+ * vmw_bo_bo_free - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+void vmw_bo_bo_free(struct ttm_buffer_object *bo)
+{
+	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+	vmw_bo_unmap(vmw_bo);
+	kfree(vmw_bo);
+}
+
+
+/**
+ * vmw_user_bo_destroy - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
+{
+	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+
+	vmw_bo_unmap(&vmw_user_bo->vbo);
+	ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
+
+/**
+ * vmw_bo_init - Initialize a vmw buffer object
+ *
+ * @dev_priv: Pointer to the device private struct
+ * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
+ * @size: Buffer object size in bytes.
+ * @placement: Initial placement.
+ * @interruptible: Whether waits should be performed interruptible.
+ * @bo_free: The buffer object destructor.
+ * Return: Zero on success, negative error code on error.
+ *
+ * Note that on error, the code will free the buffer object.
+ */
+int vmw_bo_init(struct vmw_private *dev_priv,
+		struct vmw_buffer_object *vmw_bo,
+		size_t size, struct ttm_placement *placement,
+		bool interruptible,
+		void (*bo_free)(struct ttm_buffer_object *bo))
+{
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	size_t acc_size;
+	int ret;
+	bool user = (bo_free == &vmw_user_bo_destroy);
+
+	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
+
+	acc_size = vmw_bo_acc_size(dev_priv, size, user);
+	memset(vmw_bo, 0, sizeof(*vmw_bo));
+
+	INIT_LIST_HEAD(&vmw_bo->res_list);
+
+	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+			  ttm_bo_type_device, placement,
+			  0, interruptible, acc_size,
+			  NULL, NULL, bo_free);
+	return ret;
+}
+
+
+/**
+ * vmw_user_bo_release - TTM reference base object release callback for
+ * vmw user buffer objects
+ *
+ * @p_base: The TTM base object pointer about to be unreferenced.
+ *
+ * Clears the TTM base object pointer and drops the reference the
+ * base object has on the underlying struct vmw_buffer_object.
+ */
+static void vmw_user_bo_release(struct ttm_base_object **p_base)
+{
+	struct vmw_user_buffer_object *vmw_user_bo;
+	struct ttm_base_object *base = *p_base;
+	struct ttm_buffer_object *bo;
+
+	*p_base = NULL;
+
+	if (unlikely(base == NULL))
+		return;
+
+	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+				   prime.base);
+	bo = &vmw_user_bo->vbo.base;
+	ttm_bo_unref(&bo);
+}
+
+
+/**
+ * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
+ * for vmw user buffer objects
+ *
+ * @base: Pointer to the TTM base object
+ * @ref_type: Reference type of the reference reaching zero.
+ *
+ * Called when user-space drops its last synccpu reference on the buffer
+ * object, either explicitly or as part of cleanup at file close.
+ */
+static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
+					enum ttm_ref_type ref_type)
+{
+	struct vmw_user_buffer_object *user_bo;
+
+	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
+
+	switch (ref_type) {
+	case TTM_REF_SYNCCPU_WRITE:
+		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+		break;
+	default:
+		WARN_ONCE(true, "Undefined buffer object reference release.\n");
+	}
+}
+
+
+/**
+ * vmw_user_bo_alloc - Allocate a user buffer object
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the buffer object.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
+ * should be assigned.
+ * Return: Zero on success, negative error code on error.
+ */
+int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+		      struct ttm_object_file *tfile,
+		      uint32_t size,
+		      bool shareable,
+		      uint32_t *handle,
+		      struct vmw_buffer_object **p_vbo,
+		      struct ttm_base_object **p_base)
+{
+	struct vmw_user_buffer_object *user_bo;
+	struct ttm_buffer_object *tmp;
+	int ret;
+
+	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+	if (unlikely(!user_bo)) {
+		DRM_ERROR("Failed to allocate a buffer.\n");
+		return -ENOMEM;
+	}
+
+	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
+			  (dev_priv->has_mob) ?
+			  &vmw_sys_placement :
+			  &vmw_vram_sys_placement, true,
+			  &vmw_user_bo_destroy);
+	if (unlikely(ret != 0))
+		return ret;
+
+	tmp = ttm_bo_reference(&user_bo->vbo.base);
+	ret = ttm_prime_object_init(tfile,
+				    size,
+				    &user_bo->prime,
+				    shareable,
+				    ttm_buffer_type,
+				    &vmw_user_bo_release,
+				    &vmw_user_bo_ref_obj_release);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unref(&tmp);
+		goto out_no_base_object;
+	}
+
+	*p_vbo = &user_bo->vbo;
+	if (p_base) {
+		*p_base = &user_bo->prime.base;
+		kref_get(&(*p_base)->refcount);
+	}
+	*handle = user_bo->prime.base.hash.key;
+
+out_no_base_object:
+	return ret;
+}
+
+
+/**
+ * vmw_user_bo_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+			      struct ttm_object_file *tfile)
+{
+	struct vmw_user_buffer_object *vmw_user_bo;
+
+	if (unlikely(bo->destroy != vmw_user_bo_destroy))
+		return -EPERM;
+
+	vmw_user_bo = vmw_user_buffer_object(bo);
+
+	/* Check that the caller has opened the object. */
+	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
+		return 0;
+
+	DRM_ERROR("Could not grant buffer access.\n");
+	return -EPERM;
+}
+
+
+/**
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ * Return: Zero on success, negative error code on error. In particular,
+ * -EBUSY will be returned if a dontblock operation is requested and the
+ * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
+ * interrupted by a signal.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
+				    struct ttm_object_file *tfile,
+				    uint32_t flags)
+{
+	struct ttm_buffer_object *bo = &user_bo->vbo.base;
+	bool existed;
+	int ret;
+
+	if (flags & drm_vmw_synccpu_allow_cs) {
+		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
+		long lret;
+
+		lret = reservation_object_wait_timeout_rcu
+			(bo->resv, true, true,
+			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+		if (!lret)
+			return -EBUSY;
+		else if (lret < 0)
+			return lret;
+		return 0;
+	}
+
+	ret = ttm_bo_synccpu_write_grab
+		(bo, !!(flags & drm_vmw_synccpu_dontblock));
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+				 TTM_REF_SYNCCPU_WRITE, &existed, false);
+	if (ret != 0 || existed)
+		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+
+	return ret;
+}
+
+/**
+ * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_bo_synccpu_release(uint32_t handle,
+					   struct ttm_object_file *tfile,
+					   uint32_t flags)
+{
+	if (!(flags & drm_vmw_synccpu_allow_cs))
+		return ttm_ref_object_base_unref(tfile, handle,
+						 TTM_REF_SYNCCPU_WRITE);
+
+	return 0;
+}
+
+
+/**
+ * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_synccpu_arg *arg =
+		(struct drm_vmw_synccpu_arg *) data;
+	struct vmw_buffer_object *vbo;
+	struct vmw_user_buffer_object *user_bo;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct ttm_base_object *buffer_base;
+	int ret;
+
+	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+			       drm_vmw_synccpu_dontblock |
+			       drm_vmw_synccpu_allow_cs)) != 0) {
+		DRM_ERROR("Illegal synccpu flags.\n");
+		return -EINVAL;
+	}
+
+	switch (arg->op) {
+	case drm_vmw_synccpu_grab:
+		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
+					     &buffer_base);
+		if (unlikely(ret != 0))
+			return ret;
+
+		user_bo = container_of(vbo, struct vmw_user_buffer_object,
+				       vbo);
+		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+		vmw_bo_unreference(&vbo);
+		ttm_base_object_unref(&buffer_base);
+		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+			     ret != -EBUSY)) {
+			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	case drm_vmw_synccpu_release:
+		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+						  arg->flags);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	default:
+		DRM_ERROR("Invalid synccpu operation.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+/**
+ * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
+ * allocation functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and allocates a
+ * struct vmw_user_buffer_object bo.
+ */
+int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	union drm_vmw_alloc_dmabuf_arg *arg =
+	    (union drm_vmw_alloc_dmabuf_arg *)data;
+	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+	struct vmw_buffer_object *vbo;
+	uint32_t handle;
+	int ret;
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				req->size, false, &handle, &vbo,
+				NULL);
+	if (unlikely(ret != 0))
+		goto out_no_bo;
+
+	rep->handle = handle;
+	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
+	rep->cur_gmr_id = handle;
+	rep->cur_gmr_offset = 0;
+
+	vmw_bo_unreference(&vbo);
+
+out_no_bo:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+
+	return ret;
+}
+
+
+/**
+ * vmw_bo_unref_ioctl - Generic handle close ioctl.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and closes a
+ * handle to a TTM base object, optionally freeing the object.
+ */
+int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_vmw_unref_dmabuf_arg *arg =
+	    (struct drm_vmw_unref_dmabuf_arg *)data;
+
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 arg->handle,
+					 TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
+ *
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle
+ * @out: Pointer to where a pointer to the embedded
+ * struct vmw_buffer_object should be placed.
+ * @p_base: Pointer to where a pointer to the TTM base object should be
+ * placed, or NULL if no such pointer is required.
+ * Return: Zero on success, negative error code on error.
+ *
+ * Both the output base object pointer and the vmw buffer object pointer
+ * will be refcounted.
+ */
+int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+		       uint32_t handle, struct vmw_buffer_object **out,
+		       struct ttm_base_object **p_base)
+{
+	struct vmw_user_buffer_object *vmw_user_bo;
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL)) {
+		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+			  (unsigned long)handle);
+		return -ESRCH;
+	}
+
+	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+		ttm_base_object_unref(&base);
+		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+			  (unsigned long)handle);
+		return -EINVAL;
+	}
+
+	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+				   prime.base);
+	(void)ttm_bo_reference(&vmw_user_bo->vbo.base);
+	if (p_base)
+		*p_base = base;
+	else
+		ttm_base_object_unref(&base);
+	*out = &vmw_user_bo->vbo;
+
+	return 0;
+}
+
+
+/**
+ * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
+ *
+ * @tfile: The TTM object file to register the handle with.
+ * @vbo: The embedded vmw buffer object.
+ * @handle: Pointer to where the new handle should be placed.
+ * Return: Zero on success, negative error code on error.
+ */
+int vmw_user_bo_reference(struct ttm_object_file *tfile,
+			  struct vmw_buffer_object *vbo,
+			  uint32_t *handle)
+{
+	struct vmw_user_buffer_object *user_bo;
+
+	if (vbo->base.destroy != vmw_user_bo_destroy)
+		return -EINVAL;
+
+	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
+
+	*handle = user_bo->prime.base.hash.key;
+	return ttm_ref_object_add(tfile, &user_bo->prime.base,
+				  TTM_REF_USAGE, NULL, false);
+}
+
+
+/**
+ * vmw_bo_fence_single - Utility function to fence a single TTM buffer
+ *                       object without unreserving it.
+ *
+ * @bo:             Pointer to the struct ttm_buffer_object to fence.
+ * @fence:          Pointer to the fence. If NULL, this function will
+ *                  insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+			 struct vmw_fence_obj *fence)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+
+	struct vmw_private *dev_priv =
+		container_of(bdev, struct vmw_private, bdev);
+
+	if (fence == NULL) {
+		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+		reservation_object_add_excl_fence(bo->resv, &fence->base);
+		dma_fence_put(&fence->base);
+	} else
+		reservation_object_add_excl_fence(bo->resv, &fence->base);
+}
+
+
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
+ * that the arguments have a different format.
+ */
+int vmw_dumb_create(struct drm_file *file_priv,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_buffer_object *vbo;
+	int ret;
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				    args->size, false, &args->handle,
+				    &vbo, NULL);
+	if (unlikely(ret != 0))
+		goto out_no_bo;
+
+	vmw_bo_unreference(&vbo);
+out_no_bo:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
+
+
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+			struct drm_device *dev, uint32_t handle,
+			uint64_t *offset)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_buffer_object *out_buf;
+	int ret;
+
+	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
+	if (ret != 0)
+		return -EINVAL;
+
+	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+	vmw_bo_unreference(&out_buf);
+	return 0;
+}
+
+
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
+int vmw_dumb_destroy(struct drm_file *file_priv,
+		     struct drm_device *dev,
+		     uint32_t handle)
+{
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 handle, TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_bo_swap_notify - swapout notify callback.
+ *
+ * @bo: The buffer object to be swapped out.
+ */
+void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
+{
+	/* Is @bo embedded in a struct vmw_buffer_object? */
+	if (bo->destroy != vmw_bo_bo_free &&
+	    bo->destroy != vmw_user_bo_destroy)
+		return;
+
+	/* Kill any cached kernel maps before swapout */
+	vmw_bo_unmap(vmw_buffer_object(bo));
+}
+
+
+/**
+ * vmw_bo_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
+ *
+ * Detaches cached maps and device bindings that require that the
+ * buffer doesn't move.
+ */
+void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+			struct ttm_mem_reg *mem)
+{
+	struct vmw_buffer_object *vbo;
+
+	if (mem == NULL)
+		return;
+
+	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
+	if (bo->destroy != vmw_bo_bo_free &&
+	    bo->destroy != vmw_user_bo_destroy)
+		return;
+
+	vbo = container_of(bo, struct vmw_buffer_object, base);
+
+	/*
+	 * Kill any cached kernel maps before move. An optimization could
+	 * be to do this iff source or destination memory type is in VRAM.
+	 */
+	vmw_bo_unmap(vbo);
+
+	/*
+	 * If we're moving a backup MOB out of MOB placement, then make sure we
+	 * read back all resource content first, and unbind the MOB from
+	 * the resource.
+	 */
+	if (mem->mem_type != VMW_PL_MOB)
+		vmw_resource_unbind_list(vbo);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index ff8acc74786c..d460a721eaee 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -424,7 +424,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
 					  &fence, NULL);
 
-	vmw_fence_single_bo(bo, fence);
+	vmw_bo_fence_single(bo, fence);
 
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
@@ -648,7 +648,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
 					  &fence, NULL);
 
-	vmw_fence_single_bo(bo, fence);
+	vmw_bo_fence_single(bo, fence);
 
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 1052cd3cb700..980e6e7cd592 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -324,7 +324,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
 	mutex_unlock(&dev_priv->binding_mutex);
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-	vmw_fence_single_bo(bo, fence);
+	vmw_bo_fence_single(bo, fence);
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
 
@@ -367,7 +367,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
 	}
 
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-	vmw_fence_single_bo(&res->backup->base, fence);
+	vmw_bo_fence_single(&res->backup->base, fence);
 	vmw_fence_obj_unreference(&fence);
 
 	return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 25c2f668ad6c..769d72fabb56 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -630,36 +630,6 @@ extern int vmw_user_resource_lookup_handle(
 	uint32_t handle,
 	const struct vmw_user_resource_conv *converter,
 	struct vmw_resource **p_res);
-extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_bo_init(struct vmw_private *dev_priv,
-		       struct vmw_buffer_object *vmw_bo,
-		       size_t size, struct ttm_placement *placement,
-		       bool interuptable,
-		       void (*bo_free)(struct ttm_buffer_object *bo));
-extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
-				     struct ttm_object_file *tfile);
-extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     uint32_t size,
-			     bool shareable,
-			     uint32_t *handle,
-			     struct vmw_buffer_object **p_dma_buf,
-			     struct ttm_base_object **p_base);
-extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
-				 struct vmw_buffer_object *dma_buf,
-				 uint32_t *handle);
-extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
-extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
-extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv);
-extern uint32_t vmw_bo_validate_node(struct ttm_buffer_object *bo,
-				     uint32_t cur_validate_node);
-extern void vmw_bo_validate_clear(struct ttm_buffer_object *bo);
-extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
-			      uint32_t id, struct vmw_buffer_object **out,
-			      struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -672,16 +642,11 @@ extern void vmw_resource_unreserve(struct vmw_resource *res,
 				   bool switch_backup,
 				   struct vmw_buffer_object *new_backup,
 				   unsigned long new_backup_offset);
-extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
-				     struct ttm_mem_reg *mem);
 extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem);
-extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
 extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
-extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
-				struct vmw_fence_obj *fence);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
-
+extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
 
 /**
  * Buffer object helper functions - vmwgfx_bo.c
@@ -705,8 +670,40 @@ extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
 				 SVGAGuestPtr *ptr);
 extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
-extern void *vmw_buffer_object_map_and_cache(struct vmw_buffer_object *vbo);
-extern void vmw_buffer_object_unmap(struct vmw_buffer_object *vbo);
+extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_init(struct vmw_private *dev_priv,
+		       struct vmw_buffer_object *vmw_bo,
+		       size_t size, struct ttm_placement *placement,
+		       bool interruptible,
+		       void (*bo_free)(struct ttm_buffer_object *bo));
+extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+				     struct ttm_object_file *tfile);
+extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+			     struct ttm_object_file *tfile,
+			     uint32_t size,
+			     bool shareable,
+			     uint32_t *handle,
+			     struct vmw_buffer_object **p_dma_buf,
+			     struct ttm_base_object **p_base);
+extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
+				 struct vmw_buffer_object *dma_buf,
+				 uint32_t *handle);
+extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+			      uint32_t id, struct vmw_buffer_object **out,
+			      struct ttm_base_object **base);
+extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+				struct vmw_fence_obj *fence);
+extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
+extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
+extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+			       struct ttm_mem_reg *mem);
+extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index dcde4985c574..b913a56f3426 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -197,7 +197,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
 
 	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
 	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
-	virtual = vmw_buffer_object_map_and_cache(vbo);
+	virtual = vmw_bo_map_and_cache(vbo);
 	if (!virtual)
 		goto out_unreserve;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 7a32be0cef14..e7a7a2e73bbf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2493,7 +2493,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
 					 file_priv ? &handle : NULL);
 	if (buf)
-		vmw_fence_single_bo(&buf->base, fence);
+		vmw_bo_fence_single(&buf->base, fence);
 	if (file_priv)
 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
 					    ret, user_fence_rep, fence,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index d07c585e3c1d..e44da7bb4861 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -225,7 +225,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
 		ret = ttm_bo_reserve(bo, false, true, NULL);
 		BUG_ON(ret != 0);
 
-		vmw_fence_single_bo(bo, NULL);
+		vmw_bo_fence_single(bo, NULL);
 		ttm_bo_unreserve(bo);
 	}
 
@@ -362,7 +362,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
 	ret = ttm_bo_reserve(bo, false, true, NULL);
 	BUG_ON(ret != 0);
 
-	vmw_fence_single_bo(bo, NULL);
+	vmw_bo_fence_single(bo, NULL);
 	ttm_bo_unreserve(bo);
 
 	ttm_bo_unref(&batch->otable_bo);
@@ -620,7 +620,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
 		vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	}
 	if (bo) {
-		vmw_fence_single_bo(bo, NULL);
+		vmw_bo_fence_single(bo, NULL);
 		ttm_bo_unreserve(bo);
 	}
 	vmw_fifo_resource_dec(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 5aaf9ac65cba..6d2cad744f5d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -27,7 +27,6 @@
 
 #include "vmwgfx_drv.h"
 #include <drm/vmwgfx_drm.h>
-#include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
@@ -35,30 +34,6 @@
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
-struct vmw_user_buffer_object {
-	struct ttm_prime_object prime;
-	struct vmw_buffer_object vbo;
-};
-
-struct vmw_bo_user_rep {
-	uint32_t handle;
-	uint64_t map_handle;
-};
-
-static inline struct vmw_buffer_object *
-vmw_buffer_object(struct ttm_buffer_object *bo)
-{
-	return container_of(bo, struct vmw_buffer_object, base);
-}
-
-static inline struct vmw_user_buffer_object *
-vmw_user_buffer_object(struct ttm_buffer_object *bo)
-{
-	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
-
-	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
-}
-
 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 {
 	kref_get(&res->kref);
@@ -316,509 +291,6 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 	return ret;
 }
 
-/**
- * Buffer management.
- */
-
-/**
- * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @size: The requested buffer size.
- * @user: Whether this is an ordinary dma buffer or a user dma buffer.
- */
-static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
-			      bool user)
-{
-	static size_t struct_size, user_struct_size;
-	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
-
-	if (unlikely(struct_size == 0)) {
-		size_t backend_size = ttm_round_pot(vmw_tt_size);
-
-		struct_size = backend_size +
-			ttm_round_pot(sizeof(struct vmw_buffer_object));
-		user_struct_size = backend_size +
-			ttm_round_pot(sizeof(struct vmw_user_buffer_object));
-	}
-
-	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
-		page_array_size +=
-			ttm_round_pot(num_pages * sizeof(dma_addr_t));
-
-	return ((user) ? user_struct_size : struct_size) +
-		page_array_size;
-}
-
-void vmw_bo_bo_free(struct ttm_buffer_object *bo)
-{
-	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
-
-	vmw_buffer_object_unmap(vmw_bo);
-	kfree(vmw_bo);
-}
-
-static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
-{
-	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
-
-	vmw_buffer_object_unmap(&vmw_user_bo->vbo);
-	ttm_prime_object_kfree(vmw_user_bo, prime);
-}
-
-int vmw_bo_init(struct vmw_private *dev_priv,
-		struct vmw_buffer_object *vmw_bo,
-		size_t size, struct ttm_placement *placement,
-		bool interruptible,
-		void (*bo_free)(struct ttm_buffer_object *bo))
-{
-	struct ttm_bo_device *bdev = &dev_priv->bdev;
-	size_t acc_size;
-	int ret;
-	bool user = (bo_free == &vmw_user_bo_destroy);
-
-	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
-
-	acc_size = vmw_bo_acc_size(dev_priv, size, user);
-	memset(vmw_bo, 0, sizeof(*vmw_bo));
-
-	INIT_LIST_HEAD(&vmw_bo->res_list);
-
-	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
-			  ttm_bo_type_device, placement,
-			  0, interruptible, acc_size,
-			  NULL, NULL, bo_free);
-	return ret;
-}
-
-static void vmw_user_bo_release(struct ttm_base_object **p_base)
-{
-	struct vmw_user_buffer_object *vmw_user_bo;
-	struct ttm_base_object *base = *p_base;
-	struct ttm_buffer_object *bo;
-
-	*p_base = NULL;
-
-	if (unlikely(base == NULL))
-		return;
-
-	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
-				   prime.base);
-	bo = &vmw_user_bo->vbo.base;
-	ttm_bo_unref(&bo);
-}
-
-static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
-					enum ttm_ref_type ref_type)
-{
-	struct vmw_user_buffer_object *user_bo;
-
-	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
-
-	switch (ref_type) {
-	case TTM_REF_SYNCCPU_WRITE:
-		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
-		break;
-	default:
-		BUG();
-	}
-}
-
-/**
- * vmw_user_bo_alloc - Allocate a user dma buffer
- *
- * @dev_priv: Pointer to a struct device private.
- * @tfile: Pointer to a struct ttm_object_file on which to register the user
- * object.
- * @size: Size of the dma buffer.
- * @shareable: Boolean whether the buffer is shareable with other open files.
- * @handle: Pointer to where the handle value should be assigned.
- * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
- * should be assigned.
- */
-int vmw_user_bo_alloc(struct vmw_private *dev_priv,
-		      struct ttm_object_file *tfile,
-		      uint32_t size,
-		      bool shareable,
-		      uint32_t *handle,
-		      struct vmw_buffer_object **p_vbo,
-		      struct ttm_base_object **p_base)
-{
-	struct vmw_user_buffer_object *user_bo;
-	struct ttm_buffer_object *tmp;
-	int ret;
-
-	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
-	if (unlikely(!user_bo)) {
-		DRM_ERROR("Failed to allocate a buffer.\n");
-		return -ENOMEM;
-	}
-
-	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
-			  (dev_priv->has_mob) ?
-			  &vmw_sys_placement :
-			  &vmw_vram_sys_placement, true,
-			  &vmw_user_bo_destroy);
-	if (unlikely(ret != 0))
-		return ret;
-
-	tmp = ttm_bo_reference(&user_bo->vbo.base);
-	ret = ttm_prime_object_init(tfile,
-				    size,
-				    &user_bo->prime,
-				    shareable,
-				    ttm_buffer_type,
-				    &vmw_user_bo_release,
-				    &vmw_user_bo_ref_obj_release);
-	if (unlikely(ret != 0)) {
-		ttm_bo_unref(&tmp);
-		goto out_no_base_object;
-	}
-
-	*p_vbo = &user_bo->vbo;
-	if (p_base) {
-		*p_base = &user_bo->prime.base;
-		kref_get(&(*p_base)->refcount);
-	}
-	*handle = user_bo->prime.base.hash.key;
-
-out_no_base_object:
-	return ret;
-}
-
-/**
- * vmw_user_bo_verify_access - verify access permissions on this
- * buffer object.
- *
- * @bo: Pointer to the buffer object being accessed
- * @tfile: Identifying the caller.
- */
-int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
-				  struct ttm_object_file *tfile)
-{
-	struct vmw_user_buffer_object *vmw_user_bo;
-
-	if (unlikely(bo->destroy != vmw_user_bo_destroy))
-		return -EPERM;
-
-	vmw_user_bo = vmw_user_buffer_object(bo);
-
-	/* Check that the caller has opened the object. */
-	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
-		return 0;
-
-	DRM_ERROR("Could not grant buffer access.\n");
-	return -EPERM;
-}
-
-/**
- * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
- * access, idling previous GPU operations on the buffer and optionally
- * blocking it for further command submissions.
- *
- * @user_bo: Pointer to the buffer object being grabbed for CPU access
- * @tfile: Identifying the caller.
- * @flags: Flags indicating how the grab should be performed.
- *
- * A blocking grab will be automatically released when @tfile is closed.
- */
-static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
-					struct ttm_object_file *tfile,
-					uint32_t flags)
-{
-	struct ttm_buffer_object *bo = &user_bo->vbo.base;
-	bool existed;
-	int ret;
-
-	if (flags & drm_vmw_synccpu_allow_cs) {
-		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
-		long lret;
-
-		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
-							   nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
-		if (!lret)
-			return -EBUSY;
-		else if (lret < 0)
-			return lret;
-		return 0;
-	}
-
-	ret = ttm_bo_synccpu_write_grab
-		(bo, !!(flags & drm_vmw_synccpu_dontblock));
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
-				 TTM_REF_SYNCCPU_WRITE, &existed, false);
-	if (ret != 0 || existed)
-		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
-
-	return ret;
-}
-
-/**
- * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
- * and unblock command submission on the buffer if blocked.
- *
- * @handle: Handle identifying the buffer object.
- * @tfile: Identifying the caller.
- * @flags: Flags indicating the type of release.
- */
-static int vmw_user_bo_synccpu_release(uint32_t handle,
-					   struct ttm_object_file *tfile,
-					   uint32_t flags)
-{
-	if (!(flags & drm_vmw_synccpu_allow_cs))
-		return ttm_ref_object_base_unref(tfile, handle,
-						 TTM_REF_SYNCCPU_WRITE);
-
-	return 0;
-}
-
-/**
- * vmw_user_bo_synccpu_release - ioctl function implementing the synccpu
- * functionality.
- *
- * @dev: Identifies the drm device.
- * @data: Pointer to the ioctl argument.
- * @file_priv: Identifies the caller.
- *
- * This function checks the ioctl arguments for validity and calls the
- * relevant synccpu functions.
- */
-int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
-				  struct drm_file *file_priv)
-{
-	struct drm_vmw_synccpu_arg *arg =
-		(struct drm_vmw_synccpu_arg *) data;
-	struct vmw_buffer_object *vbo;
-	struct vmw_user_buffer_object *user_bo;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct ttm_base_object *buffer_base;
-	int ret;
-
-	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
-	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
-			       drm_vmw_synccpu_dontblock |
-			       drm_vmw_synccpu_allow_cs)) != 0) {
-		DRM_ERROR("Illegal synccpu flags.\n");
-		return -EINVAL;
-	}
-
-	switch (arg->op) {
-	case drm_vmw_synccpu_grab:
-		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
-					     &buffer_base);
-		if (unlikely(ret != 0))
-			return ret;
-
-		user_bo = container_of(vbo, struct vmw_user_buffer_object,
-				       vbo);
-		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
-		vmw_bo_unreference(&vbo);
-		ttm_base_object_unref(&buffer_base);
-		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
-			     ret != -EBUSY)) {
-			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
-				  (unsigned int) arg->handle);
-			return ret;
-		}
-		break;
-	case drm_vmw_synccpu_release:
-		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
-						  arg->flags);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
-				  (unsigned int) arg->handle);
-			return ret;
-		}
-		break;
-	default:
-		DRM_ERROR("Invalid synccpu operation.\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	union drm_vmw_alloc_dmabuf_arg *arg =
-	    (union drm_vmw_alloc_dmabuf_arg *)data;
-	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
-	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
-	struct vmw_buffer_object *vbo;
-	uint32_t handle;
-	int ret;
-
-	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-				req->size, false, &handle, &vbo,
-				NULL);
-	if (unlikely(ret != 0))
-		goto out_no_bo;
-
-	rep->handle = handle;
-	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
-	rep->cur_gmr_id = handle;
-	rep->cur_gmr_offset = 0;
-
-	vmw_bo_unreference(&vbo);
-
-out_no_bo:
-	ttm_read_unlock(&dev_priv->reservation_sem);
-
-	return ret;
-}
-
-int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	struct drm_vmw_unref_dmabuf_arg *arg =
-	    (struct drm_vmw_unref_dmabuf_arg *)data;
-
-	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-					 arg->handle,
-					 TTM_REF_USAGE);
-}
-
-int vmw_user_bo_lookup(struct ttm_object_file *tfile,
-			   uint32_t handle, struct vmw_buffer_object **out,
-			   struct ttm_base_object **p_base)
-{
-	struct vmw_user_buffer_object *vmw_user_bo;
-	struct ttm_base_object *base;
-
-	base = ttm_base_object_lookup(tfile, handle);
-	if (unlikely(base == NULL)) {
-		pr_err("Invalid buffer object handle 0x%08lx\n",
-		       (unsigned long)handle);
-		return -ESRCH;
-	}
-
-	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
-		ttm_base_object_unref(&base);
-		pr_err("Invalid buffer object handle 0x%08lx\n",
-		       (unsigned long)handle);
-		return -EINVAL;
-	}
-
-	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
-				   prime.base);
-	(void)ttm_bo_reference(&vmw_user_bo->vbo.base);
-	if (p_base)
-		*p_base = base;
-	else
-		ttm_base_object_unref(&base);
-	*out = &vmw_user_bo->vbo;
-
-	return 0;
-}
-
-int vmw_user_bo_reference(struct ttm_object_file *tfile,
-			      struct vmw_buffer_object *vbo,
-			      uint32_t *handle)
-{
-	struct vmw_user_buffer_object *user_bo;
-
-	if (vbo->base.destroy != vmw_user_bo_destroy)
-		return -EINVAL;
-
-	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
-
-	*handle = user_bo->prime.base.hash.key;
-	return ttm_ref_object_add(tfile, &user_bo->prime.base,
-				  TTM_REF_USAGE, NULL, false);
-}
-
-/**
- * vmw_dumb_create - Create a dumb kms buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @args: Pointer to a struct drm_mode_create_dumb structure
- *
- * This is a driver callback for the core drm create_dumb functionality.
- * Note that this is very similar to the vmw_bo_alloc ioctl, except
- * that the arguments have a different format.
- */
-int vmw_dumb_create(struct drm_file *file_priv,
-		    struct drm_device *dev,
-		    struct drm_mode_create_dumb *args)
-{
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_buffer_object *vbo;
-	int ret;
-
-	args->pitch = args->width * ((args->bpp + 7) / 8);
-	args->size = args->pitch * args->height;
-
-	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-				    args->size, false, &args->handle,
-				    &vbo, NULL);
-	if (unlikely(ret != 0))
-		goto out_no_bo;
-
-	vmw_bo_unreference(&vbo);
-out_no_bo:
-	ttm_read_unlock(&dev_priv->reservation_sem);
-	return ret;
-}
-
-/**
- * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * @offset: The address space offset returned.
- *
- * This is a driver callback for the core drm dumb_map_offset functionality.
- */
-int vmw_dumb_map_offset(struct drm_file *file_priv,
-			struct drm_device *dev, uint32_t handle,
-			uint64_t *offset)
-{
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct vmw_buffer_object *out_buf;
-	int ret;
-
-	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
-	if (ret != 0)
-		return -EINVAL;
-
-	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
-	vmw_bo_unreference(&out_buf);
-	return 0;
-}
-
-/**
- * vmw_dumb_destroy - Destroy a dumb boffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- *
- * This is a driver callback for the core drm dumb_destroy functionality.
- */
-int vmw_dumb_destroy(struct drm_file *file_priv,
-		     struct drm_device *dev,
-		     uint32_t handle)
-{
-	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-					 handle, TTM_REF_USAGE);
-}
-
 /**
  * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
  *
@@ -1182,109 +654,39 @@ int vmw_resource_validate(struct vmw_resource *res)
 	return ret;
 }
 
-/**
- * vmw_fence_single_bo - Utility function to fence a single TTM buffer
- *                       object without unreserving it.
- *
- * @bo:             Pointer to the struct ttm_buffer_object to fence.
- * @fence:          Pointer to the fence. If NULL, this function will
- *                  insert a fence into the command stream..
- *
- * Contrary to the ttm_eu version of this function, it takes only
- * a single buffer object instead of a list, and it also doesn't
- * unreserve the buffer object, which needs to be done separately.
- */
-void vmw_fence_single_bo(struct ttm_buffer_object *bo,
-			 struct vmw_fence_obj *fence)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-
-	struct vmw_private *dev_priv =
-		container_of(bdev, struct vmw_private, bdev);
-
-	if (fence == NULL) {
-		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-		reservation_object_add_excl_fence(bo->resv, &fence->base);
-		dma_fence_put(&fence->base);
-	} else
-		reservation_object_add_excl_fence(bo->resv, &fence->base);
-}
 
 /**
- * vmw_resource_move_notify - TTM move_notify_callback
+ * vmw_resource_unbind_list - Unbind all resources attached to @vbo
  *
- * @bo: The TTM buffer object about to move.
- * @mem: The struct ttm_mem_reg indicating to what memory
- *       region the move is taking place.
+ * @vbo: Pointer to the current backing MOB.
  *
  * Evicts the Guest Backed hardware resource if the backup
  * buffer is being moved out of MOB memory.
- * Note that this function should not race with the resource
- * validation code as long as it accesses only members of struct
- * resource that remain static while bo::res is !NULL and
- * while we have @bo reserved. struct resource::backup is *not* a
- * static member. The resource validation code will take care
- * to set @bo::res to NULL, while having @bo reserved when the
- * buffer is no longer bound to the resource, so @bo:res can be
- * used to determine whether there is a need to unbind and whether
- * it is safe to unbind.
+ * Note that this function will not race with the resource
+ * validation code, since resource validation and eviction
+ * both require the backup buffer to be reserved.
  */
-void vmw_resource_move_notify(struct ttm_buffer_object *bo,
-			      struct ttm_mem_reg *mem)
+void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
 {
-	struct vmw_buffer_object *vbo;
-
-	if (mem == NULL)
-		return;
-
-	if (bo->destroy != vmw_bo_bo_free &&
-	    bo->destroy != vmw_user_bo_destroy)
-		return;
-
-	vbo = container_of(bo, struct vmw_buffer_object, base);
-
-	/*
-	 * Kill any cached kernel maps before move. An optimization could
-	 * be to do this iff source or destination memory type is VRAM.
-	 */
-	vmw_buffer_object_unmap(vbo);
 
-	if (mem->mem_type != VMW_PL_MOB) {
-		struct vmw_resource *res, *n;
-		struct ttm_validate_buffer val_buf;
+	struct vmw_resource *res, *next;
+	struct ttm_validate_buffer val_buf = {
+		.bo = &vbo->base,
+		.shared = false
+	};
 
-		val_buf.bo = bo;
-		val_buf.shared = false;
+	lockdep_assert_held(&vbo->base.resv->lock.base);
+	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
+		if (!res->func->unbind)
+			continue;
 
-		list_for_each_entry_safe(res, n, &vbo->res_list, mob_head) {
-
-			if (unlikely(res->func->unbind == NULL))
-				continue;
-
-			(void) res->func->unbind(res, true, &val_buf);
-			res->backup_dirty = true;
-			res->res_dirty = false;
-			list_del_init(&res->mob_head);
-		}
-
-		(void) ttm_bo_wait(bo, false, false);
+		(void) res->func->unbind(res, true, &val_buf);
+		res->backup_dirty = true;
+		res->res_dirty = false;
+		list_del_init(&res->mob_head);
 	}
-}
-
-
-/**
- * vmw_resource_swap_notify - swapout notify callback.
- *
- * @bo: The buffer object to be swapped out.
- */
-void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
-{
-	if (bo->destroy != vmw_bo_bo_free &&
-	    bo->destroy != vmw_user_bo_destroy)
-		return;
 
-	/* Kill any cached kernel maps before swapout */
-	vmw_buffer_object_unmap(vmw_buffer_object(bo));
+	(void) ttm_bo_wait(&vbo->base, false, false);
 }
 
 
@@ -1370,7 +772,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
 
 		/* Create a fence and attach the BO to it */
 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-		vmw_fence_single_bo(bo, fence);
+		vmw_bo_fence_single(bo, fence);
 
 		if (fence != NULL)
 			vmw_fence_obj_unreference(&fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index f6c939f3ff5e..4ec1a91990d9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -306,7 +306,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
 					  &fence, NULL);
 
-	vmw_fence_single_bo(val_buf->bo, fence);
+	vmw_bo_fence_single(val_buf->bo, fence);
 
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
@@ -537,7 +537,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
 
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
 					  &fence, NULL);
-	vmw_fence_single_bo(val_buf->bo, fence);
+	vmw_bo_fence_single(val_buf->bo, fence);
 
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 2b2e8aa7114a..f2af53613803 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -468,7 +468,7 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
 					  &fence, NULL);
 
-	vmw_fence_single_bo(val_buf->bo, fence);
+	vmw_bo_fence_single(val_buf->bo, fence);
 
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
@@ -1210,7 +1210,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
 					  &fence, NULL);
 
-	vmw_fence_single_bo(val_buf->bo, fence);
+	vmw_bo_fence_single(val_buf->bo, fence);
 
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 0931f43913b1..239e1edf0919 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -852,7 +852,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
 			    bool evict,
 			    struct ttm_mem_reg *mem)
 {
-	vmw_resource_move_notify(bo, mem);
+	vmw_bo_move_notify(bo, mem);
 	vmw_query_move_notify(bo, mem);
 }
 
@@ -864,7 +864,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
  */
 static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
-	vmw_resource_swap_notify(bo);
+	vmw_bo_swap_notify(bo);
 	(void) ttm_bo_wait(bo, false, false);
 }
 
-- 
2.18.0.rc1
