From: Thomas Zimmermann <tzimmermann@suse.de>
To: airlied@redhat.com, sean@poorly.run, daniel@ffwll.ch,
	noralf@tronnes.org, sam@ravnborg.org, emil.velikov@collabora.com
Cc: Thomas Zimmermann <tzimmermann@suse.de>, dri-devel@lists.freedesktop.org
Subject: [PATCH 3/3] drm/udl: Switch to SHMEM
Date: Mon, 28 Oct 2019 09:45:49 +0100
Message-ID: <20191028084549.30243-4-tzimmermann@suse.de>
In-Reply-To: <20191028084549.30243-1-tzimmermann@suse.de>

udl's GEM code and the generic SHMEM helpers are almost identical.
Replace the former with SHMEM. udl's dmabuf support is removed in
favor of the generic GEM PRIME functions.

The main difference is in the caching flags for mmap'ed pages. By
default, SHMEM maps pages with (uncached) write-combining. In udl's
memory-management code, only imported buffers use write-combining;
pages of locally created buffer objects are mmap'ed with caching
enabled. To keep this optimization, udl provides its own mmap function
for GEM objects, which fixes up the mapping flags.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
---
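The caching fixup mentioned above boils down to a thin mmap wrapper
around drm_gem_shmem_mmap(); condensed here from the udl_gem.c hunk
below for quick reference:

  static int udl_gem_object_mmap(struct drm_gem_object *obj,
				 struct vm_area_struct *vma)
  {
	int ret;

	/* Let the SHMEM helper set up the mapping first. */
	ret = drm_gem_shmem_mmap(obj, vma);
	if (ret)
		return ret;

	/* Start from the cacheable default and only fall back to
	 * write-combining for imported buffers.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (obj->import_attach)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
  }
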
 drivers/gpu/drm/udl/Kconfig      |   1 +
 drivers/gpu/drm/udl/Makefile     |   2 +-
 drivers/gpu/drm/udl/udl_dmabuf.c | 254 -------------------------------
 drivers/gpu/drm/udl/udl_drv.c    |  29 +---
 drivers/gpu/drm/udl/udl_drv.h    |  30 +---
 drivers/gpu/drm/udl/udl_fb.c     |  65 ++++----
 drivers/gpu/drm/udl/udl_gem.c    | 237 +++++-----------------------
 7 files changed, 84 insertions(+), 534 deletions(-)
 delete mode 100644 drivers/gpu/drm/udl/udl_dmabuf.c

diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index b4d179b87f01..145b2a95ce58 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -5,6 +5,7 @@ config DRM_UDL
 	depends on USB_SUPPORT
 	depends on USB_ARCH_HAS_HCD
 	select USB
+	select DRM_GEM_SHMEM_HELPER
 	select DRM_KMS_HELPER
 	help
 	  This is a KMS driver for the USB displaylink video adapters.
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
index e5bb6f757e11..9c42820ae33d 100644
--- a/drivers/gpu/drm/udl/Makefile
+++ b/drivers/gpu/drm/udl/Makefile
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o
+udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o
 
 obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
deleted file mode 100644
index b1c1ee64de59..000000000000
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ /dev/null
@@ -1,254 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * udl_dmabuf.c
- *
- * Copyright (c) 2014 The Chromium OS Authors
- */
-
-#include <linux/shmem_fs.h>
-#include <linux/dma-buf.h>
-
-#include <drm/drm_prime.h>
-
-#include "udl_drv.h"
-
-struct udl_drm_dmabuf_attachment {
-	struct sg_table sgt;
-	enum dma_data_direction dir;
-	bool is_mapped;
-};
-
-static int udl_attach_dma_buf(struct dma_buf *dmabuf,
-			      struct dma_buf_attachment *attach)
-{
-	struct udl_drm_dmabuf_attachment *udl_attach;
-
-	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
-			attach->dmabuf->size);
-
-	udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
-	if (!udl_attach)
-		return -ENOMEM;
-
-	udl_attach->dir = DMA_NONE;
-	attach->priv = udl_attach;
-
-	return 0;
-}
-
-static void udl_detach_dma_buf(struct dma_buf *dmabuf,
-			       struct dma_buf_attachment *attach)
-{
-	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
-	struct sg_table *sgt;
-
-	if (!udl_attach)
-		return;
-
-	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
-			attach->dmabuf->size);
-
-	sgt = &udl_attach->sgt;
-
-	if (udl_attach->dir != DMA_NONE)
-		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
-				udl_attach->dir);
-
-	sg_free_table(sgt);
-	kfree(udl_attach);
-	attach->priv = NULL;
-}
-
-static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
-					enum dma_data_direction dir)
-{
-	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
-	struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
-	struct drm_device *dev = obj->base.dev;
-	struct udl_device *udl = dev->dev_private;
-	struct scatterlist *rd, *wr;
-	struct sg_table *sgt = NULL;
-	unsigned int i;
-	int page_count;
-	int nents, ret;
-
-	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
-			attach->dmabuf->size, dir);
-
-	/* just return current sgt if already requested. */
-	if (udl_attach->dir == dir && udl_attach->is_mapped)
-		return &udl_attach->sgt;
-
-	if (!obj->pages) {
-		ret = udl_gem_get_pages(obj);
-		if (ret) {
-			DRM_ERROR("failed to map pages.\n");
-			return ERR_PTR(ret);
-		}
-	}
-
-	page_count = obj->base.size / PAGE_SIZE;
-	obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
-	if (IS_ERR(obj->sg)) {
-		DRM_ERROR("failed to allocate sgt.\n");
-		return ERR_CAST(obj->sg);
-	}
-
-	sgt = &udl_attach->sgt;
-
-	ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
-	if (ret) {
-		DRM_ERROR("failed to alloc sgt.\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	mutex_lock(&udl->gem_lock);
-
-	rd = obj->sg->sgl;
-	wr = sgt->sgl;
-	for (i = 0; i < sgt->orig_nents; ++i) {
-		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
-		rd = sg_next(rd);
-		wr = sg_next(wr);
-	}
-
-	if (dir != DMA_NONE) {
-		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
-		if (!nents) {
-			DRM_ERROR("failed to map sgl with iommu.\n");
-			sg_free_table(sgt);
-			sgt = ERR_PTR(-EIO);
-			goto err_unlock;
-		}
-	}
-
-	udl_attach->is_mapped = true;
-	udl_attach->dir = dir;
-	attach->priv = udl_attach;
-
-err_unlock:
-	mutex_unlock(&udl->gem_lock);
-	return sgt;
-}
-
-static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
-			      struct sg_table *sgt,
-			      enum dma_data_direction dir)
-{
-	/* Nothing to do. */
-	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
-			attach->dmabuf->size, dir);
-}
-
-static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
-static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
-			      unsigned long page_num, void *addr)
-{
-	/* TODO */
-}
-
-static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
-			   struct vm_area_struct *vma)
-{
-	/* TODO */
-
-	return -EINVAL;
-}
-
-static const struct dma_buf_ops udl_dmabuf_ops = {
-	.attach			= udl_attach_dma_buf,
-	.detach			= udl_detach_dma_buf,
-	.map_dma_buf		= udl_map_dma_buf,
-	.unmap_dma_buf		= udl_unmap_dma_buf,
-	.map			= udl_dmabuf_kmap,
-	.unmap			= udl_dmabuf_kunmap,
-	.mmap			= udl_dmabuf_mmap,
-	.release		= drm_gem_dmabuf_release,
-};
-
-struct dma_buf *udl_gem_prime_export(struct drm_gem_object *obj, int flags)
-{
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-	exp_info.ops = &udl_dmabuf_ops;
-	exp_info.size = obj->size;
-	exp_info.flags = flags;
-	exp_info.priv = obj;
-
-	return drm_gem_dmabuf_export(obj->dev, &exp_info);
-}
-
-static int udl_prime_create(struct drm_device *dev,
-			    size_t size,
-			    struct sg_table *sg,
-			    struct udl_gem_object **obj_p)
-{
-	struct udl_gem_object *obj;
-	int npages;
-
-	npages = size / PAGE_SIZE;
-
-	*obj_p = NULL;
-	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
-	if (!obj)
-		return -ENOMEM;
-
-	obj->sg = sg;
-	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-	if (obj->pages == NULL) {
-		DRM_ERROR("obj pages is NULL %d\n", npages);
-		return -ENOMEM;
-	}
-
-	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
-
-	*obj_p = obj;
-	return 0;
-}
-
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
-				struct dma_buf *dma_buf)
-{
-	struct dma_buf_attachment *attach;
-	struct sg_table *sg;
-	struct udl_gem_object *uobj;
-	int ret;
-
-	/* need to attach */
-	get_device(dev->dev);
-	attach = dma_buf_attach(dma_buf, dev->dev);
-	if (IS_ERR(attach)) {
-		put_device(dev->dev);
-		return ERR_CAST(attach);
-	}
-
-	get_dma_buf(dma_buf);
-
-	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
-		goto fail_detach;
-	}
-
-	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
-	if (ret)
-		goto fail_unmap;
-
-	uobj->base.import_attach = attach;
-
-	return &uobj->base;
-
-fail_unmap:
-	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
-	dma_buf_detach(dma_buf, attach);
-	dma_buf_put(dma_buf);
-	put_device(dev->dev);
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 778a0b652f64..563cc5809e56 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -8,6 +8,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_print.h>
@@ -32,23 +33,7 @@ static int udl_usb_resume(struct usb_interface *interface)
 	return 0;
 }
 
-static const struct vm_operations_struct udl_gem_vm_ops = {
-	.fault = udl_gem_fault,
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
-
-static const struct file_operations udl_driver_fops = {
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.mmap = udl_drm_gem_mmap,
-	.poll = drm_poll,
-	.read = drm_read,
-	.unlocked_ioctl	= drm_ioctl,
-	.release = drm_release,
-	.compat_ioctl = drm_compat_ioctl,
-	.llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(udl_driver_fops);
 
 static void udl_driver_release(struct drm_device *dev)
 {
@@ -63,18 +48,10 @@ static struct drm_driver driver = {
 	.release = udl_driver_release,
 
 	/* gem hooks */
-	.gem_free_object_unlocked = udl_gem_free_object,
 	.gem_create_object = udl_driver_gem_create_object,
-	.gem_vm_ops = &udl_gem_vm_ops,
 
-	.dumb_create = udl_dumb_create,
-	.dumb_map_offset = udl_gem_mmap,
 	.fops = &udl_driver_fops,
-
-	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_export = udl_gem_prime_export,
-	.gem_prime_import = udl_gem_prime_import,
+	DRM_GEM_SHMEM_DRIVER_OPS,
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index fc312e791d18..987d99ae2dfa 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -73,18 +73,9 @@ struct udl_device {
 
 #define to_udl(x) container_of(x, struct udl_device, drm)
 
-struct udl_gem_object {
-	struct drm_gem_object base;
-	struct page **pages;
-	void *vmapping;
-	struct sg_table *sg;
-};
-
-#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
-
 struct udl_framebuffer {
 	struct drm_framebuffer base;
-	struct udl_gem_object *obj;
+	struct drm_gem_shmem_object *shmem;
 	bool active_16; /* active on the 16-bit channel */
 };
 
@@ -119,27 +110,8 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
 		     u32 byte_offset, u32 device_byte_offset, u32 byte_width,
 		     int *ident_ptr, int *sent_ptr);
 
-int udl_dumb_create(struct drm_file *file_priv,
-		    struct drm_device *dev,
-		    struct drm_mode_create_dumb *args);
-int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
-		 uint32_t handle, uint64_t *offset);
-
 struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
 						    size_t size);
-void udl_gem_free_object(struct drm_gem_object *gem_obj);
-struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
-					    size_t size);
-struct dma_buf *udl_gem_prime_export(struct drm_gem_object *obj, int flags);
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
-				struct dma_buf *dma_buf);
-
-int udl_gem_get_pages(struct udl_gem_object *obj);
-void udl_gem_put_pages(struct udl_gem_object *obj);
-int udl_gem_vmap(struct udl_gem_object *obj);
-void udl_gem_vunmap(struct udl_gem_object *obj);
-int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-vm_fault_t udl_gem_fault(struct vm_fault *vmf);
 
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		      int width, int height);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index ef3504d06343..2a163866ecd1 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -15,6 +15,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_modeset_helper.h>
 
 #include "udl_drv.h"
@@ -94,16 +95,14 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 	if (!fb->active_16)
 		return 0;
 
-	if (!fb->obj->vmapping) {
-		ret = udl_gem_vmap(fb->obj);
-		if (ret == -ENOMEM) {
+	if (!fb->shmem->vaddr) {
+		void *vaddr;
+
+		vaddr = drm_gem_shmem_vmap(&fb->shmem->base);
+		if (IS_ERR(vaddr)) {
 			DRM_ERROR("failed to vmap fb\n");
 			return 0;
 		}
-		if (!fb->obj->vmapping) {
-			DRM_ERROR("failed to vmapping\n");
-			return 0;
-		}
 	}
 
 	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
@@ -127,7 +126,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		const int byte_offset = line_offset + (x << log_bpp);
 		const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
 		if (udl_render_hline(dev, log_bpp, &urb,
-				     (char *) fb->obj->vmapping,
+				     (char *) fb->shmem->vaddr,
 				     &cmd, byte_offset, dev_byte_offset,
 				     width << log_bpp,
 				     &bytes_identical, &bytes_sent))
@@ -281,6 +280,7 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 				      unsigned num_clips)
 {
 	struct udl_framebuffer *ufb = to_udl_fb(fb);
+	struct dma_buf_attachment *import_attach;
 	int i;
 	int ret = 0;
 
@@ -289,8 +289,10 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 	if (!ufb->active_16)
 		goto unlock;
 
-	if (ufb->obj->base.import_attach) {
-		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
+	import_attach = ufb->shmem->base.import_attach;
+
+	if (import_attach) {
+		ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
 					       DMA_FROM_DEVICE);
 		if (ret)
 			goto unlock;
@@ -304,8 +306,8 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 			break;
 	}
 
-	if (ufb->obj->base.import_attach) {
-		ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
+	if (import_attach) {
+		ret = dma_buf_end_cpu_access(import_attach->dmabuf,
 					     DMA_FROM_DEVICE);
 	}
 
@@ -319,8 +321,8 @@ static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct udl_framebuffer *ufb = to_udl_fb(fb);
 
-	if (ufb->obj)
-		drm_gem_object_put_unlocked(&ufb->obj->base);
+	if (ufb->shmem)
+		drm_gem_object_put_unlocked(&ufb->shmem->base);
 
 	drm_framebuffer_cleanup(fb);
 	kfree(ufb);
@@ -336,11 +338,11 @@ static int
 udl_framebuffer_init(struct drm_device *dev,
 		     struct udl_framebuffer *ufb,
 		     const struct drm_mode_fb_cmd2 *mode_cmd,
-		     struct udl_gem_object *obj)
+		     struct drm_gem_shmem_object *shmem)
 {
 	int ret;
 
-	ufb->obj = obj;
+	ufb->shmem = shmem;
 	drm_helper_mode_fill_fb_struct(dev, &ufb->base, mode_cmd);
 	ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
 	return ret;
@@ -356,7 +358,8 @@ static int udlfb_create(struct drm_fb_helper *helper,
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct udl_gem_object *obj;
+	struct drm_gem_shmem_object *shmem;
+	void *vaddr;
 	uint32_t size;
 	int ret = 0;
 
@@ -373,12 +376,15 @@ static int udlfb_create(struct drm_fb_helper *helper,
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 	size = ALIGN(size, PAGE_SIZE);
 
-	obj = udl_gem_alloc_object(dev, size);
-	if (!obj)
+	shmem = drm_gem_shmem_create(dev, size);
+	if (IS_ERR(shmem)) {
+		ret = PTR_ERR(shmem);
 		goto out;
+	}
 
-	ret = udl_gem_vmap(obj);
-	if (ret) {
+	vaddr = drm_gem_shmem_vmap(&shmem->base);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
 		DRM_ERROR("failed to vmap fb\n");
 		goto out_gfree;
 	}
@@ -389,7 +395,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
 		goto out_gfree;
 	}
 
-	ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
+	ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, shmem);
 	if (ret)
 		goto out_gfree;
 
@@ -397,20 +403,20 @@ static int udlfb_create(struct drm_fb_helper *helper,
 
 	ufbdev->helper.fb = fb;
 
-	info->screen_base = ufbdev->ufb.obj->vmapping;
+	info->screen_base = vaddr;
 	info->fix.smem_len = size;
-	info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
+	info->fix.smem_start = (unsigned long)vaddr;
 
 	info->fbops = &udlfb_ops;
 	drm_fb_helper_fill_info(info, &ufbdev->helper, sizes);
 
 	DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
 		      fb->width, fb->height,
-		      ufbdev->ufb.obj->vmapping);
+		      ufbdev->ufb.shmem->vaddr);
 
 	return ret;
 out_gfree:
-	drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+	drm_gem_object_put_unlocked(&ufbdev->ufb.shmem->base);
 out:
 	return ret;
 }
@@ -424,10 +430,10 @@ static void udl_fbdev_destroy(struct drm_device *dev,
 {
 	drm_fb_helper_unregister_fbi(&ufbdev->helper);
 	drm_fb_helper_fini(&ufbdev->helper);
-	if (ufbdev->ufb.obj) {
+	if (ufbdev->ufb.shmem) {
 		drm_framebuffer_unregister_private(&ufbdev->ufb.base);
 		drm_framebuffer_cleanup(&ufbdev->ufb.base);
-		drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+		drm_gem_object_put_unlocked(&ufbdev->ufb.shmem->base);
 	}
 }
 
@@ -518,7 +524,8 @@ udl_fb_user_fb_create(struct drm_device *dev,
 	if (ufb == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
+	ret = udl_framebuffer_init(dev, ufb, mode_cmd,
+				   to_drm_gem_shmem_obj(obj));
 	if (ret) {
 		kfree(ufb);
 		return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 628749cc1143..ac82e06463bd 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -7,229 +7,76 @@
 #include <linux/vmalloc.h>
 
 #include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_prime.h>
 
 #include "udl_drv.h"
 
 /*
- * Helpers for struct drm_driver
+ * GEM object funcs
  */
 
-struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
-						    size_t size)
-{
-	struct udl_gem_object *obj;
-
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-	if (!obj)
-		return NULL;
-
-	return &obj->base;
-}
-
-struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
-					    size_t size)
-{
-	struct drm_gem_object *obj;
-
-	obj = dev->driver->gem_create_object(dev, size);
-	if (obj == NULL)
-		return NULL;
-
-	if (drm_gem_object_init(dev, obj, size) != 0) {
-		kfree(obj);
-		return NULL;
-	}
-
-	return to_udl_bo(obj);
-}
-
-static int
-udl_gem_create(struct drm_file *file,
-	       struct drm_device *dev,
-	       uint64_t size,
-	       uint32_t *handle_p)
-{
-	struct udl_gem_object *obj;
-	int ret;
-	u32 handle;
-
-	size = roundup(size, PAGE_SIZE);
-
-	obj = udl_gem_alloc_object(dev, size);
-	if (obj == NULL)
-		return -ENOMEM;
-
-	ret = drm_gem_handle_create(file, &obj->base, &handle);
-	if (ret) {
-		drm_gem_object_release(&obj->base);
-		kfree(obj);
-		return ret;
-	}
-
-	drm_gem_object_put_unlocked(&obj->base);
-	*handle_p = handle;
-	return 0;
-}
-
-int udl_dumb_create(struct drm_file *file,
-		    struct drm_device *dev,
-		    struct drm_mode_create_dumb *args)
+static void udl_gem_object_free_object(struct drm_gem_object *obj)
 {
-	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
-	args->size = args->pitch * args->height;
-	return udl_gem_create(file, dev,
-			      args->size, &args->handle);
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	/* Fbdev emulation vmaps the buffer. Unmap it here for consistency
+	 * with the original udl GEM code.
+	 *
+	 * TODO: Switch to generic fbdev emulation and release the
+	 *       GEM object with drm_gem_shmem_free_object().
+	 */
+	if (shmem->vaddr)
+		drm_gem_shmem_vunmap(obj, shmem->vaddr);
+
+	drm_gem_shmem_free_object(obj);
 }
 
-int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int udl_gem_object_mmap(struct drm_gem_object *obj,
+			       struct vm_area_struct *vma)
 {
-	struct drm_gem_object *obj;
 	int ret;
 
-	ret = drm_gem_mmap(filp, vma);
+	ret = drm_gem_shmem_mmap(obj, vma);
 	if (ret)
 		return ret;
 
-	obj = vma->vm_private_data;
-
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_flags |= VM_MIXEDMAP;
-
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 	if (obj->import_attach)
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 
-	return ret;
-}
-
-vm_fault_t udl_gem_fault(struct vm_fault *vmf)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
-	struct page *page;
-	unsigned int page_offset;
-
-	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-
-	if (!obj->pages)
-		return VM_FAULT_SIGBUS;
-
-	page = obj->pages[page_offset];
-	return vmf_insert_page(vma, vmf->address, page);
-}
-
-int udl_gem_get_pages(struct udl_gem_object *obj)
-{
-	struct page **pages;
-
-	if (obj->pages)
-		return 0;
-
-	pages = drm_gem_get_pages(&obj->base);
-	if (IS_ERR(pages))
-		return PTR_ERR(pages);
-
-	obj->pages = pages;
-
-	return 0;
-}
-
-void udl_gem_put_pages(struct udl_gem_object *obj)
-{
-	if (obj->base.import_attach) {
-		kvfree(obj->pages);
-		obj->pages = NULL;
-		return;
-	}
-
-	drm_gem_put_pages(&obj->base, obj->pages, false, false);
-	obj->pages = NULL;
-}
-
-int udl_gem_vmap(struct udl_gem_object *obj)
-{
-	int page_count = obj->base.size / PAGE_SIZE;
-	int ret;
-
-	if (obj->base.import_attach) {
-		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
-		if (!obj->vmapping)
-			return -ENOMEM;
-		return 0;
-	}
-
-	ret = udl_gem_get_pages(obj);
-	if (ret)
-		return ret;
-
-	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
-	if (!obj->vmapping)
-		return -ENOMEM;
 	return 0;
 }
 
-void udl_gem_vunmap(struct udl_gem_object *obj)
-{
-	if (obj->base.import_attach) {
-		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
-		return;
-	}
-
-	vunmap(obj->vmapping);
-
-	udl_gem_put_pages(obj);
-}
-
-void udl_gem_free_object(struct drm_gem_object *gem_obj)
-{
-	struct udl_gem_object *obj = to_udl_bo(gem_obj);
-
-	if (obj->vmapping)
-		udl_gem_vunmap(obj);
-
-	if (gem_obj->import_attach) {
-		drm_prime_gem_destroy(gem_obj, obj->sg);
-		put_device(gem_obj->dev->dev);
-	}
+static const struct drm_gem_object_funcs udl_gem_object_funcs = {
+	.free = udl_gem_object_free_object,
+	.print_info = drm_gem_shmem_print_info,
+	.pin = drm_gem_shmem_pin,
+	.unpin = drm_gem_shmem_unpin,
+	.get_sg_table = drm_gem_shmem_get_sg_table,
+	.vmap = drm_gem_shmem_vmap,
+	.vunmap = drm_gem_shmem_vunmap,
+	.mmap = udl_gem_object_mmap,
+};
 
-	if (obj->pages)
-		udl_gem_put_pages(obj);
-
-	drm_gem_free_mmap_offset(gem_obj);
-}
+/*
+ * Helpers for struct drm_driver
+ */
 
-/* the dumb interface doesn't work with the GEM straight MMAP
-   interface, it expects to do MMAP on the drm fd, like normal */
-int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
-		 uint32_t handle, uint64_t *offset)
+struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
+						    size_t size)
 {
-	struct udl_gem_object *gobj;
+	struct drm_gem_shmem_object *shmem;
 	struct drm_gem_object *obj;
-	struct udl_device *udl = to_udl(dev);
-	int ret = 0;
-
-	mutex_lock(&udl->gem_lock);
-	obj = drm_gem_object_lookup(file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto unlock;
-	}
-	gobj = to_udl_bo(obj);
-
-	ret = udl_gem_get_pages(gobj);
-	if (ret)
-		goto out;
-	ret = drm_gem_create_mmap_offset(obj);
-	if (ret)
-		goto out;
 
-	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
+	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+	if (!shmem)
+		return NULL;
+
+	obj = &shmem->base;
+	obj->funcs = &udl_gem_object_funcs;
 
-out:
-	drm_gem_object_put_unlocked(&gobj->base);
-unlock:
-	mutex_unlock(&udl->gem_lock);
-	return ret;
+	return obj;
 }
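
As a usage illustration (not part of the patch): the mapping affected by
the caching change is the one user space gets when it mmap's a locally
created dumb buffer through the DRM fd. A minimal sketch, assuming a udl
device at /dev/dri/card0 and libdrm's copy of the DRM uapi headers, with
error handling trimmed:

  /* build: gcc dumb-mmap.c -o dumb-mmap */
  #include <libdrm/drm.h>	/* DRM uapi; header path provided by libdrm-dev */
  #include <fcntl.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
	int fd = open("/dev/dri/card0", O_RDWR); /* assumption: udl is card0 */
	if (fd < 0)
		return 1;

	/* Create a locally backed (non-imported) dumb buffer. */
	struct drm_mode_create_dumb create = {
		.width = 1024, .height = 768, .bpp = 32,
	};
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	/* Look up the fake mmap offset of the buffer... */
	struct drm_mode_map_dumb map = { .handle = create.handle };
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return 1;

	/* ...and map it. With this patch the pages stay cacheable
	 * instead of being mapped write-combined.
	 */
	void *fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, map.offset);
	if (fb == MAP_FAILED)
		return 1;

	memset(fb, 0x00, create.size);	/* CPU rendering happens here */

	munmap(fb, create.size);
	close(fd);
	return 0;
  }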
-- 
2.23.0

Thread overview:

2019-10-28  8:45 [PATCH 0/3] drm/udl: Convert to SHMEM  Thomas Zimmermann
2019-10-28  8:45 ` [PATCH 1/3] drm/udl: Remove flags field from struct udl_gem_object  Thomas Zimmermann
2019-11-05 11:05   ` Gerd Hoffmann
2019-10-28  8:45 ` [PATCH 2/3] drm/udl: Allocate GEM object via struct drm_driver.gem_create_object  Thomas Zimmermann
2019-11-05 11:05   ` Gerd Hoffmann
2019-10-28  8:45 ` [PATCH 3/3] drm/udl: Switch to SHMEM  Thomas Zimmermann [this message]
2019-10-30  0:15   ` kbuild test robot
2019-10-31 12:53   ` kbuild test robot
2019-11-05 11:05   ` Gerd Hoffmann
2019-11-06  8:22     ` Thomas Zimmermann
2019-10-28  8:48 ` [PATCH 0/3] drm/udl: Convert to SHMEM  Thomas Zimmermann
