* [PATCH] drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
@ 2020-10-09 10:21 ` Chris Wilson
  0 siblings, 0 replies; 18+ messages in thread
From: Chris Wilson @ 2020-10-09 10:21 UTC (permalink / raw)
  To: dri-devel; +Cc: intel-gfx, Chris Wilson

vgem is a minimalistic driver that provides shmemfs objects to
userspace that may then be used as an in-memory surface and transported
across dma-buf to other drivers. Since vgem's introduction,
drm_gem_shmem_helper has grown to provide the same shmemfs facilities,
so we can trim vgem to wrap the helper.
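
As an illustration (not part of the patch), the allocation path with
the helper reduces to roughly the following sketch; the function name
is hypothetical, and the drm_gem_shmem_create()/drm_gem_shmem_object
API is assumed to be as used in the diff below:

	static struct drm_gem_object *example_shmem_create(struct drm_device *dev,
							   unsigned long size)
	{
		struct drm_gem_shmem_object *shmem;

		/* Allocate a shmemfs-backed GEM object via the helper. */
		shmem = drm_gem_shmem_create(dev, round_up(size, PAGE_SIZE));
		if (IS_ERR(shmem))
			return ERR_CAST(shmem);

		/* Ask for cacheable CPU mmaps, as vgem now does. */
		shmem->map_cached = true;

		return &shmem->base;
	}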

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/Kconfig         |   1 +
 drivers/gpu/drm/vgem/vgem_drv.c | 281 ++------------------------------
 drivers/gpu/drm/vgem/vgem_drv.h |  11 --
 3 files changed, 13 insertions(+), 280 deletions(-)

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 147d61b9674e..db2ff76638cd 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -278,6 +278,7 @@ source "drivers/gpu/drm/i915/Kconfig"
 config DRM_VGEM
 	tristate "Virtual GEM provider"
 	depends on DRM
+	select DRM_GEM_SHMEM_HELPER
 	help
 	  Choose this option to get a virtual graphics memory manager,
 	  as used by Mesa's software renderer for enhanced performance.
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index fa54a6d1403d..73cb17c4f7a8 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -38,6 +38,7 @@
 
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_managed.h>
 #include <drm/drm_prime.h>
@@ -50,87 +51,11 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
-static const struct drm_gem_object_funcs vgem_gem_object_funcs;
-
 static struct vgem_device {
 	struct drm_device drm;
 	struct platform_device *platform;
 } *vgem_device;
 
-static void vgem_gem_free_object(struct drm_gem_object *obj)
-{
-	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
-
-	kvfree(vgem_obj->pages);
-	mutex_destroy(&vgem_obj->pages_lock);
-
-	if (obj->import_attach)
-		drm_prime_gem_destroy(obj, vgem_obj->table);
-
-	drm_gem_object_release(obj);
-	kfree(vgem_obj);
-}
-
-static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct drm_vgem_gem_object *obj = vma->vm_private_data;
-	/* We don't use vmf->pgoff since that has the fake offset */
-	unsigned long vaddr = vmf->address;
-	vm_fault_t ret = VM_FAULT_SIGBUS;
-	loff_t num_pages;
-	pgoff_t page_offset;
-	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
-
-	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
-
-	if (page_offset >= num_pages)
-		return VM_FAULT_SIGBUS;
-
-	mutex_lock(&obj->pages_lock);
-	if (obj->pages) {
-		get_page(obj->pages[page_offset]);
-		vmf->page = obj->pages[page_offset];
-		ret = 0;
-	}
-	mutex_unlock(&obj->pages_lock);
-	if (ret) {
-		struct page *page;
-
-		page = shmem_read_mapping_page(
-					file_inode(obj->base.filp)->i_mapping,
-					page_offset);
-		if (!IS_ERR(page)) {
-			vmf->page = page;
-			ret = 0;
-		} else switch (PTR_ERR(page)) {
-			case -ENOSPC:
-			case -ENOMEM:
-				ret = VM_FAULT_OOM;
-				break;
-			case -EBUSY:
-				ret = VM_FAULT_RETRY;
-				break;
-			case -EFAULT:
-			case -EINVAL:
-				ret = VM_FAULT_SIGBUS;
-				break;
-			default:
-				WARN_ON(PTR_ERR(page));
-				ret = VM_FAULT_SIGBUS;
-				break;
-		}
-
-	}
-	return ret;
-}
-
-static const struct vm_operations_struct vgem_gem_vm_ops = {
-	.fault = vgem_gem_fault,
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
-
 static int vgem_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct vgem_file *vfile;
@@ -159,41 +84,25 @@ static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
 	kfree(vfile);
 }
 
-static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
+static struct drm_gem_shmem_object *__vgem_gem_create(struct drm_device *dev,
 						unsigned long size)
 {
-	struct drm_vgem_gem_object *obj;
-	int ret;
-
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-	if (!obj)
-		return ERR_PTR(-ENOMEM);
+	struct drm_gem_shmem_object *obj;
 
-	obj->base.funcs = &vgem_gem_object_funcs;
-
-	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
-	if (ret) {
-		kfree(obj);
-		return ERR_PTR(ret);
-	}
-
-	mutex_init(&obj->pages_lock);
+	obj = drm_gem_shmem_create(dev, round_up(size, PAGE_SIZE));
+	if (IS_ERR(obj))
+		return obj;
 
+	obj->map_cached = true;
 	return obj;
 }
 
-static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
-{
-	drm_gem_object_release(&obj->base);
-	kfree(obj);
-}
-
 static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
 					      struct drm_file *file,
 					      unsigned int *handle,
 					      unsigned long size)
 {
-	struct drm_vgem_gem_object *obj;
+	struct drm_gem_shmem_object *obj;
 	int ret;
 
 	obj = __vgem_gem_create(dev, size);
@@ -239,96 +148,9 @@ static struct drm_ioctl_desc vgem_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
 };
 
-static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-	unsigned long flags = vma->vm_flags;
-	int ret;
-
-	ret = drm_gem_mmap(filp, vma);
-	if (ret)
-		return ret;
-
-	/* Keep the WC mmaping set by drm_gem_mmap() but our pages
-	 * are ordinary and not special.
-	 */
-	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
-	return 0;
-}
-
-static const struct file_operations vgem_driver_fops = {
-	.owner		= THIS_MODULE,
-	.open		= drm_open,
-	.mmap		= vgem_mmap,
-	.poll		= drm_poll,
-	.read		= drm_read,
-	.unlocked_ioctl = drm_ioctl,
-	.compat_ioctl	= drm_compat_ioctl,
-	.release	= drm_release,
-};
-
-static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
-{
-	mutex_lock(&bo->pages_lock);
-	if (bo->pages_pin_count++ == 0) {
-		struct page **pages;
-
-		pages = drm_gem_get_pages(&bo->base);
-		if (IS_ERR(pages)) {
-			bo->pages_pin_count--;
-			mutex_unlock(&bo->pages_lock);
-			return pages;
-		}
-
-		bo->pages = pages;
-	}
-	mutex_unlock(&bo->pages_lock);
-
-	return bo->pages;
-}
-
-static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
-{
-	mutex_lock(&bo->pages_lock);
-	if (--bo->pages_pin_count == 0) {
-		drm_gem_put_pages(&bo->base, bo->pages, true, true);
-		bo->pages = NULL;
-	}
-	mutex_unlock(&bo->pages_lock);
-}
-
-static int vgem_prime_pin(struct drm_gem_object *obj)
-{
-	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-	long n_pages = obj->size >> PAGE_SHIFT;
-	struct page **pages;
+DEFINE_DRM_GEM_FOPS(vgem_driver_fops);
 
-	pages = vgem_pin_pages(bo);
-	if (IS_ERR(pages))
-		return PTR_ERR(pages);
-
-	/* Flush the object from the CPU cache so that importers can rely
-	 * on coherent indirect access via the exported dma-address.
-	 */
-	drm_clflush_pages(pages, n_pages);
-
-	return 0;
-}
-
-static void vgem_prime_unpin(struct drm_gem_object *obj)
-{
-	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-
-	vgem_unpin_pages(bo);
-}
-
-static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
-{
-	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-
-	return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
-}
-
-static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
+static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
 						struct dma_buf *dma_buf)
 {
 	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
@@ -336,85 +158,6 @@ static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
 	return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
 }
 
-static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
-			struct dma_buf_attachment *attach, struct sg_table *sg)
-{
-	struct drm_vgem_gem_object *obj;
-	int npages;
-
-	obj = __vgem_gem_create(dev, attach->dmabuf->size);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
-
-	obj->table = sg;
-	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-	if (!obj->pages) {
-		__vgem_gem_destroy(obj);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	obj->pages_pin_count++; /* perma-pinned */
-	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
-					npages);
-	return &obj->base;
-}
-
-static void *vgem_prime_vmap(struct drm_gem_object *obj)
-{
-	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-	long n_pages = obj->size >> PAGE_SHIFT;
-	struct page **pages;
-
-	pages = vgem_pin_pages(bo);
-	if (IS_ERR(pages))
-		return NULL;
-
-	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
-}
-
-static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
-	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-
-	vunmap(vaddr);
-	vgem_unpin_pages(bo);
-}
-
-static int vgem_prime_mmap(struct drm_gem_object *obj,
-			   struct vm_area_struct *vma)
-{
-	int ret;
-
-	if (obj->size < vma->vm_end - vma->vm_start)
-		return -EINVAL;
-
-	if (!obj->filp)
-		return -ENODEV;
-
-	ret = call_mmap(obj->filp, vma);
-	if (ret)
-		return ret;
-
-	fput(vma->vm_file);
-	vma->vm_file = get_file(obj->filp);
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
-	return 0;
-}
-
-static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
-	.free = vgem_gem_free_object,
-	.pin = vgem_prime_pin,
-	.unpin = vgem_prime_unpin,
-	.get_sg_table = vgem_prime_get_sg_table,
-	.vmap = vgem_prime_vmap,
-	.vunmap = vgem_prime_vunmap,
-	.vm_ops = &vgem_gem_vm_ops,
-};
-
 static struct drm_driver vgem_driver = {
 	.driver_features		= DRIVER_GEM | DRIVER_RENDER,
 	.open				= vgem_open,
@@ -428,8 +171,8 @@ static struct drm_driver vgem_driver = {
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_import = vgem_prime_import,
-	.gem_prime_import_sg_table = vgem_prime_import_sg_table,
-	.gem_prime_mmap = vgem_prime_mmap,
+	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+	.gem_prime_mmap = drm_gem_prime_mmap,
 
 	.name	= DRIVER_NAME,
 	.desc	= DRIVER_DESC,
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 0ed300317f87..34cf63e6fb3d 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -39,17 +39,6 @@ struct vgem_file {
 	struct mutex fence_mutex;
 };
 
-#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
-struct drm_vgem_gem_object {
-	struct drm_gem_object base;
-
-	struct page **pages;
-	unsigned int pages_pin_count;
-	struct mutex pages_lock;
-
-	struct sg_table *table;
-};
-
 int vgem_fence_open(struct vgem_file *file);
 int vgem_fence_attach_ioctl(struct drm_device *dev,
 			    void *data,
-- 
2.20.1
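
(Aside, not part of the posted patch: DEFINE_DRM_GEM_FOPS() supplies
the stock GEM file_operations that the deleted vgem_driver_fops mostly
open-coded. In this era's include/drm/drm_gem.h it expands
approximately as follows; treat the exact member list as an
assumption:

	#define DEFINE_DRM_GEM_FOPS(name) \
		static const struct file_operations name = {\
			.owner		= THIS_MODULE,\
			.open		= drm_open,\
			.release	= drm_release,\
			.unlocked_ioctl	= drm_ioctl,\
			.compat_ioctl	= drm_compat_ioctl,\
			.poll		= drm_poll,\
			.read		= drm_read,\
			.llseek		= noop_llseek,\
			.mmap		= drm_gem_mmap,\
		}

The .mmap hook is drm_gem_mmap(), which installs the object's vm_ops,
here the shmem helper's fault handler in place of the removed
vgem_gem_fault().)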

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
  2020-10-09 10:21 ` [Intel-gfx] " Chris Wilson
  (?)
@ 2020-10-09 11:40 ` Patchwork
  -1 siblings, 0 replies; 18+ messages in thread
From: Patchwork @ 2020-10-09 11:40 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
URL   : https://patchwork.freedesktop.org/series/82509/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
bb53064b196f drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
-:133: CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis
#133: FILE: drivers/gpu/drm/vgem/vgem_drv.c:88:
+static struct drm_gem_shmem_object *__vgem_gem_create(struct drm_device *dev,
 						unsigned long size)

total: 0 errors, 0 warnings, 1 checks, 359 lines checked
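
For reference, the CHECK above is purely stylistic: checkpatch wants
the continuation line aligned with the open parenthesis, i.e.
something like (whitespace illustrative):

	static struct drm_gem_shmem_object *__vgem_gem_create(struct drm_device *dev,
							       unsigned long size)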


_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

* [Intel-gfx] ✗ Fi.CI.BAT: failure for drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
  2020-10-09 10:21 ` [Intel-gfx] " Chris Wilson
  (?)
  (?)
@ 2020-10-09 12:04 ` Patchwork
  -1 siblings, 0 replies; 18+ messages in thread
From: Patchwork @ 2020-10-09 12:04 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx


== Series Details ==

Series: drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
URL   : https://patchwork.freedesktop.org/series/82509/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_9119 -> Patchwork_18666
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_18666 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_18666, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/index.html

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_18666:

### IGT changes ###

#### Possible regressions ####

  * igt@prime_vgem@basic-fence-mmap:
    - fi-byt-j1900:       [PASS][1] -> [FAIL][2] +4 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-byt-j1900/igt@prime_vgem@basic-fence-mmap.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-byt-j1900/igt@prime_vgem@basic-fence-mmap.html
    - fi-blb-e6850:       [PASS][3] -> [INCOMPLETE][4] +1 similar issue
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-blb-e6850/igt@prime_vgem@basic-fence-mmap.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-blb-e6850/igt@prime_vgem@basic-fence-mmap.html

  * igt@prime_vgem@basic-fence-read:
    - fi-bsw-kefka:       [PASS][5] -> [INCOMPLETE][6] +1 similar issue
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-bsw-kefka/igt@prime_vgem@basic-fence-read.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-bsw-kefka/igt@prime_vgem@basic-fence-read.html
    - fi-ilk-650:         [PASS][7] -> [INCOMPLETE][8] +1 similar issue
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-ilk-650/igt@prime_vgem@basic-fence-read.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-ilk-650/igt@prime_vgem@basic-fence-read.html
    - fi-byt-j1900:       [PASS][9] -> [INCOMPLETE][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-byt-j1900/igt@prime_vgem@basic-fence-read.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-byt-j1900/igt@prime_vgem@basic-fence-read.html

  * igt@prime_vgem@basic-gtt:
    - fi-ilk-650:         [PASS][11] -> [FAIL][12] +3 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-ilk-650/igt@prime_vgem@basic-gtt.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-ilk-650/igt@prime_vgem@basic-gtt.html
    - fi-elk-e7500:       [PASS][13] -> [FAIL][14] +3 similar issues
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-elk-e7500/igt@prime_vgem@basic-gtt.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-elk-e7500/igt@prime_vgem@basic-gtt.html

  * igt@prime_vgem@basic-read:
    - fi-bwr-2160:        [PASS][15] -> [FAIL][16] +5 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-bwr-2160/igt@prime_vgem@basic-read.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-bwr-2160/igt@prime_vgem@basic-read.html
    - fi-bsw-kefka:       [PASS][17] -> [FAIL][18] +3 similar issues
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-bsw-kefka/igt@prime_vgem@basic-read.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-bsw-kefka/igt@prime_vgem@basic-read.html

  * igt@prime_vgem@basic-write:
    - fi-bsw-n3050:       [PASS][19] -> [FAIL][20] +5 similar issues
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-bsw-n3050/igt@prime_vgem@basic-write.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-bsw-n3050/igt@prime_vgem@basic-write.html
    - fi-pnv-d510:        [PASS][21] -> [FAIL][22] +5 similar issues
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-pnv-d510/igt@prime_vgem@basic-write.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-pnv-d510/igt@prime_vgem@basic-write.html
    - fi-blb-e6850:       [PASS][23] -> [FAIL][24] +2 similar issues
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-blb-e6850/igt@prime_vgem@basic-write.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-blb-e6850/igt@prime_vgem@basic-write.html

  * igt@vgem_basic@unload:
    - fi-cml-s:           [PASS][25] -> [FAIL][26]
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-cml-s/igt@vgem_basic@unload.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-cml-s/igt@vgem_basic@unload.html
    - fi-cfl-guc:         [PASS][27] -> [FAIL][28]
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-cfl-guc/igt@vgem_basic@unload.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-cfl-guc/igt@vgem_basic@unload.html
    - fi-tgl-u2:          [PASS][29] -> [FAIL][30]
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-tgl-u2/igt@vgem_basic@unload.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-tgl-u2/igt@vgem_basic@unload.html
    - fi-hsw-4770:        [PASS][31] -> [FAIL][32]
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-hsw-4770/igt@vgem_basic@unload.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-hsw-4770/igt@vgem_basic@unload.html
    - fi-cml-u2:          [PASS][33] -> [FAIL][34]
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-cml-u2/igt@vgem_basic@unload.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-cml-u2/igt@vgem_basic@unload.html
    - fi-snb-2600:        [PASS][35] -> [FAIL][36]
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-snb-2600/igt@vgem_basic@unload.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-snb-2600/igt@vgem_basic@unload.html
    - fi-gdg-551:         [PASS][37] -> [FAIL][38] +1 similar issue
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-gdg-551/igt@vgem_basic@unload.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-gdg-551/igt@vgem_basic@unload.html
    - fi-skl-lmem:        [PASS][39] -> [FAIL][40]
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-skl-lmem/igt@vgem_basic@unload.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-skl-lmem/igt@vgem_basic@unload.html
    - fi-cfl-8109u:       [PASS][41] -> [FAIL][42]
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-cfl-8109u/igt@vgem_basic@unload.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-cfl-8109u/igt@vgem_basic@unload.html
    - fi-bsw-nick:        [PASS][43] -> [FAIL][44] +4 similar issues
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-bsw-nick/igt@vgem_basic@unload.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-bsw-nick/igt@vgem_basic@unload.html
    - fi-bdw-5557u:       [PASS][45] -> [FAIL][46]
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-bdw-5557u/igt@vgem_basic@unload.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-bdw-5557u/igt@vgem_basic@unload.html
    - fi-kbl-r:           [PASS][47] -> [FAIL][48]
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-kbl-r/igt@vgem_basic@unload.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-kbl-r/igt@vgem_basic@unload.html
    - fi-kbl-7500u:       [PASS][49] -> [FAIL][50]
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-kbl-7500u/igt@vgem_basic@unload.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-kbl-7500u/igt@vgem_basic@unload.html
    - fi-icl-u2:          [PASS][51] -> [FAIL][52]
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-icl-u2/igt@vgem_basic@unload.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-icl-u2/igt@vgem_basic@unload.html
    - fi-skl-6600u:       [PASS][53] -> [FAIL][54]
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-skl-6600u/igt@vgem_basic@unload.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-skl-6600u/igt@vgem_basic@unload.html
    - fi-icl-y:           [PASS][55] -> [FAIL][56]
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-icl-y/igt@vgem_basic@unload.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-icl-y/igt@vgem_basic@unload.html
    - fi-cfl-8700k:       [PASS][57] -> [FAIL][58]
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-cfl-8700k/igt@vgem_basic@unload.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-cfl-8700k/igt@vgem_basic@unload.html
    - fi-snb-2520m:       [PASS][59] -> [FAIL][60]
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-snb-2520m/igt@vgem_basic@unload.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-snb-2520m/igt@vgem_basic@unload.html
    - fi-ivb-3770:        [PASS][61] -> [FAIL][62]
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-ivb-3770/igt@vgem_basic@unload.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-ivb-3770/igt@vgem_basic@unload.html
    - fi-glk-dsi:         [PASS][63] -> [FAIL][64]
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-glk-dsi/igt@vgem_basic@unload.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-glk-dsi/igt@vgem_basic@unload.html

  
#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * igt@vgem_basic@unload:
    - {fi-kbl-7560u}:     [PASS][65] -> [FAIL][66]
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-kbl-7560u/igt@vgem_basic@unload.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-kbl-7560u/igt@vgem_basic@unload.html
    - {fi-ehl-1}:         [PASS][67] -> [FAIL][68]
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-ehl-1/igt@vgem_basic@unload.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-ehl-1/igt@vgem_basic@unload.html
    - {fi-tgl-dsi}:       [PASS][69] -> [FAIL][70]
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-tgl-dsi/igt@vgem_basic@unload.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-tgl-dsi/igt@vgem_basic@unload.html

  
Known issues
------------

  Here are the changes found in Patchwork_18666 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_module_load@reload:
    - fi-bxt-dsi:         [PASS][71] -> [DMESG-WARN][72] ([i915#1635] / [i915#1982])
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-bxt-dsi/igt@i915_module_load@reload.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-bxt-dsi/igt@i915_module_load@reload.html

  * igt@prime_vgem@basic-fence-read:
    - fi-elk-e7500:       [PASS][73] -> [INCOMPLETE][74] ([i915#66]) +1 similar issue
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-elk-e7500/igt@prime_vgem@basic-fence-read.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-elk-e7500/igt@prime_vgem@basic-fence-read.html

  * igt@vgem_basic@unload:
    - fi-bxt-dsi:         [PASS][75] -> [FAIL][76] ([i915#1635])
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-bxt-dsi/igt@vgem_basic@unload.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-bxt-dsi/igt@vgem_basic@unload.html
    - fi-apl-guc:         [PASS][77] -> [FAIL][78] ([i915#1635])
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-apl-guc/igt@vgem_basic@unload.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-apl-guc/igt@vgem_basic@unload.html

  
#### Possible fixes ####

  * {igt@core_hotunplug@unbind-rebind}:
    - fi-kbl-x1275:       [DMESG-WARN][79] ([i915#62] / [i915#92] / [i915#95]) -> [PASS][80]
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-kbl-x1275/igt@core_hotunplug@unbind-rebind.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-kbl-x1275/igt@core_hotunplug@unbind-rebind.html

  * igt@i915_pm_rpm@basic-pci-d3-state:
    - fi-bsw-kefka:       [DMESG-WARN][81] ([i915#1982]) -> [PASS][82]
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-bsw-kefka/igt@i915_pm_rpm@basic-pci-d3-state.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-bsw-kefka/igt@i915_pm_rpm@basic-pci-d3-state.html

  * igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic:
    - fi-bsw-n3050:       [DMESG-WARN][83] ([i915#1982]) -> [PASS][84]
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-bsw-n3050/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-bsw-n3050/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html

  * igt@kms_cursor_legacy@basic-flip-after-cursor-atomic:
    - fi-icl-u2:          [DMESG-WARN][85] ([i915#1982]) -> [PASS][86] +1 similar issue
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-icl-u2/igt@kms_cursor_legacy@basic-flip-after-cursor-atomic.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-icl-u2/igt@kms_cursor_legacy@basic-flip-after-cursor-atomic.html

  
#### Warnings ####

  * igt@i915_pm_rpm@module-reload:
    - fi-kbl-guc:         [DMESG-FAIL][87] ([i915#2203]) -> [DMESG-WARN][88] ([i915#2203])
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-kbl-guc/igt@i915_pm_rpm@module-reload.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-kbl-guc/igt@i915_pm_rpm@module-reload.html

  * igt@kms_flip@basic-plain-flip@a-dp1:
    - fi-kbl-x1275:       [DMESG-WARN][89] ([i915#62] / [i915#92] / [i915#95]) -> [DMESG-WARN][90] ([i915#62] / [i915#92]) +1 similar issue
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-kbl-x1275/igt@kms_flip@basic-plain-flip@a-dp1.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-kbl-x1275/igt@kms_flip@basic-plain-flip@a-dp1.html

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
    - fi-kbl-x1275:       [DMESG-WARN][91] ([i915#62] / [i915#92]) -> [DMESG-WARN][92] ([i915#62] / [i915#92] / [i915#95]) +2 similar issues
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-kbl-x1275/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-kbl-x1275/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html

  * igt@vgem_basic@unload:
    - fi-skl-guc:         [DMESG-WARN][93] ([i915#2203]) -> [DMESG-FAIL][94] ([i915#2203])
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-skl-guc/igt@vgem_basic@unload.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-skl-guc/igt@vgem_basic@unload.html
    - fi-kbl-guc:         [DMESG-WARN][95] ([i915#2203]) -> [DMESG-FAIL][96] ([i915#2203])
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-kbl-guc/igt@vgem_basic@unload.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-kbl-guc/igt@vgem_basic@unload.html
    - fi-kbl-x1275:       [DMESG-WARN][97] ([i915#95]) -> [DMESG-FAIL][98] ([i915#62])
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9119/fi-kbl-x1275/igt@vgem_basic@unload.html
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/fi-kbl-x1275/igt@vgem_basic@unload.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [i915#1635]: https://gitlab.freedesktop.org/drm/intel/issues/1635
  [i915#1982]: https://gitlab.freedesktop.org/drm/intel/issues/1982
  [i915#2203]: https://gitlab.freedesktop.org/drm/intel/issues/2203
  [i915#62]: https://gitlab.freedesktop.org/drm/intel/issues/62
  [i915#66]: https://gitlab.freedesktop.org/drm/intel/issues/66
  [i915#92]: https://gitlab.freedesktop.org/drm/intel/issues/92
  [i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95
  [k.org#205379]: https://bugzilla.kernel.org/show_bug.cgi?id=205379


Participating hosts (46 -> 39)
------------------------------

  Missing    (7): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-ctg-p8600 fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * Linux: CI_DRM_9119 -> Patchwork_18666

  CI-20190529: 20190529
  CI_DRM_9119: a3e5da35ec06db35cface113da3befab8e00e03a @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5806: 6adb80cd84310b6d90a5259768d03ebb2c30fe50 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_18666: bb53064b196fd3ab9c21ba669ba4eb3467cac3c1 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

bb53064b196f drm/vgem: Replace vgem_object_funcs with the common drm shmem helper

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_18666/index.html

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

* Re: [Intel-gfx] [PATCH] drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
  2020-10-09 10:21 ` [Intel-gfx] " Chris Wilson
@ 2020-10-09 16:16   ` Daniel Vetter
  -1 siblings, 0 replies; 18+ messages in thread
From: Daniel Vetter @ 2020-10-09 16:16 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx, dri-devel

On Fri, Oct 9, 2020 at 12:21 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> vgem is a minimalistic driver that provides shmemfs objects to
> userspace that may then be used as an in-memory surface and transported
> across dma-buf to other drivers. Since vgem's introduction,
> drm_gem_shmem_helper has grown to provide the same shmemfs facilities,
> so we can trim vgem to wrap the helper.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/Kconfig         |   1 +
>  drivers/gpu/drm/vgem/vgem_drv.c | 281 ++------------------------------
>  drivers/gpu/drm/vgem/vgem_drv.h |  11 --
>  3 files changed, 13 insertions(+), 280 deletions(-)

Nice diffstat :-)

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

>
> diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
> index 147d61b9674e..db2ff76638cd 100644
> --- a/drivers/gpu/drm/Kconfig
> +++ b/drivers/gpu/drm/Kconfig
> @@ -278,6 +278,7 @@ source "drivers/gpu/drm/i915/Kconfig"
>  config DRM_VGEM
>         tristate "Virtual GEM provider"
>         depends on DRM
> +       select DRM_GEM_SHMEM_HELPER
>         help
>           Choose this option to get a virtual graphics memory manager,
>           as used by Mesa's software renderer for enhanced performance.
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
> index fa54a6d1403d..73cb17c4f7a8 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.c
> +++ b/drivers/gpu/drm/vgem/vgem_drv.c
> @@ -38,6 +38,7 @@
>
>  #include <drm/drm_drv.h>
>  #include <drm/drm_file.h>
> +#include <drm/drm_gem_shmem_helper.h>
>  #include <drm/drm_ioctl.h>
>  #include <drm/drm_managed.h>
>  #include <drm/drm_prime.h>
> @@ -50,87 +51,11 @@
>  #define DRIVER_MAJOR   1
>  #define DRIVER_MINOR   0
>
> -static const struct drm_gem_object_funcs vgem_gem_object_funcs;
> -
>  static struct vgem_device {
>         struct drm_device drm;
>         struct platform_device *platform;
>  } *vgem_device;
>
> -static void vgem_gem_free_object(struct drm_gem_object *obj)
> -{
> -       struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
> -
> -       kvfree(vgem_obj->pages);
> -       mutex_destroy(&vgem_obj->pages_lock);
> -
> -       if (obj->import_attach)
> -               drm_prime_gem_destroy(obj, vgem_obj->table);
> -
> -       drm_gem_object_release(obj);
> -       kfree(vgem_obj);
> -}
> -
> -static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
> -{
> -       struct vm_area_struct *vma = vmf->vma;
> -       struct drm_vgem_gem_object *obj = vma->vm_private_data;
> -       /* We don't use vmf->pgoff since that has the fake offset */
> -       unsigned long vaddr = vmf->address;
> -       vm_fault_t ret = VM_FAULT_SIGBUS;
> -       loff_t num_pages;
> -       pgoff_t page_offset;
> -       page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
> -
> -       num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
> -
> -       if (page_offset >= num_pages)
> -               return VM_FAULT_SIGBUS;
> -
> -       mutex_lock(&obj->pages_lock);
> -       if (obj->pages) {
> -               get_page(obj->pages[page_offset]);
> -               vmf->page = obj->pages[page_offset];
> -               ret = 0;
> -       }
> -       mutex_unlock(&obj->pages_lock);
> -       if (ret) {
> -               struct page *page;
> -
> -               page = shmem_read_mapping_page(
> -                                       file_inode(obj->base.filp)->i_mapping,
> -                                       page_offset);
> -               if (!IS_ERR(page)) {
> -                       vmf->page = page;
> -                       ret = 0;
> -               } else switch (PTR_ERR(page)) {
> -                       case -ENOSPC:
> -                       case -ENOMEM:
> -                               ret = VM_FAULT_OOM;
> -                               break;
> -                       case -EBUSY:
> -                               ret = VM_FAULT_RETRY;
> -                               break;
> -                       case -EFAULT:
> -                       case -EINVAL:
> -                               ret = VM_FAULT_SIGBUS;
> -                               break;
> -                       default:
> -                               WARN_ON(PTR_ERR(page));
> -                               ret = VM_FAULT_SIGBUS;
> -                               break;
> -               }
> -
> -       }
> -       return ret;
> -}
> -
> -static const struct vm_operations_struct vgem_gem_vm_ops = {
> -       .fault = vgem_gem_fault,
> -       .open = drm_gem_vm_open,
> -       .close = drm_gem_vm_close,
> -};
> -
>  static int vgem_open(struct drm_device *dev, struct drm_file *file)
>  {
>         struct vgem_file *vfile;
> @@ -159,41 +84,25 @@ static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
>         kfree(vfile);
>  }
>
> -static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
> +static struct drm_gem_shmem_object *__vgem_gem_create(struct drm_device *dev,
>                                                 unsigned long size)
>  {
> -       struct drm_vgem_gem_object *obj;
> -       int ret;
> -
> -       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
> -       if (!obj)
> -               return ERR_PTR(-ENOMEM);
> +       struct drm_gem_shmem_object *obj;
>
> -       obj->base.funcs = &vgem_gem_object_funcs;
> -
> -       ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
> -       if (ret) {
> -               kfree(obj);
> -               return ERR_PTR(ret);
> -       }
> -
> -       mutex_init(&obj->pages_lock);
> +       obj = drm_gem_shmem_create(dev, round_up(size, PAGE_SIZE));
> +       if (IS_ERR(obj))
> +               return obj;
>
> +       obj->map_cached = true;
>         return obj;
>  }
>
> -static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
> -{
> -       drm_gem_object_release(&obj->base);
> -       kfree(obj);
> -}
> -
>  static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
>                                               struct drm_file *file,
>                                               unsigned int *handle,
>                                               unsigned long size)
>  {
> -       struct drm_vgem_gem_object *obj;
> +       struct drm_gem_shmem_object *obj;
>         int ret;
>
>         obj = __vgem_gem_create(dev, size);
> @@ -239,96 +148,9 @@ static struct drm_ioctl_desc vgem_ioctls[] = {
>         DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
>  };
>
> -static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
> -{
> -       unsigned long flags = vma->vm_flags;
> -       int ret;
> -
> -       ret = drm_gem_mmap(filp, vma);
> -       if (ret)
> -               return ret;
> -
> -       /* Keep the WC mmaping set by drm_gem_mmap() but our pages
> -        * are ordinary and not special.
> -        */
> -       vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
> -       return 0;
> -}
> -
> -static const struct file_operations vgem_driver_fops = {
> -       .owner          = THIS_MODULE,
> -       .open           = drm_open,
> -       .mmap           = vgem_mmap,
> -       .poll           = drm_poll,
> -       .read           = drm_read,
> -       .unlocked_ioctl = drm_ioctl,
> -       .compat_ioctl   = drm_compat_ioctl,
> -       .release        = drm_release,
> -};
> -
> -static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
> -{
> -       mutex_lock(&bo->pages_lock);
> -       if (bo->pages_pin_count++ == 0) {
> -               struct page **pages;
> -
> -               pages = drm_gem_get_pages(&bo->base);
> -               if (IS_ERR(pages)) {
> -                       bo->pages_pin_count--;
> -                       mutex_unlock(&bo->pages_lock);
> -                       return pages;
> -               }
> -
> -               bo->pages = pages;
> -       }
> -       mutex_unlock(&bo->pages_lock);
> -
> -       return bo->pages;
> -}
> -
> -static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
> -{
> -       mutex_lock(&bo->pages_lock);
> -       if (--bo->pages_pin_count == 0) {
> -               drm_gem_put_pages(&bo->base, bo->pages, true, true);
> -               bo->pages = NULL;
> -       }
> -       mutex_unlock(&bo->pages_lock);
> -}
> -
> -static int vgem_prime_pin(struct drm_gem_object *obj)
> -{
> -       struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
> -       long n_pages = obj->size >> PAGE_SHIFT;
> -       struct page **pages;
> +DEFINE_DRM_GEM_FOPS(vgem_driver_fops);
>
> -       pages = vgem_pin_pages(bo);
> -       if (IS_ERR(pages))
> -               return PTR_ERR(pages);
> -
> -       /* Flush the object from the CPU cache so that importers can rely
> -        * on coherent indirect access via the exported dma-address.
> -        */
> -       drm_clflush_pages(pages, n_pages);
> -
> -       return 0;
> -}
> -
> -static void vgem_prime_unpin(struct drm_gem_object *obj)
> -{
> -       struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
> -
> -       vgem_unpin_pages(bo);
> -}
> -
> -static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
> -{
> -       struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
> -
> -       return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
> -}
> -
> -static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
> +static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
>                                                 struct dma_buf *dma_buf)
>  {
>         struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
> @@ -336,85 +158,6 @@ static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
>         return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
>  }
>
> -static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
> -                       struct dma_buf_attachment *attach, struct sg_table *sg)
> -{
> -       struct drm_vgem_gem_object *obj;
> -       int npages;
> -
> -       obj = __vgem_gem_create(dev, attach->dmabuf->size);
> -       if (IS_ERR(obj))
> -               return ERR_CAST(obj);
> -
> -       npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
> -
> -       obj->table = sg;
> -       obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
> -       if (!obj->pages) {
> -               __vgem_gem_destroy(obj);
> -               return ERR_PTR(-ENOMEM);
> -       }
> -
> -       obj->pages_pin_count++; /* perma-pinned */
> -       drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
> -                                       npages);
> -       return &obj->base;
> -}
> -
> -static void *vgem_prime_vmap(struct drm_gem_object *obj)
> -{
> -       struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
> -       long n_pages = obj->size >> PAGE_SHIFT;
> -       struct page **pages;
> -
> -       pages = vgem_pin_pages(bo);
> -       if (IS_ERR(pages))
> -               return NULL;
> -
> -       return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
> -}
> -
> -static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
> -{
> -       struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
> -
> -       vunmap(vaddr);
> -       vgem_unpin_pages(bo);
> -}
> -
> -static int vgem_prime_mmap(struct drm_gem_object *obj,
> -                          struct vm_area_struct *vma)
> -{
> -       int ret;
> -
> -       if (obj->size < vma->vm_end - vma->vm_start)
> -               return -EINVAL;
> -
> -       if (!obj->filp)
> -               return -ENODEV;
> -
> -       ret = call_mmap(obj->filp, vma);
> -       if (ret)
> -               return ret;
> -
> -       fput(vma->vm_file);
> -       vma->vm_file = get_file(obj->filp);
> -       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
> -       vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
> -
> -       return 0;
> -}
> -
> -static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
> -       .free = vgem_gem_free_object,
> -       .pin = vgem_prime_pin,
> -       .unpin = vgem_prime_unpin,
> -       .get_sg_table = vgem_prime_get_sg_table,
> -       .vmap = vgem_prime_vmap,
> -       .vunmap = vgem_prime_vunmap,
> -       .vm_ops = &vgem_gem_vm_ops,
> -};
> -
>  static struct drm_driver vgem_driver = {
>         .driver_features                = DRIVER_GEM | DRIVER_RENDER,
>         .open                           = vgem_open,
> @@ -428,8 +171,8 @@ static struct drm_driver vgem_driver = {
>         .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
>         .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
>         .gem_prime_import = vgem_prime_import,
> -       .gem_prime_import_sg_table = vgem_prime_import_sg_table,
> -       .gem_prime_mmap = vgem_prime_mmap,
> +       .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
> +       .gem_prime_mmap = drm_gem_prime_mmap,
>
>         .name   = DRIVER_NAME,
>         .desc   = DRIVER_DESC,
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
> index 0ed300317f87..34cf63e6fb3d 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.h
> +++ b/drivers/gpu/drm/vgem/vgem_drv.h
> @@ -39,17 +39,6 @@ struct vgem_file {
>         struct mutex fence_mutex;
>  };
>
> -#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
> -struct drm_vgem_gem_object {
> -       struct drm_gem_object base;
> -
> -       struct page **pages;
> -       unsigned int pages_pin_count;
> -       struct mutex pages_lock;
> -
> -       struct sg_table *table;
> -};
> -
>  int vgem_fence_open(struct vgem_file *file);
>  int vgem_fence_attach_ioctl(struct drm_device *dev,
>                             void *data,
> --
> 2.20.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx



-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
  2020-10-09 16:16   ` Daniel Vetter
@ 2020-10-09 22:43     ` Daniel Vetter
  -1 siblings, 0 replies; 18+ messages in thread
From: Daniel Vetter @ 2020-10-09 22:43 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx, dri-devel

On Fri, Oct 09, 2020 at 06:16:06PM +0200, Daniel Vetter wrote:
> On Fri, Oct 9, 2020 at 12:21 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
> >
> > vgem is a minimalistic driver that provides shmemfs objects to
> > userspace that may then be used as an in-memory surface and transported
> > across dma-buf to other drivers. Since its introduction,
> > drm_gem_shmem_helper now provides the same shmemfs facilities and so we
> > can trim vgem to wrap the helper.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> >  drivers/gpu/drm/Kconfig         |   1 +
> >  drivers/gpu/drm/vgem/vgem_drv.c | 281 ++------------------------------
> >  drivers/gpu/drm/vgem/vgem_drv.h |  11 --
> >  3 files changed, 13 insertions(+), 280 deletions(-)
> 
> Nice diffstat :-)
> 
> Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Just noticed, but I think with

	.dumb_create		= drm_gem_shmem_dumb_create,

you can delete a bunch more :-)
-Daniel
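
For illustration, a minimal sketch of that suggestion -- not part of the
posted patch. It assumes vgem's remaining dumb-create path does nothing
beyond allocating a shmem object, so the stock helper can be wired in
directly:

	static struct drm_driver vgem_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_RENDER,
		.open			= vgem_open,
		/* ... */
		.dumb_create		= drm_gem_shmem_dumb_create,
		/* ... */
	};

With that, vgem_gem_create() and the dumb-create wrapper around it would
likely become dead code and could be deleted as well.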


-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
  2020-10-09 16:16   ` Daniel Vetter
@ 2020-10-12 10:49     ` Chris Wilson
  -1 siblings, 0 replies; 18+ messages in thread
From: Chris Wilson @ 2020-10-12 10:49 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: intel-gfx, dri-devel

Quoting Daniel Vetter (2020-10-09 17:16:06)
> On Fri, Oct 9, 2020 at 12:21 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
> >
> > vgem is a minimalistic driver that provides shmemfs objects to
> > userspace that may then be used as an in-memory surface and transported
> > across dma-buf to other drivers. Since its introduction,
> > drm_gem_shmem_helper now provides the same shmemfs facilities and so we
> > can trim vgem to wrap the helper.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> >  drivers/gpu/drm/Kconfig         |   1 +
> >  drivers/gpu/drm/vgem/vgem_drv.c | 281 ++------------------------------
> >  drivers/gpu/drm/vgem/vgem_drv.h |  11 --
> >  3 files changed, 13 insertions(+), 280 deletions(-)
> 
> Nice diffstat :-)
> 
> Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Unfortunately I had to drop drm_gem_prime_mmap(), since the existing
expectation is that we hand the fault handler off to shmemfs so that
the module can be released while the memory is still exported. The
other issue is on arch/x86, where merely setting PAT=WC on the PTE
does not flush the cache for that page, so the CPU will preferentially
keep using the cached (stale) contents. That has caught us out more
than once.
-Chris
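
The delegation Chris refers to is visible in the vgem_prime_mmap()
being deleted in the diff above: the vma is pointed at the object's
shmemfs file, so shmemfs rather than the module services every
subsequent fault. Condensed for reference:

	static int vgem_prime_mmap(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
	{
		int ret;

		/* mmap through the shmemfs file backing the object */
		ret = call_mmap(obj->filp, vma);
		if (ret)
			return ret;

		/*
		 * Swap the vma's file from the drm chardev to the shmemfs
		 * file; faults no longer enter vgem code at all, so the
		 * module can be unloaded while the mapping lives on.
		 */
		fput(vma->vm_file);
		vma->vm_file = get_file(obj->filp);
		vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

		return 0;
	}

The PAT point is likewise why the deleted vgem_prime_pin() called
drm_clflush_pages(): on x86, marking a PTE write-combining does not
evict lines already sitting in the CPU cache for that page.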

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
  2020-10-12 10:49     ` Chris Wilson
@ 2020-10-12 13:55       ` Daniel Vetter
  -1 siblings, 0 replies; 18+ messages in thread
From: Daniel Vetter @ 2020-10-12 13:55 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx, dri-devel

On Mon, Oct 12, 2020 at 12:49 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
> Quoting Daniel Vetter (2020-10-09 17:16:06)
> > On Fri, Oct 9, 2020 at 12:21 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > >
> > > vgem is a minimalistic driver that provides shmemfs objects to
> > > userspace that may then be used as an in-memory surface and transported
> > > across dma-buf to other drivers. Since its introduction,
> > > drm_gem_shmem_helper now provides the same shmemfs facilities and so we
> > > can trim vgem to wrap the helper.
> > >
> > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > ---
> > >  drivers/gpu/drm/Kconfig         |   1 +
> > >  drivers/gpu/drm/vgem/vgem_drv.c | 281 ++------------------------------
> > >  drivers/gpu/drm/vgem/vgem_drv.h |  11 --
> > >  3 files changed, 13 insertions(+), 280 deletions(-)
> >
> > Nice diffstat :-)
> >
> > Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
>
> Unfortunately I had to drop the drm_gem_prime_mmap() since the existing
> expectation is that we hand the faulthandler off to shmemfs so we can
> release the module while the memory is exported.

That sounds like a broken igt. Once we have refcounting for
outstanding dma_fences/dma-bufs or anything else, we'll block unloading
of the module (though not unbinding of the driver). Which testcase is that?
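
For context, that pinning works through file references: any live file
whose ops name a module owner holds a reference on it. A rough sketch
of the two hooks involved, both existing kernel idioms rather than
anything added by this patch:

	/* drm chardev: open device files pin the module ... */
	DEFINE_DRM_GEM_FOPS(vgem_driver_fops);	/* sets .owner = THIS_MODULE */

	/* ... and so does an exported dma-buf, via its export info */
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);	/* sets .owner = THIS_MODULE */

Once the dma-buf and the drm file are both closed, only the shmemfs
mmap is left, which is exactly the gap Chris is pointing at below.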

> The other issue happens
> to be for arch/x86 where just setting PAT=WC on the PTE does not flush
> the cache for that page, and the CPU will preferentially use the cache.
> That has caught us out more than once.

Ah, the old disappointment around wc and dma-api on x86 I guess :-/
-Daniel
-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
  2020-10-12 13:55       ` Daniel Vetter
@ 2020-10-12 14:01         ` Chris Wilson
  -1 siblings, 0 replies; 18+ messages in thread
From: Chris Wilson @ 2020-10-12 14:01 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: intel-gfx, dri-devel

Quoting Daniel Vetter (2020-10-12 14:55:07)
> On Mon, Oct 12, 2020 at 12:49 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > Quoting Daniel Vetter (2020-10-09 17:16:06)
> > > On Fri, Oct 9, 2020 at 12:21 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > > >
> > > > vgem is a minimalistic driver that provides shmemfs objects to
> > > > userspace that may then be used as an in-memory surface and transported
> > > > across dma-buf to other drivers. Since its introduction,
> > > > drm_gem_shmem_helper now provides the same shmemfs facilities and so we
> > > > can trim vgem to wrap the helper.
> > > >
> > > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > > ---
> > > >  drivers/gpu/drm/Kconfig         |   1 +
> > > >  drivers/gpu/drm/vgem/vgem_drv.c | 281 ++------------------------------
> > > >  drivers/gpu/drm/vgem/vgem_drv.h |  11 --
> > > >  3 files changed, 13 insertions(+), 280 deletions(-)
> > >
> > > Nice diffstat :-)
> > >
> > > Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
> >
> > Unfortunately I had to drop the drm_gem_prime_mmap() since the existing
> > expectation is that we hand the faulthandler off to shmemfs so we can
> > release the module while the memory is exported.
> 
> That sounds like a broken igt. Once we have refcounting for
> outstanding dma_fence/buf or anything else we'll block unloading of
> the module (not unbinding of the driver). Which one is that?

The dma-buf is closed; all that remains is the mmap. From the module's
perspective there is then no reference back to it, since handling of
the mmap is delegated back to its owner, the shmemfs builtin. That
allows the module to be removed, as its object code is no longer
required.
-Chris
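
A rough userspace sequence for the situation Chris describes (a sketch;
vgem_create_and_export() stands in for whatever ioctl/prime plumbing
the test uses and is hypothetical):

	int drm_fd = open("/dev/dri/card0", O_RDWR);	   /* vgem node */
	int dmabuf = vgem_create_and_export(drm_fd, size); /* hypothetical */
	char *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, dmabuf, 0);

	close(dmabuf);	/* drop the dma-buf file */
	close(drm_fd);	/* drop the last drm file */

	/*
	 * Only the shmemfs-backed mapping remains. Because faults are
	 * handled by shmemfs, "rmmod vgem" can now succeed while the
	 * mapping stays usable.
	 */
	ptr[0] = 1;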

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
  2020-10-12 14:01         ` Chris Wilson
@ 2020-10-12 14:12           ` Daniel Vetter
  -1 siblings, 0 replies; 18+ messages in thread
From: Daniel Vetter @ 2020-10-12 14:12 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx, dri-devel

On Mon, Oct 12, 2020 at 03:01:09PM +0100, Chris Wilson wrote:
> Quoting Daniel Vetter (2020-10-12 14:55:07)
> > On Mon, Oct 12, 2020 at 12:49 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > > Quoting Daniel Vetter (2020-10-09 17:16:06)
> > > > On Fri, Oct 9, 2020 at 12:21 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > > > >
> > > > > vgem is a minimalistic driver that provides shmemfs objects to
> > > > > userspace that may then be used as an in-memory surface and transported
> > > > > across dma-buf to other drivers. Since its introduction,
> > > > > drm_gem_shmem_helper now provides the same shmemfs facilities and so we
> > > > > can trim vgem to wrap the helper.
> > > > >
> > > > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > > > ---
> > > > >  drivers/gpu/drm/Kconfig         |   1 +
> > > > >  drivers/gpu/drm/vgem/vgem_drv.c | 281 ++------------------------------
> > > > >  drivers/gpu/drm/vgem/vgem_drv.h |  11 --
> > > > >  3 files changed, 13 insertions(+), 280 deletions(-)
> > > >
> > > > Nice diffstat :-)
> > > >
> > > > Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
> > >
> > > Unfortunately I had to drop drm_gem_prime_mmap(), since the existing
> > > expectation is that we hand the fault handler off to shmemfs so we can
> > > release the module while the memory is exported.
> > 
> > That sounds like a broken igt. Once we have refcounting for
> > outstanding dma_fence/buf or anything else we'll block unloading of
> > the module (not unbinding of the driver). Which one is that?
> 
> The dma-buf is closed; all that remains is the mmap. Then from the
> perspective of the module, there is no reference back to the module
> since we delegate handling of the mmap back to the owner, the shmemfs
> builtin. That allows us to remove the module as its object code is no
> longer required.

Oh, I know how that's possible; I wonder which testcase encodes that.
Because it really shouldn't, since that's quite far away from the rough
consensus that we cobbled together on dri-devel a few months ago about how
hotunplug should work. If it's a vgem test, meh, we can change that
whenever. But if it's a generic test that falls over on vgem, then we need
to teach it better assumptions.
-Daniel
-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

* Re: [Intel-gfx] [PATCH] drm/vgem: Replace vgem_object_funcs with the common drm shmem helper
  2020-10-12 14:12           ` Daniel Vetter
@ 2020-10-12 14:17             ` Chris Wilson
  -1 siblings, 0 replies; 18+ messages in thread
From: Chris Wilson @ 2020-10-12 14:17 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: intel-gfx, dri-devel

Quoting Daniel Vetter (2020-10-12 15:12:50)
> On Mon, Oct 12, 2020 at 03:01:09PM +0100, Chris Wilson wrote:
> > Quoting Daniel Vetter (2020-10-12 14:55:07)
> > > On Mon, Oct 12, 2020 at 12:49 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > > > Quoting Daniel Vetter (2020-10-09 17:16:06)
> > > > > On Fri, Oct 9, 2020 at 12:21 PM Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > > > > >
> > > > > > vgem is a minimalistic driver that provides shmemfs objects to
> > > > > > userspace that may then be used as an in-memory surface and transported
> > > > > > across dma-buf to other drivers. Since its introduction,
> > > > > > drm_gem_shmem_helper now provides the same shmemfs facilities and so we
> > > > > > can trim vgem to wrap the helper.
> > > > > >
> > > > > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > > > > ---
> > > > > >  drivers/gpu/drm/Kconfig         |   1 +
> > > > > >  drivers/gpu/drm/vgem/vgem_drv.c | 281 ++------------------------------
> > > > > >  drivers/gpu/drm/vgem/vgem_drv.h |  11 --
> > > > > >  3 files changed, 13 insertions(+), 280 deletions(-)
> > > > >
> > > > > Nice diffstat :-)
> > > > >
> > > > > Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
> > > >
> > > > Unfortunately I had to drop drm_gem_prime_mmap(), since the existing
> > > > expectation is that we hand the fault handler off to shmemfs so we can
> > > > release the module while the memory is exported.
> > > 
> > > That sounds like a broken igt. Once we have refcounting for
> > > outstanding dma_fence/buf or anything else we'll block unloading of
> > > the module (not unbinding of the driver). Which one is that?
> > 
> > The dma-buf is closed; all that remains is the mmap. Then from the
> > perspective of the module, there is no reference back to the module
> > since we delegate handling of the mmap back to the owner, the shmemfs
> > builtin. That allows us to remove the module as its object code is no
> > longer required.
> 
> Oh, I know how that's possible; I wonder which testcase encodes that.
> Because it really shouldn't, since that's quite far away from the rough
> consensus that we cobbled together on dri-devel a few months ago about how
> hotunplug should work. If it's a vgem test, meh, we can change that
> whenever. But if it's a generic test that falls over on vgem, then we need
> to teach it better assumptions.

We intentionally copied the module unload behaviour from i915.
-Chris
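
As a sketch of the refcounting Daniel mentions above: the dma-buf core
already pins the exporting module for the lifetime of the buffer, because
the export info carries a module owner. Roughly (vgem_export and
vgem_dmabuf_ops are hypothetical names for illustration; vgem actually
exports through the generic drm prime path):

static struct dma_buf *vgem_export(struct drm_gem_object *obj, int flags)
{
	/* DEFINE_DMA_BUF_EXPORT_INFO() sets .owner = THIS_MODULE */
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vgem_dmabuf_ops;	/* hypothetical ops table */
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	/*
	 * dma_buf_export() takes a module reference on exp_info.owner
	 * and drops it when the dma-buf is released, so rmmod is
	 * blocked for as long as the exported buffer is open anywhere.
	 */
	return dma_buf_export(&exp_info);
}

The mmap-outlives-module case in this thread is only reachable because,
once the dma-buf fd is closed, that module reference is dropped while the
shmemfs-backed mapping lives on independently.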

end of thread

Thread overview: 18+ messages
2020-10-09 10:21 [PATCH] drm/vgem: Replace vgem_object_funcs with the common drm shmem helper Chris Wilson
2020-10-09 10:21 ` [Intel-gfx] " Chris Wilson
2020-10-09 11:40 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
2020-10-09 12:04 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
2020-10-09 16:16 ` [Intel-gfx] [PATCH] " Daniel Vetter
2020-10-09 16:16   ` Daniel Vetter
2020-10-09 22:43   ` Daniel Vetter
2020-10-09 22:43     ` Daniel Vetter
2020-10-12 10:49   ` Chris Wilson
2020-10-12 10:49     ` Chris Wilson
2020-10-12 13:55     ` Daniel Vetter
2020-10-12 13:55       ` Daniel Vetter
2020-10-12 14:01       ` Chris Wilson
2020-10-12 14:01         ` Chris Wilson
2020-10-12 14:12         ` Daniel Vetter
2020-10-12 14:12           ` Daniel Vetter
2020-10-12 14:17           ` Chris Wilson
2020-10-12 14:17             ` Chris Wilson
