dri-devel.lists.freedesktop.org archive mirror
* [PATCH v3] drm/ast: add dmabuf/prime buffer sharing support
@ 2022-09-01 12:44 oushixiong
  2022-09-05  9:19 ` Thomas Zimmermann
  2022-09-07  7:50 ` Thomas Zimmermann
  0 siblings, 2 replies; 7+ messages in thread
From: oushixiong @ 2022-09-01 12:44 UTC (permalink / raw)
  To: Dave Airlie
  Cc: oushixiong, Thomas Zimmermann, David Airlie, linux-kernel,
	dri-devel, Sumit Semwal, linaro-mm-sig, Christian König,
	linux-media

[-- Attachment #1: Type: text/plain, Size: 6047 bytes --]


This patch adds AST-specific code for the DRM PRIME feature; it allows
rendering to be offloaded to one device while the output is scanned out
on the other.

This patch is designed to solve the problem that the AST output is not
displayed when the server has a discrete graphics card plugged in at the
same time. We call the dirty callback function to copy the rendering
results of the discrete graphics card to the AST side via dma-buf.

v1->v2:
  - Fix the comment.
v2->v3:
  - Remove the gem_prime_import_sg_table callback and use the
    gem_prime_import callback instead, because we only map and access
    the buffer with the CPU and do not need to pin it.

Signed-off-by: oushixiong <oushixiong@kylinos.cn>
Acked-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ast/ast_drv.c  |  27 +++++++
 drivers/gpu/drm/ast/ast_mode.c | 125 ++++++++++++++++++++++++++++++++-
 2 files changed, 151 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 7465c4f0156a..fd3c4bad2eb4 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -28,6 +28,7 @@
 
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/dma-buf.h>
 
 #include <drm/drm_aperture.h>
 #include <drm/drm_atomic_helper.h>
@@ -50,6 +51,29 @@ module_param_named(modeset, ast_modeset, int, 0400);
 
 DEFINE_DRM_GEM_FOPS(ast_fops);
 
+static struct drm_gem_object *ast_gem_prime_import(struct drm_device *dev,
+						struct dma_buf *dma_buf)
+{
+	struct drm_gem_vram_object *gbo;
+
+	gbo = drm_gem_vram_of_gem(dma_buf->priv);
+	if (gbo->bo.base.dev == dev) {
+		/*
+	 * Importing a dmabuf exported from our own GEM object increases the
+	 * refcount on the GEM object itself instead of the f_count of the dmabuf.
+		 */
+		drm_gem_object_get(&gbo->bo.base);
+		return &gbo->bo.base;
+	}
+
+	gbo = drm_gem_vram_create(dev, dma_buf->size, 0);
+	if (IS_ERR(gbo))
+		return NULL;
+
+	get_dma_buf(dma_buf);
+	return &gbo->bo.base;
+}
+
 static const struct drm_driver ast_driver = {
 	.driver_features = DRIVER_ATOMIC |
 			   DRIVER_GEM |
@@ -63,6 +87,9 @@ static const struct drm_driver ast_driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_import = ast_gem_prime_import,
+
 	DRM_GEM_VRAM_DRIVER
 };
 
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 45b56b39ad47..65a4342c5622 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -48,6 +48,8 @@
 #include "ast_drv.h"
 #include "ast_tables.h"
 
+MODULE_IMPORT_NS(DMA_BUF);
+
 static inline void ast_load_palette_index(struct ast_private *ast,
 				     u8 index, u8 red, u8 green,
 				     u8 blue)
@@ -1535,8 +1537,129 @@ static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs =
 	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 };
 
+static int ast_handle_damage(struct drm_framebuffer *fb, int x, int y,
+					int width, int height)
+{
+	struct drm_gem_vram_object *dst_bo = NULL;
+	void *dst = NULL;
+	int ret = 0, i;
+	unsigned long offset = 0;
+	bool unmap = false;
+	unsigned int bytesPerPixel;
+	struct iosys_map map;
+	struct iosys_map dmabuf_map;
+
+	bytesPerPixel = fb->format->cpp[0];
+
+	if (!fb->obj[0]->dma_buf)
+		return -EINVAL;
+
+	if (!fb->obj[0]->dma_buf->vmap_ptr.vaddr) {
+		ret = dma_buf_vmap(fb->obj[0]->dma_buf, &dmabuf_map);
+		if (ret)
+			return ret;
+	} else
+		dmabuf_map.vaddr = fb->obj[0]->dma_buf->vmap_ptr.vaddr;
+
+	dst_bo = drm_gem_vram_of_gem(fb->obj[0]);
+
+	ret = drm_gem_vram_pin(dst_bo, 0);
+	if (ret) {
+		DRM_ERROR("ast_bo_pin failed\n");
+		return ret;
+	}
+
+	if (!dst_bo->map.vaddr) {
+		ret = drm_gem_vram_vmap(dst_bo, &map);
+		if (ret) {
+			drm_gem_vram_unpin(dst_bo);
+			DRM_ERROR("failed to vmap fbcon\n");
+			return ret;
+		}
+		unmap = true;
+	}
+	dst = dst_bo->map.vaddr;
+
+	for (i = y; i < y + height; i++) {
+		offset = i * fb->pitches[0] + (x * bytesPerPixel);
+		memcpy_toio(dst + offset, dmabuf_map.vaddr + offset,
+			width * bytesPerPixel);
+	}
+
+	if (unmap)
+		drm_gem_vram_vunmap(dst_bo, &map);
+
+	drm_gem_vram_unpin(dst_bo);
+
+	return 0;
+}
+
+
+static int ast_user_framebuffer_dirty(struct drm_framebuffer *fb,
+				struct drm_file *file,
+				unsigned int flags,
+				unsigned int color,
+				struct drm_clip_rect *clips,
+				unsigned int num_clips)
+{
+	int i, ret = 0;
+
+	drm_modeset_lock_all(fb->dev);
+	if (fb->obj[0]->dma_buf) {
+		ret = dma_buf_begin_cpu_access(fb->obj[0]->dma_buf,
+				DMA_FROM_DEVICE);
+		if (ret)
+			goto unlock;
+	}
+
+	for (i = 0; i < num_clips; i++) {
+		ret = ast_handle_damage(fb, clips[i].x1, clips[i].y1,
+				clips[i].x2 - clips[i].x1, clips[i].y2 - clips[i].y1);
+		if (ret)
+			break;
+	}
+
+	if (fb->obj[0]->dma_buf) {
+		dma_buf_end_cpu_access(fb->obj[0]->dma_buf,
+				DMA_FROM_DEVICE);
+	}
+
+unlock:
+	drm_modeset_unlock_all(fb->dev);
+
+	return ret;
+}
+
+static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct iosys_map dmabuf_map;
+
+	if (fb->obj[0]->dma_buf) {
+		dmabuf_map.is_iomem = fb->obj[0]->dma_buf->vmap_ptr.is_iomem;
+		dmabuf_map.vaddr = fb->obj[0]->dma_buf->vmap_ptr.vaddr;
+		if (dmabuf_map.vaddr)
+			dma_buf_vunmap(fb->obj[0]->dma_buf, &dmabuf_map);
+	}
+
+	drm_gem_fb_destroy(fb);
+}
+
+static const struct drm_framebuffer_funcs ast_gem_fb_funcs_dirtyfb = {
+	.destroy	= ast_user_framebuffer_destroy,
+	.create_handle	= drm_gem_fb_create_handle,
+	.dirty		= ast_user_framebuffer_dirty,
+};
+
+static struct drm_framebuffer *
+ast_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+				const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+					&ast_gem_fb_funcs_dirtyfb);
+}
+
 static const struct drm_mode_config_funcs ast_mode_config_funcs = {
-	.fb_create = drm_gem_fb_create,
+	.fb_create = ast_gem_fb_create_with_dirty,
 	.mode_valid = drm_vram_helper_mode_valid,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
-- 
2.17.1



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] drm/ast: add dmabuf/prime buffer sharing support
  2022-09-01 12:44 [PATCH v3] drm/ast: add dmabuf/prime buffer sharing support oushixiong
@ 2022-09-05  9:19 ` Thomas Zimmermann
  2022-09-07  7:50 ` Thomas Zimmermann
  1 sibling, 0 replies; 7+ messages in thread
From: Thomas Zimmermann @ 2022-09-05  9:19 UTC (permalink / raw)
  To: oushixiong, Dave Airlie
  Cc: David Airlie, linux-kernel, dri-devel, Sumit Semwal,
	linaro-mm-sig, Christian König, linux-media


[-- Attachment #1.1: Type: text/plain, Size: 8919 bytes --]

Hi,

I've been on vacation. Sorry for the late reply.

I have plans to replace ast's memory manager with the GEM SHMEM helpers. 
These already support PRIME buffer sharing and the use case you describe. 
I also consider the GEM VRAM helpers used by ast as deprecated. There's 
no benefit over the SHMEM helpers, except for the bochs driver. So whatever 
we merge here might not live for too long.

Am 01.09.22 um 14:44 schrieb oushixiong:
> 
> This patch adds AST-specific code for the DRM PRIME feature; it allows
> rendering to be offloaded to one device while the output is scanned out
> on the other.
>
> This patch is designed to solve the problem that the AST output is not
> displayed when the server has a discrete graphics card plugged in at the
> same time. We call the dirty callback function to copy the rendering
> results of the discrete graphics card to the AST side via dma-buf.

I assume that userspace guarantees that both the ASPEED and the discrete 
device are set to the same display resolution?

> 
> v1->v2:
>    - Fix the comment.
> v2->v3:
>    - Remove the gem_prime_import_sg_table callback and use the
>      gem_prime_import callback instead, because we only map and access
>      the buffer with the CPU and do not need to pin it.
> 
> Signed-off-by: oushixiong <oushixiong@kylinos.cn>
> Acked-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/ast/ast_drv.c  |  27 +++++++
>   drivers/gpu/drm/ast/ast_mode.c | 125 ++++++++++++++++++++++++++++++++-
>   2 files changed, 151 insertions(+), 1 deletion(-)

There's nothing specific to AST here. Could this be built on the GEM 
VRAM helpers instead? A handful of other drivers should then be able to 
use the functionality.

> 
> diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
> index 7465c4f0156a..fd3c4bad2eb4 100644
> --- a/drivers/gpu/drm/ast/ast_drv.c
> +++ b/drivers/gpu/drm/ast/ast_drv.c
> @@ -28,6 +28,7 @@
>   
>   #include <linux/module.h>
>   #include <linux/pci.h>
> +#include <linux/dma-buf.h>

Sort alphabetically please.

>   
>   #include <drm/drm_aperture.h>
>   #include <drm/drm_atomic_helper.h>
> @@ -50,6 +51,29 @@ module_param_named(modeset, ast_modeset, int, 0400);
>   
>   DEFINE_DRM_GEM_FOPS(ast_fops);
>   
> +static struct drm_gem_object *ast_gem_prime_import(struct drm_device *dev,
> +						struct dma_buf *dma_buf)
> +{
> +	struct drm_gem_vram_object *gbo;
> +
> +	gbo = drm_gem_vram_of_gem(dma_buf->priv);
> +	if (gbo->bo.base.dev == dev) {
> +		/*
> +		 * Importing a dmabuf exported from our own GEM object increases the
> +		 * refcount on the GEM object itself instead of the f_count of the dmabuf.
> +		 */
> +		drm_gem_object_get(&gbo->bo.base);
> +		return &gbo->bo.base;
> +	}
> +
> +	gbo = drm_gem_vram_create(dev, dma_buf->size, 0);
> +	if (IS_ERR(gbo))
> +		return NULL;
> +
> +	get_dma_buf(dma_buf);
> +	return &gbo->bo.base;
> +}
> +
>   static const struct drm_driver ast_driver = {
>   	.driver_features = DRIVER_ATOMIC |
>   			   DRIVER_GEM |
> @@ -63,6 +87,9 @@ static const struct drm_driver ast_driver = {
>   	.minor = DRIVER_MINOR,
>   	.patchlevel = DRIVER_PATCHLEVEL,
>   
> +	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
> +	.gem_prime_import = ast_gem_prime_import,
> +
>   	DRM_GEM_VRAM_DRIVER
>   };
>   
> diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
> index 45b56b39ad47..65a4342c5622 100644
> --- a/drivers/gpu/drm/ast/ast_mode.c
> +++ b/drivers/gpu/drm/ast/ast_mode.c
> @@ -48,6 +48,8 @@
>   #include "ast_drv.h"
>   #include "ast_tables.h"
>   
> +MODULE_IMPORT_NS(DMA_BUF);
> +
>   static inline void ast_load_palette_index(struct ast_private *ast,
>   				     u8 index, u8 red, u8 green,
>   				     u8 blue)
> @@ -1535,8 +1537,129 @@ static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs =
>   	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
>   };
>   
> +static int ast_handle_damage(struct drm_framebuffer *fb, int x, int y,
> +					int width, int height)
> +{
> +	struct drm_gem_vram_object *dst_bo = NULL;
> +	void *dst = NULL;
> +	int ret = 0, i;
> +	unsigned long offset = 0;
> +	bool unmap = false;
> +	unsigned int bytesPerPixel;
> +	struct iosys_map map;
> +	struct iosys_map dmabuf_map;
> +
> +	bytesPerPixel = fb->format->cpp[0];
> +
> +	if (!fb->obj[0]->dma_buf)
> +		return -EINVAL;
> +
> +	if (!fb->obj[0]->dma_buf->vmap_ptr.vaddr) {
> +		ret = dma_buf_vmap(fb->obj[0]->dma_buf, &dmabuf_map);
> +		if (ret)
> +			return ret;
> +	} else
> +		dmabuf_map.vaddr = fb->obj[0]->dma_buf->vmap_ptr.vaddr;

It's too late to call dma_buf_vmap() here. The correct place would be 
drm_plane_helper_funcs.prepare_fb for the vmap and the resulting address 
would have to be stored in a VRAM plane state.  The vunmap call would go 
into drm_plane_helper_funcs.cleanup_fb.

We already have prepare_fb/cleanup_fb for the VRAM helpers.  There's no 
VRAM-specific plane-state yet.
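For illustration, a rough sketch of how that could look. The ast_plane_state
struct and the to_ast_plane_state() helper are hypothetical names; the point
is only that the dma-buf mapping lives in driver-private plane state and is
set up and torn down around the atomic commit:

struct ast_plane_state {
	struct drm_plane_state base;
	struct iosys_map dmabuf_map;	/* CPU mapping of the imported dma-buf */
};

static int ast_plane_helper_prepare_fb(struct drm_plane *plane,
				       struct drm_plane_state *new_state)
{
	struct ast_plane_state *ast_state = to_ast_plane_state(new_state);
	struct drm_framebuffer *fb = new_state->fb;
	int ret;

	/* pins the BO and vmaps it into the GEM VRAM object's map field */
	ret = drm_gem_vram_plane_helper_prepare_fb(plane, new_state);
	if (ret)
		return ret;

	/* map the imported dma-buf once per commit instead of once per clip */
	if (fb && fb->obj[0]->dma_buf)
		ret = dma_buf_vmap(fb->obj[0]->dma_buf, &ast_state->dmabuf_map);

	return ret;
}

static void ast_plane_helper_cleanup_fb(struct drm_plane *plane,
					struct drm_plane_state *old_state)
{
	struct ast_plane_state *ast_state = to_ast_plane_state(old_state);
	struct drm_framebuffer *fb = old_state->fb;

	if (fb && fb->obj[0]->dma_buf && ast_state->dmabuf_map.vaddr)
		dma_buf_vunmap(fb->obj[0]->dma_buf, &ast_state->dmabuf_map);

	drm_gem_vram_plane_helper_cleanup_fb(plane, old_state);
}

The damage-handling path would then simply read ast_state->dmabuf_map
instead of calling dma_buf_vmap() on every update.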

> +
> +	dst_bo = drm_gem_vram_of_gem(fb->obj[0]);
> +
> +	ret = drm_gem_vram_pin(dst_bo, 0);
> +	if (ret) {
> +		DRM_ERROR("ast_bo_pin failed\n");
> +		return ret;
> +	}

Like vmap, pinning should be done by the existing prepare_fb helper
drm_gem_vram_plane_helper_prepare_fb().

> +
> +	if (!dst_bo->map.vaddr) {
> +		ret = drm_gem_vram_vmap(dst_bo, &map);
> +		if (ret) {
> +			drm_gem_vram_unpin(dst_bo);
> +			DRM_ERROR("failed to vmap fbcon\n");
> +			return ret;
> +		}
> +		unmap = true;
> +	}
> +	dst = dst_bo->map.vaddr;
> +
> +	for (i = y; i < y + height; i++) {
> +		offset = i * fb->pitches[0] + (x * bytesPerPixel);
> +		memcpy_toio(dst + offset, dmabuf_map.vaddr + offset,
> +			width * bytesPerPixel);
> +	}

drm_fb_memcpy() implements this logic already.
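As a rough sketch only: the loop could collapse into one call to the format
helper. The iosys_map-based drm_fb_memcpy() prototype used below is the one
from newer kernels; older kernels provide drm_fb_memcpy_toio() with raw
pointers instead, so treat the exact call as an assumption:

/* needs <drm/drm_format_helper.h>, <drm/drm_rect.h>, <linux/iosys-map.h> */
static void ast_copy_clip(struct drm_framebuffer *fb,
			  const struct iosys_map *vram_map,
			  const struct iosys_map *dmabuf_map,
			  const struct drm_rect *clip)
{
	struct iosys_map dst = *vram_map;

	/* advance the VRAM destination to the first pixel of the clip */
	iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));

	/* line-by-line copy of the clip, honouring the framebuffer pitch */
	drm_fb_memcpy(&dst, fb->pitches, dmabuf_map, fb, clip);
}

The caller would build the clip rectangle with DRM_RECT_INIT(x, y, width,
height).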

> +
> +	if (unmap)
> +		drm_gem_vram_vunmap(dst_bo, &map);
> +
> +	drm_gem_vram_unpin(dst_bo);

Cleanup goes into drm_gem_vram_plane_helper_cleanup_fb()

> +
> +	return 0;
> +}
> +
> +
> +static int ast_user_framebuffer_dirty(struct drm_framebuffer *fb,
> +				struct drm_file *file,
> +				unsigned int flags,
> +				unsigned int color,
> +				struct drm_clip_rect *clips,
> +				unsigned int num_clips)
> +{
> +	int i, ret = 0;
> +
> +	drm_modeset_lock_all(fb->dev);
> +	if (fb->obj[0]->dma_buf) {
> +		ret = dma_buf_begin_cpu_access(fb->obj[0]->dma_buf,
> +				DMA_FROM_DEVICE);
> +		if (ret)
> +			goto unlock;
> +	}

This is already available in drm_gem_fb_begin_cpu_access()

> +
> +	for (i = 0; i < num_clips; i++) {
> +		ret = ast_handle_damage(fb, clips[i].x1, clips[i].y1,
> +				clips[i].x2 - clips[i].x1, clips[i].y2 - clips[i].y1);
> +		if (ret)
> +			break;
> +	}
> +
> +	if (fb->obj[0]->dma_buf) {
> +		dma_buf_end_cpu_access(fb->obj[0]->dma_buf,
> +				DMA_FROM_DEVICE);

That's in drm_gem_fb_end_cpu_access()
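Put together, a sketch of the dirty callback built only on the generic
helpers (locking and the actual per-clip copy elided; ast_copy_clip() refers
to the hypothetical helper sketched above):

static int ast_user_framebuffer_dirty(struct drm_framebuffer *fb,
				      struct drm_file *file,
				      unsigned int flags, unsigned int color,
				      struct drm_clip_rect *clips,
				      unsigned int num_clips)
{
	unsigned int i;
	int ret;

	/* wraps dma_buf_begin_cpu_access() for imported buffers */
	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	for (i = 0; i < num_clips; ++i) {
		/*
		 * Convert clips[i] into a struct drm_rect (DRM_RECT_INIT)
		 * and copy it from the imported dma-buf into VRAM, e.g.
		 * with the ast_copy_clip() helper sketched above.
		 */
	}

	/* wraps dma_buf_end_cpu_access() */
	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);

	return 0;
}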

> +	}
> +
> +unlock:
> +	drm_modeset_unlock_all(fb->dev);
> +
> +	return ret;
> +}
> +
> +static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
> +{
> +	struct iosys_map dmabuf_map;
> +
> +	if (fb->obj[0]->dma_buf) {
> +		dmabuf_map.is_iomem = fb->obj[0]->dma_buf->vmap_ptr.is_iomem;
> +		dmabuf_map.vaddr = fb->obj[0]->dma_buf->vmap_ptr.vaddr;
> +		if (dmabuf_map.vaddr)
> +			dma_buf_vunmap(fb->obj[0]->dma_buf, &dmabuf_map);
> +	}
> +
> +	drm_gem_fb_destroy(fb);
> +}
> +
> +static const struct drm_framebuffer_funcs ast_gem_fb_funcs_dirtyfb = {
> +	.destroy	= ast_user_framebuffer_destroy,
> +	.create_handle	= drm_gem_fb_create_handle,
> +	.dirty		= ast_user_framebuffer_dirty,
> +};
> +
> +static struct drm_framebuffer *
> +ast_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
> +				const struct drm_mode_fb_cmd2 *mode_cmd)
> +{
> +	return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
> +					&ast_gem_fb_funcs_dirtyfb);
> +}
> +
>   static const struct drm_mode_config_funcs ast_mode_config_funcs = {
> -	.fb_create = drm_gem_fb_create,
> +	.fb_create = ast_gem_fb_create_with_dirty,

AFAICT, the whole framebuffer update code can be implemented with 
existing generic helpers plus the VRAM updates I outlined above. As part 
of ast's atomic update, the displayed data will then be updated from the 
dma-buf-attached external BO.

But as I said, I consider the VRAM helpers deprecated. And once I have 
moved ast over to the SHMEM helpers, the functionality will be there as well.

Best regards
Thomas


>   	.mode_valid = drm_vram_helper_mode_valid,
>   	.atomic_check = drm_atomic_helper_check,
>   	.atomic_commit = drm_atomic_helper_commit,
> 
> 

-- 
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Maxfeldstr. 5, 90409 Nürnberg, Germany
(HRB 36809, AG Nürnberg)
Geschäftsführer: Ivo Totev

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 840 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] drm/ast: add dmabuf/prime buffer sharing support
  2022-09-01 12:44 [PATCH v3] drm/ast: add dmabuf/prime buffer sharing support oushixiong
  2022-09-05  9:19 ` Thomas Zimmermann
@ 2022-09-07  7:50 ` Thomas Zimmermann
  2022-09-07  8:10   ` Christian König
  1 sibling, 1 reply; 7+ messages in thread
From: Thomas Zimmermann @ 2022-09-07  7:50 UTC (permalink / raw)
  To: oushixiong, Dave Airlie
  Cc: David Airlie, linux-kernel, dri-devel, Sumit Semwal,
	linaro-mm-sig, Christian König, linux-media


[-- Attachment #1.1: Type: text/plain, Size: 7388 bytes --]

Hi,

on a more general note, let me say that your patch doesn't seem to fit 
the ideas of how buffer sharing is supposed to work. Your patch does the 
BMC screen update 'behind the scenes.'

Shouldn't userspace set up the DRM state for mirroring the output of the 
discrete card to the BMC?

Best regards
Thomas


-- 
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Maxfeldstr. 5, 90409 Nürnberg, Germany
(HRB 36809, AG Nürnberg)
Geschäftsführer: Ivo Totev

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 840 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] drm/ast: add dmabuf/prime buffer sharing support
  2022-09-07  7:50 ` Thomas Zimmermann
@ 2022-09-07  8:10   ` Christian König
  2022-09-07  9:40     ` Thomas Zimmermann
  0 siblings, 1 reply; 7+ messages in thread
From: Christian König @ 2022-09-07  8:10 UTC (permalink / raw)
  To: Thomas Zimmermann, oushixiong, Dave Airlie
  Cc: David Airlie, linux-kernel, dri-devel, linaro-mm-sig,
	Sumit Semwal, linux-media

Hi Thomas,

I was wondering pretty much the same thing, but then thought that this 
might be the first step to direct scanout from DMA-bufs.

If this isn't the case, then I see this rather critically, since that 
functionality belongs in userspace.

Regards,
Christian.

Am 07.09.22 um 09:50 schrieb Thomas Zimmermann:
> Hi,
>
> on a more general note, let me say that your patch doesn't seem to fit 
> the ideas of how buffer sharing is supposed to work. Your patch does 
> the BMC screen update 'behind the scenes.'
>
> Shouldn't userspace set up the DRM state for mirroring the output of 
> the discrete card to the BMC?
>
> Best regards
> Thomas


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] drm/ast: add dmabuf/prime buffer sharing support
  2022-09-07  8:10   ` Christian König
@ 2022-09-07  9:40     ` Thomas Zimmermann
  2022-09-07 10:14       ` oushixiong
  0 siblings, 1 reply; 7+ messages in thread
From: Thomas Zimmermann @ 2022-09-07  9:40 UTC (permalink / raw)
  To: Christian König, oushixiong, Dave Airlie
  Cc: David Airlie, linux-kernel, dri-devel, linaro-mm-sig,
	Sumit Semwal, linux-media


[-- Attachment #1.1: Type: text/plain, Size: 10060 bytes --]

Hi

Am 07.09.22 um 10:10 schrieb Christian König:
> Hi Thomas,
> 
> I was wondering pretty much the same thing, but then thought that this 
> might be the first step to direct scanout from DMA-bufs.
> 
> If this isn't the case, then I see this rather critically, since that
> functionality belongs in userspace.

With GEM VRAM helpers, ast currently doesn't support dma-buf sharing. I 
do have patches that convert it to GEM SHMEM (for other reasons), which 
would also add this functionality.

I intend to post these patches in the coming days. My suggestion is to 
merge them first and then see how to go from there.

Best regards
Thomas


-- 
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Maxfeldstr. 5, 90409 Nürnberg, Germany
(HRB 36809, AG Nürnberg)
Geschäftsführer: Ivo Totev

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 840 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v3] drm/ast: add dmabuf/prime buffer sharing support
  2022-09-07  9:40     ` Thomas Zimmermann
@ 2022-09-07 10:14       ` oushixiong
  0 siblings, 0 replies; 7+ messages in thread
From: oushixiong @ 2022-09-07 10:14 UTC (permalink / raw)
  To: Thomas Zimmermann, Christian König, Dave Airlie
  Cc: David Airlie, linux-kernel, dri-devel, linaro-mm-sig,
	Sumit Semwal, linux-media

Hi,

Firstly, the dirty() callback is called from drm_mode_dirtyfb_ioctl():

     drm_mode_dirtyfb_ioctl
         |__ fb->funcs->dirty(fb, file_priv, flags, r->color, clips, num_clips)
                 |__ ast_user_framebuffer_dirty
                         |__ ast_handle_damage
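
For illustration, a minimal userspace sequence that ends up in this path
could look roughly like the following (a libdrm-based sketch; the function
name, format and sizes are made-up placeholders, include paths assume
libdrm's include directory, and error paths are trimmed):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>

/* hypothetical helper: import a dGPU dma-buf into ast and flush it */
int ast_present_dmabuf(int ast_fd, int dmabuf_fd,
		       uint32_t width, uint32_t height, uint32_t pitch)
{
	uint32_t handle, fb_id;
	uint32_t handles[4] = { 0 }, pitches[4] = { 0 }, offsets[4] = { 0 };
	drmModeClip clip = { .x1 = 0, .y1 = 0, .x2 = width, .y2 = height };
	int ret;

	/* dma-buf fd -> GEM handle on ast (ends up in ast_gem_prime_import) */
	ret = drmPrimeFDToHandle(ast_fd, dmabuf_fd, &handle);
	if (ret)
		return ret;

	handles[0] = handle;
	pitches[0] = pitch;
	ret = drmModeAddFB2(ast_fd, width, height, DRM_FORMAT_XRGB8888,
			    handles, pitches, offsets, &fb_id, 0);
	if (ret)
		return ret;

	/* ... set a CRTC to scan out fb_id, render on the discrete GPU ... */

	/* flush the damage; this is the drm_mode_dirtyfb_ioctl() path above */
	return drmModeDirtyFB(ast_fd, fb_id, &clip, 1);
}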

Secondly, due to hardware limitations, the AST display controller cannot 
access a dmabuf that lives in the GTT, so the data has to be copied into 
the AST VRAM. If we did not use dma-buf, the data would need to be copied 
from the discrete card's VRAM to system memory, and then from system 
memory to the AST VRAM.

Best regards
oushixiong


On 2022/9/7 5:40 PM, Thomas Zimmermann wrote:
> Hi,
>
> Am 07.09.22 um 10:10 schrieb Christian König:
>> Hi Thomas,
>>
>> I was wondering pretty much the same thing, but then thought that 
>> this might be the first step to direct scanout from DMA-bufs.
>>
>> If this isn't the case, then I see this rather critically, since
>> that functionality belongs in userspace.
>
> With GEM VRAM helpers, ast currently doesn't support dma-buf sharing. 
> I do have patches that convert it to GEM SHMEM (for other reasons), 
> which would also add this functionality.
>
> I intend to post these patches in the coming days. My suggestion is to 
> merge them first and then see how to go from there.
>
> Best regards
> Thomas

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v3] drm/ast: add dmabuf/prime buffer sharing support
@ 2022-08-29  5:59 oushixiong
  0 siblings, 0 replies; 7+ messages in thread
From: oushixiong @ 2022-08-29  5:59 UTC (permalink / raw)
  To: Dave Airlie
  Cc: oushixiong, Thomas Zimmermann, David Airlie, linux-kernel,
	dri-devel, Sumit Semwal, linaro-mm-sig, kernel test robot,
	Christian König, linux-media

This patch adds AST-specific code for the DRM PRIME feature; it allows
rendering to be offloaded to one device while the output is scanned out
on the other.

This patch is designed to solve the problem that the AST output is not
displayed when the server has a discrete graphics card plugged in at the
same time. We call the dirty callback function to copy the rendering
results of the discrete graphics card to the AST side via dma-buf.

v1->v2:
  - Fix the comment.
v2->v3:
  - Remove the gem_prime_import_sg_table callback and use the
    gem_prime_import callback instead, because we only map and access
    the buffer with the CPU and do not need to pin it.

Signed-off-by: oushixiong <oushixiong@kylinos.cn>
Reported-by: kernel test robot <lkp@intel.com>
---
 drivers/gpu/drm/ast/ast_drv.c  |  27 +++++++
 drivers/gpu/drm/ast/ast_mode.c | 125 ++++++++++++++++++++++++++++++++-
 2 files changed, 151 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 7465c4f0156a..fd3c4bad2eb4 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -28,6 +28,7 @@
 
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/dma-buf.h>
 
 #include <drm/drm_aperture.h>
 #include <drm/drm_atomic_helper.h>
@@ -50,6 +51,29 @@ module_param_named(modeset, ast_modeset, int, 0400);
 
 DEFINE_DRM_GEM_FOPS(ast_fops);
 
+static struct drm_gem_object *ast_gem_prime_import(struct drm_device *dev,
+						struct dma_buf *dma_buf)
+{
+	struct drm_gem_vram_object *gbo;
+
+	gbo = drm_gem_vram_of_gem(dma_buf->priv);
+	if (gbo->bo.base.dev == dev) {
+		/*
+	 * Importing a dmabuf exported from our own GEM object increases the
+	 * refcount on the GEM object itself instead of the f_count of the dmabuf.
+		 */
+		drm_gem_object_get(&gbo->bo.base);
+		return &gbo->bo.base;
+	}
+
+	gbo = drm_gem_vram_create(dev, dma_buf->size, 0);
+	if (IS_ERR(gbo))
+		return NULL;
+
+	get_dma_buf(dma_buf);
+	return &gbo->bo.base;
+}
+
 static const struct drm_driver ast_driver = {
 	.driver_features = DRIVER_ATOMIC |
 			   DRIVER_GEM |
@@ -63,6 +87,9 @@ static const struct drm_driver ast_driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_import = ast_gem_prime_import,
+
 	DRM_GEM_VRAM_DRIVER
 };
 
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 45b56b39ad47..65a4342c5622 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -48,6 +48,8 @@
 #include "ast_drv.h"
 #include "ast_tables.h"
 
+MODULE_IMPORT_NS(DMA_BUF);
+
 static inline void ast_load_palette_index(struct ast_private *ast,
 				     u8 index, u8 red, u8 green,
 				     u8 blue)
@@ -1535,8 +1537,129 @@ static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs =
 	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 };
 
+static int ast_handle_damage(struct drm_framebuffer *fb, int x, int y,
+					int width, int height)
+{
+	struct drm_gem_vram_object *dst_bo = NULL;
+	void *dst = NULL;
+	int ret = 0, i;
+	unsigned long offset = 0;
+	bool unmap = false;
+	unsigned int bytesPerPixel;
+	struct iosys_map map;
+	struct iosys_map dmabuf_map;
+
+	bytesPerPixel = fb->format->cpp[0];
+
+	if (!fb->obj[0]->dma_buf)
+		return -EINVAL;
+
+	if (!fb->obj[0]->dma_buf->vmap_ptr.vaddr) {
+		ret = dma_buf_vmap(fb->obj[0]->dma_buf, &dmabuf_map);
+		if (ret)
+			return ret;
+	} else
+		dmabuf_map.vaddr = fb->obj[0]->dma_buf->vmap_ptr.vaddr;
+
+	dst_bo = drm_gem_vram_of_gem(fb->obj[0]);
+
+	ret = drm_gem_vram_pin(dst_bo, 0);
+	if (ret) {
+		DRM_ERROR("ast_bo_pin failed\n");
+		return ret;
+	}
+
+	if (!dst_bo->map.vaddr) {
+		ret = drm_gem_vram_vmap(dst_bo, &map);
+		if (ret) {
+			drm_gem_vram_unpin(dst_bo);
+			DRM_ERROR("failed to vmap fbcon\n");
+			return ret;
+		}
+		unmap = true;
+	}
+	dst = dst_bo->map.vaddr;
+
+	for (i = y; i < y + height; i++) {
+		offset = i * fb->pitches[0] + (x * bytesPerPixel);
+		memcpy_toio(dst + offset, dmabuf_map.vaddr + offset,
+			width * bytesPerPixel);
+	}
+
+	if (unmap)
+		drm_gem_vram_vunmap(dst_bo, &map);
+
+	drm_gem_vram_unpin(dst_bo);
+
+	return 0;
+}
+
+
+static int ast_user_framebuffer_dirty(struct drm_framebuffer *fb,
+				struct drm_file *file,
+				unsigned int flags,
+				unsigned int color,
+				struct drm_clip_rect *clips,
+				unsigned int num_clips)
+{
+	int i, ret = 0;
+
+	drm_modeset_lock_all(fb->dev);
+	if (fb->obj[0]->dma_buf) {
+		ret = dma_buf_begin_cpu_access(fb->obj[0]->dma_buf,
+				DMA_FROM_DEVICE);
+		if (ret)
+			goto unlock;
+	}
+
+	for (i = 0; i < num_clips; i++) {
+		ret = ast_handle_damage(fb, clips[i].x1, clips[i].y1,
+				clips[i].x2 - clips[i].x1, clips[i].y2 - clips[i].y1);
+		if (ret)
+			break;
+	}
+
+	if (fb->obj[0]->dma_buf) {
+		dma_buf_end_cpu_access(fb->obj[0]->dma_buf,
+				DMA_FROM_DEVICE);
+	}
+
+unlock:
+	drm_modeset_unlock_all(fb->dev);
+
+	return ret;
+}
+
+static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct iosys_map dmabuf_map;
+
+	if (fb->obj[0]->dma_buf) {
+		dmabuf_map.is_iomem = fb->obj[0]->dma_buf->vmap_ptr.is_iomem;
+		dmabuf_map.vaddr = fb->obj[0]->dma_buf->vmap_ptr.vaddr;
+		if (dmabuf_map.vaddr)
+			dma_buf_vunmap(fb->obj[0]->dma_buf, &dmabuf_map);
+	}
+
+	drm_gem_fb_destroy(fb);
+}
+
+static const struct drm_framebuffer_funcs ast_gem_fb_funcs_dirtyfb = {
+	.destroy	= ast_user_framebuffer_destroy,
+	.create_handle	= drm_gem_fb_create_handle,
+	.dirty		= ast_user_framebuffer_dirty,
+};
+
+static struct drm_framebuffer *
+ast_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+				const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+					&ast_gem_fb_funcs_dirtyfb);
+}
+
 static const struct drm_mode_config_funcs ast_mode_config_funcs = {
-	.fb_create = drm_gem_fb_create,
+	.fb_create = ast_gem_fb_create_with_dirty,
 	.mode_valid = drm_vram_helper_mode_valid,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
-- 
2.17.1



^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2022-09-07 10:14 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-09-01 12:44 [PATCH v3] drm/ast: add dmabuf/prime buffer sharing support oushixiong
2022-09-05  9:19 ` Thomas Zimmermann
2022-09-07  7:50 ` Thomas Zimmermann
2022-09-07  8:10   ` Christian König
2022-09-07  9:40     ` Thomas Zimmermann
2022-09-07 10:14       ` oushixiong
  -- strict thread matches above, loose matches on Subject: below --
2022-08-29  5:59 oushixiong
