From: "Noralf Trønnes via B4 Submission Endpoint" <devnull+noralf.tronnes.org@kernel.org>
To: "Thomas Zimmermann" <tzimmermann@suse.de>,
	"Javier Martinez Canillas" <javierm@redhat.com>,
	dri-devel@lists.freedesktop.org,
	"Maxime Ripard" <mripard@kernel.org>,
	stable@kernel.org,
	Noralf Trønnes <noralf@tronnes.org>
Subject: [PATCH v2 4/6] drm/gud: Prepare buffer for CPU access in gud_flush_work()
Date: Wed, 30 Nov 2022 20:26:52 +0100	[thread overview]
Message-ID: <20221122-gud-shadow-plane-v2-4-435037990a83@tronnes.org> (raw)
In-Reply-To: <20221122-gud-shadow-plane-v2-0-435037990a83@tronnes.org>

From: Noralf Trønnes <noralf@tronnes.org>

In preparation for moving to the shadow plane helper, prepare the
framebuffer for CPU access as early as possible.

v2:
- Use src as variable name for iosys_map (Thomas)

Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
---
 drivers/gpu/drm/gud/gud_pipe.c | 67 +++++++++++++++++++++---------------------
 1 file changed, 33 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index d2af9947494f..98fe8efda4a9 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -15,6 +15,7 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_print.h>
 #include <drm/drm_rect.h>
@@ -152,32 +153,21 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
 }
 
 static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
+			  const struct iosys_map *src, bool cached_reads,
 			  const struct drm_format_info *format, struct drm_rect *rect,
 			  struct gud_set_buffer_req *req)
 {
-	struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
 	u8 compression = gdrm->compression;
-	struct iosys_map map[DRM_FORMAT_MAX_PLANES] = { };
-	struct iosys_map map_data[DRM_FORMAT_MAX_PLANES] = { };
 	struct iosys_map dst;
 	void *vaddr, *buf;
 	size_t pitch, len;
-	int ret = 0;
 
 	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect));
 	len = pitch * drm_rect_height(rect);
 	if (len > gdrm->bulk_len)
 		return -E2BIG;
 
-	ret = drm_gem_fb_vmap(fb, map, map_data);
-	if (ret)
-		return ret;
-
-	vaddr = map_data[0].vaddr;
-
-	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
-	if (ret)
-		goto vunmap;
+	vaddr = src[0].vaddr;
 retry:
 	if (compression)
 		buf = gdrm->compress_buf;
@@ -192,29 +182,27 @@ static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
 	if (format != fb->format) {
 		if (format->format == GUD_DRM_FORMAT_R1) {
 			len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
-			if (!len) {
-				ret = -ENOMEM;
-				goto end_cpu_access;
-			}
+			if (!len)
+				return -ENOMEM;
 		} else if (format->format == DRM_FORMAT_R8) {
-			drm_fb_xrgb8888_to_gray8(&dst, NULL, map_data, fb, rect);
+			drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect);
 		} else if (format->format == DRM_FORMAT_RGB332) {
-			drm_fb_xrgb8888_to_rgb332(&dst, NULL, map_data, fb, rect);
+			drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect);
 		} else if (format->format == DRM_FORMAT_RGB565) {
-			drm_fb_xrgb8888_to_rgb565(&dst, NULL, map_data, fb, rect,
+			drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
 						  gud_is_big_endian());
 		} else if (format->format == DRM_FORMAT_RGB888) {
-			drm_fb_xrgb8888_to_rgb888(&dst, NULL, map_data, fb, rect);
+			drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect);
 		} else {
 			len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
 		}
 	} else if (gud_is_big_endian() && format->cpp[0] > 1) {
-		drm_fb_swab(&dst, NULL, map_data, fb, rect, !import_attach);
-	} else if (compression && !import_attach && pitch == fb->pitches[0]) {
+		drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads);
+	} else if (compression && cached_reads && pitch == fb->pitches[0]) {
 		/* can compress directly from the framebuffer */
 		buf = vaddr + rect->y1 * pitch;
 	} else {
-		drm_fb_memcpy(&dst, NULL, map_data, fb, rect);
+		drm_fb_memcpy(&dst, NULL, src, fb, rect);
 	}
 
 	memset(req, 0, sizeof(*req));
@@ -237,12 +225,7 @@ static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
 		req->compressed_length = cpu_to_le32(complen);
 	}
 
-end_cpu_access:
-	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-vunmap:
-	drm_gem_fb_vunmap(fb, map);
-
-	return ret;
+	return 0;
 }
 
 struct gud_usb_bulk_context {
@@ -285,6 +268,7 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
 }
 
 static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
+			  const struct iosys_map *src, bool cached_reads,
 			  const struct drm_format_info *format, struct drm_rect *rect)
 {
 	struct gud_set_buffer_req req;
@@ -293,7 +277,7 @@ static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
 
 	drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
 
-	ret = gud_prep_flush(gdrm, fb, format, rect, &req);
+	ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req);
 	if (ret)
 		return ret;
 
@@ -334,6 +318,7 @@ void gud_clear_damage(struct gud_device *gdrm)
 }
 
 static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+			     const struct iosys_map *src, bool cached_reads,
 			     struct drm_rect *damage)
 {
 	const struct drm_format_info *format;
@@ -358,7 +343,7 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb
 		rect.y1 += i * lines;
 		rect.y2 = min_t(u32, rect.y1 + lines, damage->y2);
 
-		ret = gud_flush_rect(gdrm, fb, format, &rect);
+		ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect);
 		if (ret) {
 			if (ret != -ENODEV && ret != -ECONNRESET &&
 			    ret != -ESHUTDOWN && ret != -EPROTO)
@@ -373,9 +358,10 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb
 void gud_flush_work(struct work_struct *work)
 {
 	struct gud_device *gdrm = container_of(work, struct gud_device, work);
+	struct iosys_map gem_map = { }, fb_map = { };
 	struct drm_framebuffer *fb;
 	struct drm_rect damage;
-	int idx;
+	int idx, ret;
 
 	if (!drm_dev_enter(&gdrm->drm, &idx))
 		return;
@@ -390,8 +376,21 @@ void gud_flush_work(struct work_struct *work)
 	if (!fb)
 		goto out;
 
-	gud_flush_damage(gdrm, fb, &damage);
+	ret = drm_gem_fb_vmap(fb, &gem_map, &fb_map);
+	if (ret)
+		goto fb_put;
 
+	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+	if (ret)
+		goto vunmap;
+
+	/* Imported buffers are assumed to be WriteCombined with uncached reads */
+	gud_flush_damage(gdrm, fb, &fb_map, !fb->obj[0]->import_attach, &damage);
+
+	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+vunmap:
+	drm_gem_fb_vunmap(fb, &gem_map);
+fb_put:
 	drm_framebuffer_put(fb);
 out:
 	drm_dev_exit(idx);

-- 
2.34.1
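
For reference, below is a minimal sketch of how gud_flush_work() reads with
this patch applied, assembled from the hunks above. The part the diff elides
(taking the pending framebuffer and damage rect under gdrm->damage_lock) is
represented only by a comment and is not quoted from the driver, so treat the
sketch as illustrative rather than authoritative.

void gud_flush_work(struct work_struct *work)
{
	struct gud_device *gdrm = container_of(work, struct gud_device, work);
	struct iosys_map gem_map = { }, fb_map = { };
	struct drm_framebuffer *fb;
	struct drm_rect damage;
	int idx, ret;

	if (!drm_dev_enter(&gdrm->drm, &idx))
		return;

	/* Elided by the diff: fetch the pending fb and damage rect under gdrm->damage_lock */

	if (!fb)
		goto out;

	/* New in this patch: map the buffer and begin CPU access once, up front */
	ret = drm_gem_fb_vmap(fb, &gem_map, &fb_map);
	if (ret)
		goto fb_put;

	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		goto vunmap;

	/* Imported buffers are assumed to be WriteCombined with uncached reads */
	gud_flush_damage(gdrm, fb, &fb_map, !fb->obj[0]->import_attach, &damage);

	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
vunmap:
	drm_gem_fb_vunmap(fb, &gem_map);
fb_put:
	drm_framebuffer_put(fb);
out:
	drm_dev_exit(idx);
}

The effect of the reordering is that drm_gem_fb_vmap() and
drm_gem_fb_begin_cpu_access() now run once per flush, before gud_flush_damage()
splits the damage rect into per-transfer chunks, instead of once per
gud_prep_flush() call.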


Thread overview: 43+ messages
2022-11-30 19:26 [PATCH v2 0/6] drm/gud: Use the shadow plane helper Noralf Trønnes via B4 Submission Endpoint
2022-11-30 19:26 ` Noralf Trønnes
2022-11-30 19:26 ` [PATCH v2 1/6] drm/gud: Fix UBSAN warning Noralf Trønnes via B4 Submission Endpoint
2022-11-30 19:26   ` Noralf Trønnes
2022-11-30 19:26 ` [PATCH v2 2/6] drm/gud: Don't retry a failed framebuffer flush Noralf Trønnes via B4 Submission Endpoint
2022-11-30 19:26   ` Noralf Trønnes
2022-11-30 19:26 ` [PATCH v2 3/6] drm/gud: Split up gud_flush_work() Noralf Trønnes via B4 Submission Endpoint
2022-11-30 19:26   ` Noralf Trønnes
2022-11-30 19:26 ` Noralf Trønnes via B4 Submission Endpoint [this message]
2022-11-30 19:26   ` [PATCH v2 4/6] drm/gud: Prepare buffer for CPU access in gud_flush_work() Noralf Trønnes
2022-12-01  8:51   ` Thomas Zimmermann
2022-11-30 19:26 ` [PATCH v2 5/6] drm/gud: Use the shadow plane helper Noralf Trønnes via B4 Submission Endpoint
2022-11-30 19:26   ` Noralf Trønnes
2022-12-01  8:55   ` Thomas Zimmermann
2022-11-30 19:26 ` [PATCH v2 6/6] drm/gud: Enable synchronous flushing by default Noralf Trønnes via B4 Submission Endpoint
2022-11-30 19:26   ` Noralf Trønnes
2022-12-01  8:57   ` Thomas Zimmermann
2022-12-01  5:55 ` [PATCH v2 0/6] drm/gud: Use the shadow plane helper Greg KH
2022-12-01 10:00   ` Noralf Trønnes
2022-12-01 10:00     ` Noralf Trønnes
2022-12-01 12:12     ` Greg KH
2022-12-01 12:12       ` Greg KH
2022-12-01 13:14       ` Noralf Trønnes
2022-12-01 13:14         ` Noralf Trønnes
2022-12-01 13:21         ` Greg KH
2022-12-01 13:21           ` Greg KH
2022-12-01 13:34           ` Javier Martinez Canillas
2022-12-01 13:34             ` Javier Martinez Canillas
2022-12-01 14:16             ` Konstantin Ryabitsev
2022-12-01 14:16               ` Konstantin Ryabitsev
2022-12-01 14:20               ` Javier Martinez Canillas
2022-12-01 14:20                 ` Javier Martinez Canillas
2022-12-01 14:27               ` Vlastimil Babka
2022-12-01 14:27                 ` Vlastimil Babka
2022-12-01 14:32                 ` Mark Brown
2022-12-01 14:32                   ` Mark Brown
2023-01-05 12:35                   ` Daniel Vetter
2023-01-05 12:35                     ` Daniel Vetter
2023-01-05 16:00                     ` Konstantin Ryabitsev
2023-01-05 16:00                       ` Konstantin Ryabitsev
2022-12-01 20:52               ` Noralf Trønnes
2022-12-01 20:52                 ` Noralf Trønnes
2022-12-06 15:57 ` Noralf Trønnes
