From: Gerd Hoffmann <kraxel@redhat.com>
To: qemu-devel@nongnu.org
Cc: Paolo Bonzini <pbonzini@redhat.com>,
	Gerd Hoffmann <kraxel@redhat.com>,
	Auger Eric <eric.auger@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>
Subject: [PULL 09/25] virtio-gpu: handle partial maps properly
Date: Mon, 10 May 2021 15:20:35 +0200
Message-ID: <20210510132051.2208563-10-kraxel@redhat.com>
In-Reply-To: <20210510132051.2208563-1-kraxel@redhat.com>

dma_memory_map() may map only part of the request.  This happens when
the request can't be mapped in one go, for example because an IOMMU
creates a linear dma mapping for scattered physical pages.  In that
case virtio-gpu must call dma_memory_map() again for the remaining
range instead of simply throwing an error.

Note that this change implies the number of iov entries may differ
from the number of mapping entries sent by the guest.  Therefore the
iov count bookkeeping needs some updates too; we now have to pass the
number of iov entries around explicitly.

Reported-by: Auger Eric <eric.auger@redhat.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Message-id: 20210506091001.1301250-1-kraxel@redhat.com
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Message-Id: <20210506091001.1301250-1-kraxel@redhat.com>
---
 include/hw/virtio/virtio-gpu.h |  3 +-
 hw/display/virtio-gpu-3d.c     |  7 ++--
 hw/display/virtio-gpu.c        | 76 ++++++++++++++++++++--------------
 3 files changed, 52 insertions(+), 34 deletions(-)

diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index fae149235c58..0d15af41d96d 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -209,7 +209,8 @@ void virtio_gpu_get_edid(VirtIOGPU *g,
 int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                   struct virtio_gpu_resource_attach_backing *ab,
                                   struct virtio_gpu_ctrl_command *cmd,
-                                  uint64_t **addr, struct iovec **iov);
+                                  uint64_t **addr, struct iovec **iov,
+                                  uint32_t *niov);
 void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                     struct iovec *iov, uint32_t count);
 void virtio_gpu_process_cmdq(VirtIOGPU *g);
diff --git a/hw/display/virtio-gpu-3d.c b/hw/display/virtio-gpu-3d.c
index d98964858e13..72c14d91324b 100644
--- a/hw/display/virtio-gpu-3d.c
+++ b/hw/display/virtio-gpu-3d.c
@@ -283,22 +283,23 @@ static void virgl_resource_attach_backing(VirtIOGPU *g,
 {
     struct virtio_gpu_resource_attach_backing att_rb;
     struct iovec *res_iovs;
+    uint32_t res_niov;
     int ret;
 
     VIRTIO_GPU_FILL_CMD(att_rb);
     trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);
 
-    ret = virtio_gpu_create_mapping_iov(g, &att_rb, cmd, NULL, &res_iovs);
+    ret = virtio_gpu_create_mapping_iov(g, &att_rb, cmd, NULL, &res_iovs, &res_niov);
     if (ret != 0) {
         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
         return;
     }
 
     ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
-                                             res_iovs, att_rb.nr_entries);
+                                             res_iovs, res_niov);
 
     if (ret != 0)
-        virtio_gpu_cleanup_mapping_iov(g, res_iovs, att_rb.nr_entries);
+        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
 }
 
 static void virgl_resource_detach_backing(VirtIOGPU *g,
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index c9f5e36fd076..6f3791deb3ae 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -608,11 +608,12 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
 int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                   struct virtio_gpu_resource_attach_backing *ab,
                                   struct virtio_gpu_ctrl_command *cmd,
-                                  uint64_t **addr, struct iovec **iov)
+                                  uint64_t **addr, struct iovec **iov,
+                                  uint32_t *niov)
 {
     struct virtio_gpu_mem_entry *ents;
     size_t esize, s;
-    int i;
+    int e, v;
 
     if (ab->nr_entries > 16384) {
         qemu_log_mask(LOG_GUEST_ERROR,
@@ -633,37 +634,53 @@ int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
         return -1;
     }
 
-    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
+    *iov = NULL;
     if (addr) {
-        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
+        *addr = NULL;
     }
-    for (i = 0; i < ab->nr_entries; i++) {
-        uint64_t a = le64_to_cpu(ents[i].addr);
-        uint32_t l = le32_to_cpu(ents[i].length);
-        hwaddr len = l;
-        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
-                                            a, &len, DMA_DIRECTION_TO_DEVICE);
-        (*iov)[i].iov_len = len;
-        if (addr) {
-            (*addr)[i] = a;
-        }
-        if (!(*iov)[i].iov_base || len != l) {
-            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
-                          " resource %d element %d\n",
-                          __func__, ab->resource_id, i);
-            if ((*iov)[i].iov_base) {
-                i++; /* cleanup the 'i'th map */
+    for (e = 0, v = 0; e < ab->nr_entries; e++) {
+        uint64_t a = le64_to_cpu(ents[e].addr);
+        uint32_t l = le32_to_cpu(ents[e].length);
+        hwaddr len;
+        void *map;
+
+        do {
+            len = l;
+            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
+                                 a, &len, DMA_DIRECTION_TO_DEVICE);
+            if (!map) {
+                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
+                              " resource %d element %d\n",
+                              __func__, ab->resource_id, e);
+                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
+                g_free(ents);
+                *iov = NULL;
+                if (addr) {
+                    g_free(*addr);
+                    *addr = NULL;
+                }
+                return -1;
             }
-            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
-            g_free(ents);
-            *iov = NULL;
+
+            if (!(v % 16)) {
+                *iov = g_realloc(*iov, sizeof(struct iovec) * (v + 16));
+                if (addr) {
+                    *addr = g_realloc(*addr, sizeof(uint64_t) * (v + 16));
+                }
+            }
+            (*iov)[v].iov_base = map;
+            (*iov)[v].iov_len = len;
             if (addr) {
-                g_free(*addr);
-                *addr = NULL;
+                (*addr)[v] = a;
             }
-            return -1;
-        }
+
+            a += len;
+            l -= len;
+            v += 1;
+        } while (l > 0);
     }
+    *niov = v;
+
     g_free(ents);
     return 0;
 }
@@ -717,13 +734,12 @@ virtio_gpu_resource_attach_backing(VirtIOGPU *g,
         return;
     }
 
-    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
+    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs,
+                                        &res->iov, &res->iov_cnt);
     if (ret != 0) {
         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
         return;
     }
-
-    res->iov_cnt = ab.nr_entries;
 }
 
 static void
-- 
2.31.1


