From: John Johnson <john.g.johnson@oracle.com>
To: qemu-devel@nongnu.org
Cc: alex.williamson@redhat.com, clg@redhat.com, philmd@linaro.org
Subject: [PATCH v2 20/23] vfio-user: dma read/write operations
Date: Wed,  1 Feb 2023 21:55:56 -0800
Message-ID: <ea57a62b62b71428b2ee7fba65c1e6d73cc76b6b.1675228037.git.john.g.johnson@oracle.com>
In-Reply-To: <cover.1675228037.git.john.g.johnson@oracle.com>

Add handlers for the VFIO_USER_DMA_READ and VFIO_USER_DMA_WRITE
messages the server sends to the client to perform device DMA to and
from guest memory.
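
To illustrate the wire format (the sketch and its values are
illustrative only, not part of the patch): a DMA read request carries
just a guest-physical offset and a byte count; the reply echoes the
header and those fields, followed by the data:

    /* sketch: server-to-client VFIO_USER_DMA_READ request */
    VFIOUserDMARW req = {
        .hdr.command = VFIO_USER_DMA_READ,
        .hdr.size    = sizeof(req),  /* a read request has no payload */
        .offset      = 0x100000,     /* guest-physical address */
        .count       = 512,          /* bytes to read */
    };
    /*
     * The reply is sizeof(VFIOUserDMARW) + 512 bytes: the echoed
     * header, offset, and count, then data[512] holding the memory
     * contents.  On failure the client instead sets VFIO_USER_ERROR
     * in hdr.flags and an errno value in hdr.error_reply.
     */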

Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
Signed-off-by: John G Johnson <john.g.johnson@oracle.com>
Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
---
 hw/vfio/user-protocol.h |  11 +++++
 hw/vfio/user.h          |   3 ++
 hw/vfio/user-pci.c      | 110 ++++++++++++++++++++++++++++++++++++++++++++++++
 hw/vfio/user.c          |  57 +++++++++++++++++++++++++
 4 files changed, 181 insertions(+)
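
Note for reviewers (not part of the commit): the write direction is the
mirror image of the sketch in the commit message -- the payload travels
in the request and the acknowledgement is a bare header.  Another
hedged sketch; 'buf' is a hypothetical payload buffer, everything else
comes from this patch:

    /* sketch: server-to-client VFIO_USER_DMA_WRITE request */
    size_t count = 256;
    VFIOUserDMARW *req = g_malloc0(sizeof(*req) + count);

    req->hdr.command = VFIO_USER_DMA_WRITE;
    req->hdr.size = sizeof(*req) + count;  /* header plus payload */
    req->offset = 0x200000;                /* guest-physical address */
    req->count = count;
    memcpy(req->data, buf, count);         /* payload from 'buf' */
    /*
     * The success reply is a bare VFIOUserHdr; on failure the client
     * sets VFIO_USER_ERROR in hdr.flags and an errno in error_reply.
     */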

diff --git a/hw/vfio/user-protocol.h b/hw/vfio/user-protocol.h
index 109076d..1a40cca 100644
--- a/hw/vfio/user-protocol.h
+++ b/hw/vfio/user-protocol.h
@@ -201,6 +201,17 @@ typedef struct {
     char data[];
 } VFIOUserRegionRW;
 
+/*
+ * VFIO_USER_DMA_READ
+ * VFIO_USER_DMA_WRITE
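+ *
+ * data[] is empty in a read request, carries the bytes read back in
+ * the read reply, and carries the bytes to write in a write request.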
+ */
+typedef struct {
+    VFIOUserHdr hdr;
+    uint64_t offset;
+    uint32_t count;
+    char data[];
+} VFIOUserDMARW;
+
 /*imported from struct vfio_bitmap */
 typedef struct {
     uint64_t pgsize;
diff --git a/hw/vfio/user.h b/hw/vfio/user.h
index fe0115b..ae7654f 100644
--- a/hw/vfio/user.h
+++ b/hw/vfio/user.h
@@ -97,6 +97,9 @@ VFIOGroup *vfio_user_get_group(VFIOUserProxy *proxy, AddressSpace *as,
                                Error **errp);
 void vfio_user_put_group(VFIOGroup *group);
 int vfio_user_validate_version(VFIOUserProxy *proxy, Error **errp);
+void vfio_user_send_reply(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int size);
+void vfio_user_send_error(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int error);
+void vfio_user_putfds(VFIOUserMsg *msg);
 
 extern VFIODeviceIO vfio_dev_io_sock;
 extern VFIOContainerIO vfio_cont_io_sock;
diff --git a/hw/vfio/user-pci.c b/hw/vfio/user-pci.c
index bf84d7c..6465b1c 100644
--- a/hw/vfio/user-pci.c
+++ b/hw/vfio/user-pci.c
@@ -101,6 +101,95 @@ static void vfio_user_msix_teardown(VFIOPCIDevice *vdev)
     vdev->msix->pba_region = NULL;
 }
 
+static void vfio_user_dma_read(VFIOPCIDevice *vdev, VFIOUserDMARW *msg)
+{
+    PCIDevice *pdev = &vdev->pdev;
+    VFIOUserProxy *proxy = vdev->vbasedev.proxy;
+    VFIOUserDMARW *res;
+    MemTxResult r;
+    size_t size;
+
+    if (msg->hdr.size < sizeof(*msg)) {
+        vfio_user_send_error(proxy, &msg->hdr, EINVAL);
+        return;
+    }
+    if (msg->count > proxy->max_xfer_size) {
+        vfio_user_send_error(proxy, &msg->hdr, E2BIG);
+        return;
+    }
+
+    /* switch to our own message buffer */
+    size = msg->count + sizeof(VFIOUserDMARW);
+    res = g_malloc0(size);
+    memcpy(res, msg, sizeof(*res));
+    g_free(msg);
+
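+    /* read the requested range of guest memory into the reply payload */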
+    r = pci_dma_read(pdev, res->offset, &res->data, res->count);
+
+    switch (r) {
+    case MEMTX_OK:
+        if (res->hdr.flags & VFIO_USER_NO_REPLY) {
+            g_free(res);
+            return;
+        }
+        vfio_user_send_reply(proxy, &res->hdr, size);
+        break;
+    case MEMTX_ERROR:
+        vfio_user_send_error(proxy, &res->hdr, EFAULT);
+        break;
+    case MEMTX_DECODE_ERROR:
+        vfio_user_send_error(proxy, &res->hdr, ENODEV);
+        break;
+    case MEMTX_ACCESS_ERROR:
+        vfio_user_send_error(proxy, &res->hdr, EPERM);
+        break;
+    default:
+        error_printf("vfio_user_dma_read unknown error %d\n", r);
+        vfio_user_send_error(proxy, &res->hdr, EINVAL);
+    }
+}
+
+static void vfio_user_dma_write(VFIOPCIDevice *vdev, VFIOUserDMARW *msg)
+{
+    PCIDevice *pdev = &vdev->pdev;
+    VFIOUserProxy *proxy = vdev->vbasedev.proxy;
+    MemTxResult r;
+
+    if (msg->hdr.size < sizeof(*msg)) {
+        vfio_user_send_error(proxy, &msg->hdr, EINVAL);
+        return;
+    }
+    /* make sure transfer count isn't larger than the message data */
+    if (msg->count > msg->hdr.size - sizeof(*msg)) {
+        vfio_user_send_error(proxy, &msg->hdr, E2BIG);
+        return;
+    }
+
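+    /* write the request payload into guest memory at the given offset */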
+    r = pci_dma_write(pdev, msg->offset, &msg->data, msg->count);
+
+    switch (r) {
+    case MEMTX_OK:
+        if ((msg->hdr.flags & VFIO_USER_NO_REPLY) == 0) {
+            vfio_user_send_reply(proxy, &msg->hdr, sizeof(msg->hdr));
+        } else {
+            g_free(msg);
+        }
+        break;
+    case MEMTX_ERROR:
+        vfio_user_send_error(proxy, &msg->hdr, EFAULT);
+        break;
+    case MEMTX_DECODE_ERROR:
+        vfio_user_send_error(proxy, &msg->hdr, ENODEV);
+        break;
+    case MEMTX_ACCESS_ERROR:
+        vfio_user_send_error(proxy, &msg->hdr, EPERM);
+        break;
+    default:
+        error_printf("vfio_user_dma_write unknown error %d\n", r);
+        vfio_user_send_error(proxy, &msg->hdr, EINVAL);
+    }
+}
+
 /*
  * Incoming request message callback.
  *
@@ -108,7 +197,28 @@ static void vfio_user_msix_teardown(VFIOPCIDevice *vdev)
  */
 static void vfio_user_pci_process_req(void *opaque, VFIOUserMsg *msg)
 {
+    VFIOPCIDevice *vdev = opaque;
+    VFIOUserHdr *hdr = msg->hdr;
+
+    /* no incoming PCI requests pass FDs */
+    if (msg->fds != NULL) {
+        vfio_user_send_error(vdev->vbasedev.proxy, hdr, EINVAL);
+        vfio_user_putfds(msg);
+        return;
+    }
 
+    switch (hdr->command) {
+    case VFIO_USER_DMA_READ:
+        vfio_user_dma_read(vdev, (VFIOUserDMARW *)hdr);
+        break;
+    case VFIO_USER_DMA_WRITE:
+        vfio_user_dma_write(vdev, (VFIOUserDMARW *)hdr);
+        break;
+    default:
+        error_printf("vfio_user_pci_process_req unknown cmd %d\n",
+                     hdr->command);
+        vfio_user_send_error(vdev->vbasedev.proxy, hdr, ENOSYS);
+    }
 }
 
 /*
diff --git a/hw/vfio/user.c b/hw/vfio/user.c
index 0a7b354..3aabf6b 100644
--- a/hw/vfio/user.c
+++ b/hw/vfio/user.c
@@ -394,6 +394,10 @@ static int vfio_user_recv_one(VFIOUserProxy *proxy)
         *msg->hdr = hdr;
         data = (char *)msg->hdr + sizeof(hdr);
     } else {
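+        /*
+         * The largest valid incoming request is a DMA write: a
+         * VFIOUserDMARW header plus max_xfer_size bytes of payload.
+         */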
+        if (hdr.size > proxy->max_xfer_size + sizeof(VFIOUserDMARW)) {
+            error_setg(&local_err, "vfio_user_recv request larger than max");
+            goto err;
+        }
         buf = g_malloc0(hdr.size);
         memcpy(buf, &hdr, sizeof(hdr));
         data = buf + sizeof(hdr);
@@ -804,6 +808,59 @@ static void vfio_user_wait_reqs(VFIOUserProxy *proxy)
     }
 }
 
+/*
+ * Reply to an incoming request.
+ */
+void vfio_user_send_reply(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int size)
+{
+    if (size < sizeof(VFIOUserHdr)) {
+        error_printf("vfio_user_send_reply - size too small\n");
+        g_free(hdr);
+        return;
+    }
+
+    /* convert the request header into a reply header */
+    hdr->flags = VFIO_USER_REPLY;
+    hdr->size = size;
+
+    vfio_user_send_async(proxy, hdr, NULL);
+}
+
+/*
+ * Send an error reply to an incoming request.
+ */
+void vfio_user_send_error(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int error)
+{
+    /* convert the request header into an error reply */
+    hdr->flags = VFIO_USER_REPLY | VFIO_USER_ERROR;
+    hdr->error_reply = error;
+    hdr->size = sizeof(*hdr);
+
+    vfio_user_send_async(proxy, hdr, NULL);
+}
+
+/*
+ * Close FDs erroneously received in an incoming request.
+ */
+void vfio_user_putfds(VFIOUserMsg *msg)
+{
+    VFIOUserFDs *fds = msg->fds;
+    int i;
+
+    for (i = 0; i < fds->recv_fds; i++) {
+        close(fds->fds[i]);
+    }
+    g_free(fds);
+    msg->fds = NULL;
+}
+
 static QLIST_HEAD(, VFIOUserProxy) vfio_user_sockets =
     QLIST_HEAD_INITIALIZER(vfio_user_sockets);
 
-- 
1.9.4
