qemu-devel.nongnu.org archive mirror
* [PATCH v2 0/3] revert 9pfs reply truncation, wait for free room to reply
@ 2020-05-21 19:26 Stefano Stabellini
  2020-05-21 19:26 ` [PATCH v2 1/3] Revert "9p: init_in_iov_from_pdu can truncate the size" Stefano Stabellini
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Stefano Stabellini @ 2020-05-21 19:26 UTC (permalink / raw)
  To: groug, qemu_oss; +Cc: anthony.perard, sstabellini, qemu-devel, paul

Hi all,

This short series reverts commit 16724a173049ac29c7b5ade741da93a0f46edff7
because it is the cause of https://bugs.launchpad.net/bugs/1877688.

The original issue addressed by 16724a173049ac29c7b5ade741da93a0f46edff7
is solved differently in this series by using qemu_coroutine_yield() to
wait for the client to read more data and free up room on the ring
before sending the reply.
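
As a rough illustration (a simplified sketch of what patch 2/3 does, written
as a loop instead of the patch's goto-based retry; the identifiers are the
ones the patch itself uses, nothing here is new API):

    /* Sketch only -- see patch 2/3 for the actual code. */
    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
    while (iov_size(ring->sg, num) < size) {
        /* Not enough room for the full reply: yield back to the main loop
         * until the client consumes data and notifies us again. */
        qemu_coroutine_yield();
        /* Recompute the available ring space and check again. */
        xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
    }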

Cheers,

Stefano

Changes in v2:
- add comments on barriers
- add patch to increase ring size to the max allowed



* [PATCH v2 1/3] Revert "9p: init_in_iov_from_pdu can truncate the size"
  2020-05-21 19:26 [PATCH v2 0/3] revert 9pfs reply truncation, wait for free room to reply Stefano Stabellini
@ 2020-05-21 19:26 ` Stefano Stabellini
  2020-05-22 12:34   ` Christian Schoenebeck
  2020-05-21 19:26 ` [PATCH v2 2/3] xen/9pfs: yield when there isn't enough room on the ring Stefano Stabellini
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 7+ messages in thread
From: Stefano Stabellini @ 2020-05-21 19:26 UTC (permalink / raw)
  To: groug
  Cc: sstabellini, paul, qemu_oss, qemu-devel, anthony.perard,
	Stefano Stabellini

From: Stefano Stabellini <stefano.stabellini@xilinx.com>

This reverts commit 16724a173049ac29c7b5ade741da93a0f46edff7.
It causes https://bugs.launchpad.net/bugs/1877688.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
---
 hw/9pfs/9p.c               | 33 +++++++++++----------------------
 hw/9pfs/9p.h               |  2 +-
 hw/9pfs/virtio-9p-device.c | 11 ++++-------
 hw/9pfs/xen-9p-backend.c   | 15 ++++++---------
 4 files changed, 22 insertions(+), 39 deletions(-)

diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index a2a14b5979..d39bfee462 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -2102,29 +2102,22 @@ out_nofid:
  * with qemu_iovec_destroy().
  */
 static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
-                                    size_t skip, size_t *size,
+                                    size_t skip, size_t size,
                                     bool is_write)
 {
     QEMUIOVector elem;
     struct iovec *iov;
     unsigned int niov;
-    size_t alloc_size = *size + skip;
 
     if (is_write) {
-        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, alloc_size);
+        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip);
     } else {
-        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, &alloc_size);
-    }
-
-    if (alloc_size < skip) {
-        *size = 0;
-    } else {
-        *size = alloc_size - skip;
+        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip);
     }
 
     qemu_iovec_init_external(&elem, iov, niov);
     qemu_iovec_init(qiov, niov);
-    qemu_iovec_concat(qiov, &elem, skip, *size);
+    qemu_iovec_concat(qiov, &elem, skip, size);
 }
 
 static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
@@ -2132,14 +2125,15 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
 {
     ssize_t err;
     size_t offset = 7;
-    size_t read_count;
+    uint64_t read_count;
     QEMUIOVector qiov_full;
 
     if (fidp->fs.xattr.len < off) {
         read_count = 0;
-    } else if (fidp->fs.xattr.len - off < max_count) {
-        read_count = fidp->fs.xattr.len - off;
     } else {
+        read_count = fidp->fs.xattr.len - off;
+    }
+    if (read_count > max_count) {
         read_count = max_count;
     }
     err = pdu_marshal(pdu, offset, "d", read_count);
@@ -2148,7 +2142,7 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
     }
     offset += err;
 
-    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &read_count, false);
+    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
     err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
                     ((char *)fidp->fs.xattr.value) + off,
                     read_count);
@@ -2277,11 +2271,9 @@ static void coroutine_fn v9fs_read(void *opaque)
         QEMUIOVector qiov_full;
         QEMUIOVector qiov;
         int32_t len;
-        size_t size = max_count;
 
-        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, &size, false);
+        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
         qemu_iovec_init(&qiov, qiov_full.niov);
-        max_count = size;
         do {
             qemu_iovec_reset(&qiov);
             qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count);
@@ -2532,7 +2524,6 @@ static void coroutine_fn v9fs_write(void *opaque)
     int32_t len = 0;
     int32_t total = 0;
     size_t offset = 7;
-    size_t size;
     V9fsFidState *fidp;
     V9fsPDU *pdu = opaque;
     V9fsState *s = pdu->s;
@@ -2545,9 +2536,7 @@ static void coroutine_fn v9fs_write(void *opaque)
         return;
     }
     offset += err;
-    size = count;
-    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &size, true);
-    count = size;
+    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true);
     trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov);
 
     fidp = get_fid(pdu, fid);
diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
index dd1c6cb8d2..1b9e110605 100644
--- a/hw/9pfs/9p.h
+++ b/hw/9pfs/9p.h
@@ -436,7 +436,7 @@ struct V9fsTransport {
     ssize_t     (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
                                   va_list ap);
     void        (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
-                                        unsigned int *pniov, size_t *size);
+                                        unsigned int *pniov, size_t size);
     void        (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
                                          unsigned int *pniov, size_t size);
     void        (*push_and_notify)(V9fsPDU *pdu);
diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index e5b44977c7..36f3aa9352 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -147,22 +147,19 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
 }
 
 static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
-                                        unsigned int *pniov, size_t *size)
+                                        unsigned int *pniov, size_t size)
 {
     V9fsState *s = pdu->s;
     V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
     VirtQueueElement *elem = v->elems[pdu->idx];
     size_t buf_size = iov_size(elem->in_sg, elem->in_num);
 
-    if (buf_size < P9_IOHDRSZ) {
+    if (buf_size < size) {
         VirtIODevice *vdev = VIRTIO_DEVICE(v);
 
         virtio_error(vdev,
-                     "VirtFS reply type %d needs %zu bytes, buffer has %zu, less than minimum",
-                     pdu->id + 1, *size, buf_size);
-    }
-    if (buf_size < *size) {
-        *size = buf_size;
+                     "VirtFS reply type %d needs %zu bytes, buffer has %zu",
+                     pdu->id + 1, size, buf_size);
     }
 
     *piov = elem->in_sg;
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index f04caabfe5..fc197f6c8a 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -188,7 +188,7 @@ static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
 static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                           struct iovec **piov,
                                           unsigned int *pniov,
-                                          size_t *size)
+                                          size_t size)
 {
     Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
     Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
@@ -198,19 +198,16 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
     g_free(ring->sg);
 
     ring->sg = g_new0(struct iovec, 2);
-    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, *size);
+    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
 
     buf_size = iov_size(ring->sg, num);
-    if (buf_size  < P9_IOHDRSZ) {
-        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs reply type %d needs "
-                      "%zu bytes, buffer has %zu, less than minimum\n",
-                      pdu->id + 1, *size, buf_size);
+    if (buf_size  < size) {
+        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d"
+                "needs %zu bytes, buffer has %zu\n", pdu->id, size,
+                buf_size);
         xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
         xen_9pfs_disconnect(&xen_9pfs->xendev);
     }
-    if (buf_size  < *size) {
-        *size = buf_size;
-    }
 
     *piov = ring->sg;
     *pniov = num;
-- 
2.17.1




* [PATCH v2 2/3] xen/9pfs: yield when there isn't enough room on the ring
  2020-05-21 19:26 [PATCH v2 0/3] revert 9pfs reply truncation, wait for free room to reply Stefano Stabellini
  2020-05-21 19:26 ` [PATCH v2 1/3] Revert "9p: init_in_iov_from_pdu can truncate the size" Stefano Stabellini
@ 2020-05-21 19:26 ` Stefano Stabellini
  2020-05-21 19:26 ` [PATCH v2 3/3] xen/9pfs: increase max ring order to 9 Stefano Stabellini
  2020-05-25 13:48 ` [PATCH v2 0/3] revert 9pfs reply truncation, wait for free room to reply Greg Kurz
  3 siblings, 0 replies; 7+ messages in thread
From: Stefano Stabellini @ 2020-05-21 19:26 UTC (permalink / raw)
  To: groug
  Cc: sstabellini, paul, qemu_oss, qemu-devel, anthony.perard,
	Stefano Stabellini

From: Stefano Stabellini <stefano.stabellini@xilinx.com>

Instead of truncating replies, which is problematic, wait until the
client reads more data and frees bytes on the reply ring.

Do that by calling qemu_coroutine_yield(). The corresponding
qemu_coroutine_enter_if_inactive() is called from xen_9pfs_bh upon
receiving the next notification from the client.

We need to be careful to avoid races in case xen_9pfs_bh and the
coroutine are both active at the same time. In xen_9pfs_bh, wait until
either the critical section is over (ring->co == NULL) or the
coroutine becomes inactive (qemu_coroutine_yield() was called) before
continuing. Then, simply wake up the coroutine if it is inactive.
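
Restated as a plain fragment (this is just the logic of the xen_9pfs_bh hunk
below, using its identifiers; no new code is introduced), the bottom half
side of the handshake is:

    bool wait;
    do {
        wait = ring->co != NULL && qemu_coroutine_entered(ring->co);
        /* paired with the smp_wmb() barriers in the coroutine */
        smp_rmb();
        if (wait) {
            cpu_relax();  /* coroutine still inside its critical section */
        }
    } while (wait);
    if (ring->co != NULL) {
        qemu_coroutine_enter_if_inactive(ring->co);  /* wake the yielded reply */
    }
    xen_9pfs_receive(ring);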

Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
Reviewed-by: Christian Schoenebeck <qemu_oss@crudebyte.com>
---
Changes in v2:
- add comments on barriers
---
 hw/9pfs/xen-9p-backend.c | 31 +++++++++++++++++++++++++------
 1 file changed, 25 insertions(+), 6 deletions(-)

diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index fc197f6c8a..3c84c86ab8 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -37,6 +37,7 @@ typedef struct Xen9pfsRing {
 
     struct iovec *sg;
     QEMUBH *bh;
+    Coroutine *co;
 
     /* local copies, so that we can read/write PDU data directly from
      * the ring */
@@ -198,16 +199,20 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
     g_free(ring->sg);
 
     ring->sg = g_new0(struct iovec, 2);
-    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
+    ring->co = qemu_coroutine_self();
+    /* make sure other threads see ring->co changes before continuing */
+    smp_wmb();
 
+again:
+    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
     buf_size = iov_size(ring->sg, num);
     if (buf_size  < size) {
-        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d"
-                "needs %zu bytes, buffer has %zu\n", pdu->id, size,
-                buf_size);
-        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
-        xen_9pfs_disconnect(&xen_9pfs->xendev);
+        qemu_coroutine_yield();
+        goto again;
     }
+    ring->co = NULL;
+    /* make sure other threads see ring->co changes before continuing */
+    smp_wmb();
 
     *piov = ring->sg;
     *pniov = num;
@@ -292,6 +297,20 @@ static int xen_9pfs_receive(Xen9pfsRing *ring)
 static void xen_9pfs_bh(void *opaque)
 {
     Xen9pfsRing *ring = opaque;
+    bool wait;
+
+again:
+    wait = ring->co != NULL && qemu_coroutine_entered(ring->co);
+    /* paired with the smp_wmb barriers in xen_9pfs_init_in_iov_from_pdu */
+    smp_rmb();
+    if (wait) {
+        cpu_relax();
+        goto again;
+    }
+
+    if (ring->co != NULL) {
+        qemu_coroutine_enter_if_inactive(ring->co);
+    }
     xen_9pfs_receive(ring);
 }
 
-- 
2.17.1




* [PATCH v2 3/3] xen/9pfs: increase max ring order to 9
  2020-05-21 19:26 [PATCH v2 0/3] revert 9pfs reply truncation, wait for free room to reply Stefano Stabellini
  2020-05-21 19:26 ` [PATCH v2 1/3] Revert "9p: init_in_iov_from_pdu can truncate the size" Stefano Stabellini
  2020-05-21 19:26 ` [PATCH v2 2/3] xen/9pfs: yield when there isn't enough room on the ring Stefano Stabellini
@ 2020-05-21 19:26 ` Stefano Stabellini
  2020-05-22 12:35   ` Christian Schoenebeck
  2020-05-25 13:48 ` [PATCH v2 0/3] revert 9pfs reply truncation, wait for free room to reply Greg Kurz
  3 siblings, 1 reply; 7+ messages in thread
From: Stefano Stabellini @ 2020-05-21 19:26 UTC (permalink / raw)
  To: groug
  Cc: sstabellini, paul, qemu_oss, qemu-devel, anthony.perard,
	Stefano Stabellini

From: Stefano Stabellini <stefano.stabellini@xilinx.com>

The max order allowed by the protocol is 9. Increase the max order
supported by QEMU to 9 to increase performance.
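
For a sense of scale (my own arithmetic, on the assumption that the shared
data area spans 2^order pages of 4 KiB, split between the request and reply
rings):

    /* Assumed relation between ring order and shared data area size: */
    /*   bytes   = (1UL << order) * 4096                              */
    /*   order 8 = 256 pages = 1 MiB                                  */
    /*   order 9 = 512 pages = 2 MiB                                  */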

Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
---
Changes in v2:
- patch added
---
 hw/9pfs/xen-9p-backend.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index 3c84c86ab8..a969fcc54c 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -21,7 +21,7 @@
 
 #define VERSIONS "1"
 #define MAX_RINGS 8
-#define MAX_RING_ORDER 8
+#define MAX_RING_ORDER 9
 
 typedef struct Xen9pfsRing {
     struct Xen9pfsDev *priv;
-- 
2.17.1




* Re: [PATCH v2 1/3] Revert "9p: init_in_iov_from_pdu can truncate the size"
  2020-05-21 19:26 ` [PATCH v2 1/3] Revert "9p: init_in_iov_from_pdu can truncate the size" Stefano Stabellini
@ 2020-05-22 12:34   ` Christian Schoenebeck
  0 siblings, 0 replies; 7+ messages in thread
From: Christian Schoenebeck @ 2020-05-22 12:34 UTC (permalink / raw)
  To: qemu-devel
  Cc: Stefano Stabellini, groug, paul, anthony.perard, Stefano Stabellini

On Thursday, 21 May 2020 21:26:25 CEST Stefano Stabellini wrote:
> From: Stefano Stabellini <stefano.stabellini@xilinx.com>
> 
> This reverts commit 16724a173049ac29c7b5ade741da93a0f46edff7.
> It causes https://bugs.launchpad.net/bugs/1877688.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
> ---

Actually already reviewed by me; no changes, so just to make it clear:

Reviewed-by: Christian Schoenebeck <qemu_oss@crudebyte.com>

Best regards,
Christian Schoenebeck





* Re: [PATCH v2 3/3] xen/9pfs: increase max ring order to 9
  2020-05-21 19:26 ` [PATCH v2 3/3] xen/9pfs: increase max ring order to 9 Stefano Stabellini
@ 2020-05-22 12:35   ` Christian Schoenebeck
  0 siblings, 0 replies; 7+ messages in thread
From: Christian Schoenebeck @ 2020-05-22 12:35 UTC (permalink / raw)
  To: qemu-devel
  Cc: Stefano Stabellini, groug, paul, anthony.perard, Stefano Stabellini

On Thursday, 21 May 2020 21:26:27 CEST Stefano Stabellini wrote:
> From: Stefano Stabellini <stefano.stabellini@xilinx.com>
> 
> The max order allowed by the protocol is 9. Increase the max order
> supported by QEMU to 9 to increase performance.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
> ---
> Changes in v2:
> - patch added
> ---

Reviewed-by: Christian Schoenebeck <qemu_oss@crudebyte.com>

Best regards,
Christian Schoenebeck





* Re: [PATCH v2 0/3] revert 9pfs reply truncation, wait for free room to reply
  2020-05-21 19:26 [PATCH v2 0/3] revert 9pfs reply truncation, wait for free room to reply Stefano Stabellini
                   ` (2 preceding siblings ...)
  2020-05-21 19:26 ` [PATCH v2 3/3] xen/9pfs: increase max ring order to 9 Stefano Stabellini
@ 2020-05-25 13:48 ` Greg Kurz
  3 siblings, 0 replies; 7+ messages in thread
From: Greg Kurz @ 2020-05-25 13:48 UTC (permalink / raw)
  To: Stefano Stabellini; +Cc: anthony.perard, qemu_oss, qemu-devel, paul

On Thu, 21 May 2020 12:26:18 -0700 (PDT)
Stefano Stabellini <sstabellini@kernel.org> wrote:

> Hi all,
> 
> This short series reverts commit 16724a173049ac29c7b5ade741da93a0f46edff7
> because it is the cause of https://bugs.launchpad.net/bugs/1877688.
> 
> The original issue addressed by 16724a173049ac29c7b5ade741da93a0f46edff7
> is solved differently in this series by using qemu_coroutine_yield() to
> wait for the client to read more data and free up room on the ring
> before sending the reply.
> 
> Cheers,
> 
> Stefano
> 

I wasn't really involved in the review of these patches, but I've
done some testing with virtio-9p, which shows no regression. I
assume you have done some testing on the Xen side as well. So
I've pushed the series to 9p-next. I shall send a PR in a day
or so.

Cheers,

--
Greg





end of thread

Thread overview: 7+ messages
2020-05-21 19:26 [PATCH v2 0/3] revert 9pfs reply truncation, wait for free room to reply Stefano Stabellini
2020-05-21 19:26 ` [PATCH v2 1/3] Revert "9p: init_in_iov_from_pdu can truncate the size" Stefano Stabellini
2020-05-22 12:34   ` Christian Schoenebeck
2020-05-21 19:26 ` [PATCH v2 2/3] xen/9pfs: yield when there isn't enough room on the ring Stefano Stabellini
2020-05-21 19:26 ` [PATCH v2 3/3] xen/9pfs: increase max ring order to 9 Stefano Stabellini
2020-05-22 12:35   ` Christian Schoenebeck
2020-05-25 13:48 ` [PATCH v2 0/3] revert 9pfs reply truncation, wait for free room to reply Greg Kurz
