Date: Tue, 24 Nov 2009 21:45:02 +0200
From: "Michael S. Tsirkin"
Message-ID: <20091124194502.GA4250@redhat.com>
Subject: [Qemu-devel] [PATCH] qemu/virtio-net: remove wrong s/g layout assumptions
List-Id: qemu-devel.nongnu.org
To: Rusty Russell, qemu-devel@nongnu.org, Shirley Ma, Anthony Liguori

virtio net currently assumes that the first s/g element it gets is
always the virtio net header. This is wrong: there should be no
assumptions about s/g boundaries. For example, the guest should be
able to put the virtio_net_hdr at the front of the skbuf data if
there is room. Get rid of this assumption and always consume the
header space from the iovec properly.

Signed-off-by: Michael S. Tsirkin
---

Rusty, since you suggested this fix, could you ack it please?

 hw/virtio-net.c |   80 +++++++++++++++++++++++++++++++++++++++----------------
 1 files changed, 57 insertions(+), 23 deletions(-)

diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 2f147e5..06c5148 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -434,26 +434,59 @@ static int iov_fill(struct iovec *iov, int iovcnt, const void *buf, int count)
     return offset;
 }
 
+static int iov_skip(struct iovec *iov, int iovcnt, int count)
+{
+    int offset, i;
+
+    offset = i = 0;
+    while (offset < count && i < iovcnt) {
+        int len = MIN(iov[i].iov_len, count - offset);
+        iov[i].iov_base += len;
+        iov[i].iov_len -= len;
+        offset += len;
+        i++;
+    }
+
+    return offset;
+}
+
+static int iov_copy(struct iovec *to, struct iovec *from, int iovcnt, int count)
+{
+    int offset, i;
+
+    offset = i = 0;
+    while (offset < count && i < iovcnt) {
+        int len = MIN(from[i].iov_len, count - offset);
+        to[i].iov_base = from[i].iov_base;
+        to[i].iov_len = from[i].iov_len;
+        offset += len;
+        i++;
+    }
+
+    return i;
+}
+
 static int receive_header(VirtIONet *n, struct iovec *iov, int iovcnt,
                           const void *buf, size_t size, size_t hdr_len)
 {
-    struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)iov[0].iov_base;
+    struct virtio_net_hdr hdr = {};
     int offset = 0;
 
-    hdr->flags = 0;
-    hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
+    hdr.flags = 0;
+    hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
 
     if (n->has_vnet_hdr) {
-        memcpy(hdr, buf, sizeof(*hdr));
-        offset = sizeof(*hdr);
-        work_around_broken_dhclient(hdr, buf + offset, size - offset);
+        memcpy(&hdr, buf, sizeof hdr);
+        offset = sizeof hdr;
+        work_around_broken_dhclient(&hdr, buf + offset, size - offset);
     }
 
+    iov_fill(iov, iovcnt, &hdr, sizeof hdr);
+
     /* We only ever receive a struct virtio_net_hdr from the tapfd,
      * but we may be passing along a larger header to the guest.
      */
-    iov[0].iov_base += hdr_len;
-    iov[0].iov_len -= hdr_len;
+    iov_skip(iov, iovcnt, hdr_len);
 
     return offset;
 }
@@ -514,7 +547,8 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
 static ssize_t virtio_net_receive(VLANClientState *vc, const uint8_t *buf, size_t size)
 {
     VirtIONet *n = vc->opaque;
-    struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL;
+    struct iovec mhdr[VIRTQUEUE_MAX_SIZE];
+    int mhdrcnt = 0;
     size_t hdr_len, offset, i;
 
     if (!virtio_net_can_receive(n->vc))
@@ -552,16 +586,13 @@ static ssize_t virtio_net_receive(VLANClientState *vc, const uint8_t *buf, size_
             exit(1);
         }
 
-        if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != hdr_len) {
-            fprintf(stderr, "virtio-net header not in first element\n");
-            exit(1);
-        }
-
         memcpy(&sg, &elem.in_sg[0], sizeof(sg[0]) * elem.in_num);
 
         if (i == 0) {
-            if (n->mergeable_rx_bufs)
-                mhdr = (struct virtio_net_hdr_mrg_rxbuf *)sg[0].iov_base;
+            if (n->mergeable_rx_bufs) {
+                mhdrcnt = iov_copy(mhdr, sg, elem.in_num,
+                                   sizeof(struct virtio_net_hdr_mrg_rxbuf));
+            }
 
             offset += receive_header(n, sg, elem.in_num,
                                      buf + offset, size - offset, hdr_len);
@@ -579,8 +610,12 @@ static ssize_t virtio_net_receive(VLANClientState *vc, const uint8_t *buf, size_
         offset += len;
     }
 
-    if (mhdr)
-        mhdr->num_buffers = i;
+    if (mhdrcnt) {
+        uint16_t num = i;
+        iov_skip(mhdr, mhdrcnt,
+                 offsetof(struct virtio_net_hdr_mrg_rxbuf, num_buffers));
+        iov_fill(mhdr, mhdrcnt, &num, sizeof num);
+    }
 
     virtqueue_flush(n->rx_vq, i);
     virtio_notify(&n->vdev, n->rx_vq);
@@ -627,20 +662,19 @@ static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
             sizeof(struct virtio_net_hdr_mrg_rxbuf) :
             sizeof(struct virtio_net_hdr);
 
-        if (out_num < 1 || out_sg->iov_len != hdr_len) {
-            fprintf(stderr, "virtio-net header not in first element\n");
+        if (out_num < 1) {
+            fprintf(stderr, "virtio-net: no output element\n");
             exit(1);
         }
 
         /* ignore the header if GSO is not supported */
         if (!n->has_vnet_hdr) {
-            out_num--;
-            out_sg++;
+            iov_skip(out_sg, out_num, hdr_len);
             len += hdr_len;
         } else if (n->mergeable_rx_bufs) {
             /* tapfd expects a struct virtio_net_hdr */
             hdr_len -= sizeof(struct virtio_net_hdr);
-            out_sg->iov_len -= hdr_len;
+            iov_skip(out_sg, out_num, hdr_len);
             len += hdr_len;
         }
 
-- 
1.6.5.2.143.g8cc62
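
P.S. For reviewers who want to see the helper's behaviour in isolation:
below is a minimal standalone sketch, not part of the patch. It copies
the iov_skip() logic from above, but replaces QEMU's MIN() macro and
gcc's void-pointer arithmetic with portable equivalents, and adds a
trivial main() so it can be compiled and run on its own. The point it
demonstrates is the property the patch relies on: a skip larger than
the first s/g element correctly straddles element boundaries.

/* iov_skip_demo.c: standalone sketch of the iov_skip() semantics. */
#include <stdio.h>
#include <sys/uio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Consume 'count' bytes from the front of the iovec array, advancing
 * across as many elements as needed. Returns the bytes consumed. */
static int iov_skip(struct iovec *iov, int iovcnt, int count)
{
    int offset = 0, i = 0;

    while (offset < count && i < iovcnt) {
        int len = MIN(iov[i].iov_len, count - offset);
        /* explicit char * cast here; the patch uses gcc's
         * void-pointer arithmetic extension instead */
        iov[i].iov_base = (char *)iov[i].iov_base + len;
        iov[i].iov_len -= len;
        offset += len;
        i++;
    }

    return offset;
}

int main(void)
{
    char a[4], b[6], c[8];
    struct iovec iov[3] = {
        { .iov_base = a, .iov_len = sizeof a },
        { .iov_base = b, .iov_len = sizeof b },
        { .iov_base = c, .iov_len = sizeof c },
    };

    /* Skip 10 bytes: consumes all of 'a' (4) and all of 'b' (6),
     * leaving 'c' untouched, even though no single element holds
     * the whole skipped region. */
    int skipped = iov_skip(iov, 3, 10);

    printf("skipped %d bytes\n", skipped);
    for (int i = 0; i < 3; i++)
        printf("iov[%d].iov_len = %zu\n", i, iov[i].iov_len);
    return 0;
}

Built with plain gcc, this prints "skipped 10 bytes" and remaining
lengths 0/0/8, i.e. a header larger than the first element is consumed
correctly rather than tripping the old "header not in first element"
check.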