From: "Michael S. Tsirkin" <mst@redhat.com>
To: Jason Wang <jasowang@redhat.com>
Cc: linux-kernel@vger.kernel.org,
"Eugenio Pérez" <eperezma@redhat.com>,
kvm@vger.kernel.org, virtualization@lists.linux-foundation.org,
netdev@vger.kernel.org
Subject: Re: [PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct
Date: Sun, 7 Jun 2020 09:59:00 -0400 [thread overview]
Message-ID: <20200607095810-mutt-send-email-mst@kernel.org> (raw)
In-Reply-To: <48e6d644-c4aa-2754-9d06-22133987b3be@redhat.com>
On Wed, Jun 03, 2020 at 08:04:45PM +0800, Jason Wang wrote:
>
> On 2020/6/3 下午5:48, Michael S. Tsirkin wrote:
> > On Wed, Jun 03, 2020 at 03:13:56PM +0800, Jason Wang wrote:
> > > On 2020/6/2 下午9:05, Michael S. Tsirkin wrote:
>
>
> [...]
>
>
> > > > +
> > > > +static int fetch_indirect_descs(struct vhost_virtqueue *vq,
> > > > + struct vhost_desc *indirect,
> > > > + u16 head)
> > > > +{
> > > > + struct vring_desc desc;
> > > > + unsigned int i = 0, count, found = 0;
> > > > + u32 len = indirect->len;
> > > > + struct iov_iter from;
> > > > + int ret;
> > > > +
> > > > + /* Sanity check */
> > > > + if (unlikely(len % sizeof desc)) {
> > > > + vq_err(vq, "Invalid length in indirect descriptor: "
> > > > + "len 0x%llx not multiple of 0x%zx\n",
> > > > + (unsigned long long)len,
> > > > + sizeof desc);
> > > > + return -EINVAL;
> > > > + }
> > > > +
> > > > + ret = translate_desc(vq, indirect->addr, len, vq->indirect,
> > > > + UIO_MAXIOV, VHOST_ACCESS_RO);
> > > > + if (unlikely(ret < 0)) {
> > > > + if (ret != -EAGAIN)
> > > > + vq_err(vq, "Translation failure %d in indirect.\n", ret);
> > > > + return ret;
> > > > + }
> > > > + iov_iter_init(&from, READ, vq->indirect, ret, len);
> > > > +
> > > > + /* We will use the result as an address to read from, so most
> > > > + * architectures only need a compiler barrier here. */
> > > > + read_barrier_depends();
> > > > +
> > > > + count = len / sizeof desc;
> > > > + /* Buffers are chained via a 16 bit next field, so
> > > > + * we can have at most 2^16 of these. */
> > > > + if (unlikely(count > USHRT_MAX + 1)) {
> > > > + vq_err(vq, "Indirect buffer length too big: %d\n",
> > > > + indirect->len);
> > > > + return -E2BIG;
> > > > + }
> > > > + if (unlikely(vq->ndescs + count > vq->max_descs)) {
> > > > + vq_err(vq, "Too many indirect + direct descs: %d + %d\n",
> > > > + vq->ndescs, indirect->len);
> > > > + return -E2BIG;
> > > > + }
> > > > +
> > > > + do {
> > > > + if (unlikely(++found > count)) {
> > > > + vq_err(vq, "Loop detected: last one at %u "
> > > > + "indirect size %u\n",
> > > > + i, count);
> > > > + return -EINVAL;
> > > > + }
> > > > + if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
> > > > + vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
> > > > + i, (size_t)indirect->addr + i * sizeof desc);
> > > > + return -EINVAL;
> > > > + }
> > > > + if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
> > > > + vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
> > > > + i, (size_t)indirect->addr + i * sizeof desc);
> > > > + return -EINVAL;
> > > > + }
> > > > +
> > > > + push_split_desc(vq, &desc, head);
> > >
> > > The error is ignored.
> > See above:
> >
> > if (unlikely(vq->ndescs + count > vq->max_descs))
> >
> > So it can't fail here, we never fetch unless there's space.
> >
> > I guess we can add a WARN_ON here.
>
>
> Yes.
>
>
> >
> > > > + } while ((i = next_desc(vq, &desc)) != -1);
> > > > + return 0;
> > > > +}
> > > > +
> > > > +static int fetch_descs(struct vhost_virtqueue *vq)
> > > > +{
> > > > + unsigned int i, head, found = 0;
> > > > + struct vhost_desc *last;
> > > > + struct vring_desc desc;
> > > > + __virtio16 avail_idx;
> > > > + __virtio16 ring_head;
> > > > + u16 last_avail_idx;
> > > > + int ret;
> > > > +
> > > > + /* Check it isn't doing very strange things with descriptor numbers. */
> > > > + last_avail_idx = vq->last_avail_idx;
> > > > +
> > > > + if (vq->avail_idx == vq->last_avail_idx) {
> > > > + if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
> > > > + vq_err(vq, "Failed to access avail idx at %p\n",
> > > > + &vq->avail->idx);
> > > > + return -EFAULT;
> > > > + }
> > > > + vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
> > > > +
> > > > + if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
> > > > + vq_err(vq, "Guest moved used index from %u to %u",
> > > > + last_avail_idx, vq->avail_idx);
> > > > + return -EFAULT;
> > > > + }
> > > > +
> > > > + /* If there's nothing new since last we looked, return
> > > > + * invalid.
> > > > + */
> > > > + if (vq->avail_idx == last_avail_idx)
> > > > + return vq->num;
> > > > +
> > > > + /* Only get avail ring entries after they have been
> > > > + * exposed by guest.
> > > > + */
> > > > + smp_rmb();
> > > > + }
> > > > +
> > > > + /* Grab the next descriptor number they're advertising */
> > > > + if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
> > > > + vq_err(vq, "Failed to read head: idx %d address %p\n",
> > > > + last_avail_idx,
> > > > + &vq->avail->ring[last_avail_idx % vq->num]);
> > > > + return -EFAULT;
> > > > + }
> > > > +
> > > > + head = vhost16_to_cpu(vq, ring_head);
> > > > +
> > > > + /* If their number is silly, that's an error. */
> > > > + if (unlikely(head >= vq->num)) {
> > > > + vq_err(vq, "Guest says index %u > %u is available",
> > > > + head, vq->num);
> > > > + return -EINVAL;
> > > > + }
> > > > +
> > > > + i = head;
> > > > + do {
> > > > + if (unlikely(i >= vq->num)) {
> > > > + vq_err(vq, "Desc index is %u > %u, head = %u",
> > > > + i, vq->num, head);
> > > > + return -EINVAL;
> > > > + }
> > > > + if (unlikely(++found > vq->num)) {
> > > > + vq_err(vq, "Loop detected: last one at %u "
> > > > + "vq size %u head %u\n",
> > > > + i, vq->num, head);
> > > > + return -EINVAL;
> > > > + }
> > > > + ret = vhost_get_desc(vq, &desc, i);
> > > > + if (unlikely(ret)) {
> > > > + vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
> > > > + i, vq->desc + i);
> > > > + return -EFAULT;
> > > > + }
> > > > + ret = push_split_desc(vq, &desc, head);
> > > > + if (unlikely(ret)) {
> > > > + vq_err(vq, "Failed to save descriptor: idx %d\n", i);
> > > > + return -EINVAL;
> > > > + }
> > > > + } while ((i = next_desc(vq, &desc)) != -1);
> > > > +
> > > > + last = peek_split_desc(vq);
> > > > + if (unlikely(last->flags & VRING_DESC_F_INDIRECT)) {
> > > > + pop_split_desc(vq);
> > > > + ret = fetch_indirect_descs(vq, last, head);
> > >
> > > Note that this means we don't support chained indirect descriptors, which
> > > complies with the spec, but we support this in vhost_get_vq_desc().
> > Well the spec says:
> > A driver MUST NOT set both VIRTQ_DESC_F_INDIRECT and VIRTQ_DESC_F_NEXT in flags.
> >
> > Did I miss anything?
> >
>
> No, but I meant current vhost_get_vq_desc() supports chained indirect
> descriptor. Not sure if there's an application that depends on this
> silently.
>
> Thanks
>
I don't think we need to worry about that unless this actually
surfaces.
--
MST
next prev parent reply other threads:[~2020-06-07 13:59 UTC|newest]
Thread overview: 35+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-06-02 13:05 [PATCH RFC 00/13] vhost: format independence Michael S. Tsirkin
2020-06-02 13:05 ` [PATCH RFC 01/13] vhost: option to fetch descriptors through an independent struct Michael S. Tsirkin
2020-06-03 7:13 ` Jason Wang
2020-06-03 9:48 ` Michael S. Tsirkin
2020-06-03 12:04 ` Jason Wang
2020-06-07 13:59 ` Michael S. Tsirkin [this message]
2020-06-02 13:05 ` [PATCH RFC 02/13] vhost: use batched version by default Michael S. Tsirkin
2020-06-03 7:15 ` Jason Wang
2020-06-02 13:06 ` [PATCH RFC 03/13] vhost: batching fetches Michael S. Tsirkin
2020-06-03 7:27 ` Jason Wang
2020-06-04 8:59 ` Michael S. Tsirkin
2020-06-05 3:40 ` Jason Wang
2020-06-07 13:57 ` Michael S. Tsirkin
2020-06-08 3:35 ` Jason Wang
2020-06-08 6:01 ` Michael S. Tsirkin
2020-06-02 13:06 ` [PATCH RFC 04/13] vhost: cleanup fetch_buf return code handling Michael S. Tsirkin
2020-06-03 7:29 ` Jason Wang
2020-06-04 9:01 ` Michael S. Tsirkin
2020-06-02 13:06 ` [PATCH RFC 05/13] vhost/net: pass net specific struct pointer Michael S. Tsirkin
2020-06-02 13:06 ` [PATCH RFC 06/13] vhost: reorder functions Michael S. Tsirkin
2020-06-02 13:06 ` [PATCH RFC 07/13] vhost: format-independent API for used buffers Michael S. Tsirkin
2020-06-03 7:58 ` Jason Wang
2020-06-04 9:03 ` Michael S. Tsirkin
2020-06-04 9:18 ` Jason Wang
2020-06-04 10:17 ` Michael S. Tsirkin
2020-06-02 13:06 ` [PATCH RFC 08/13] vhost/net: convert to new API: heads->bufs Michael S. Tsirkin
2020-06-03 8:11 ` Jason Wang
2020-06-04 9:05 ` Michael S. Tsirkin
2020-06-02 13:06 ` [PATCH RFC 09/13] vhost/net: avoid iov length math Michael S. Tsirkin
2020-06-02 13:06 ` [PATCH RFC 10/13] vhost/test: convert to the buf API Michael S. Tsirkin
2020-06-02 13:06 ` [PATCH RFC 11/13] vhost/scsi: switch to buf APIs Michael S. Tsirkin
2020-06-05 8:36 ` Stefan Hajnoczi
2020-06-02 13:06 ` [PATCH RFC 12/13] vhost/vsock: switch to the buf API Michael S. Tsirkin
2020-06-05 8:36 ` Stefan Hajnoczi
2020-06-02 13:06 ` [PATCH RFC 13/13] vhost: drop head based APIs Michael S. Tsirkin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200607095810-mutt-send-email-mst@kernel.org \
--to=mst@redhat.com \
--cc=eperezma@redhat.com \
--cc=jasowang@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=netdev@vger.kernel.org \
--cc=virtualization@lists.linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).