From: Jason Wang <jasowang@redhat.com>
To: mst@redhat.com, jasowang@redhat.com, kvm@vger.kernel.org,
virtualization@lists.linux-foundation.org,
netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: peterx@redhat.com, aarcange@redhat.com,
James.Bottomley@hansenpartnership.com, hch@infradead.org,
davem@davemloft.net, jglisse@redhat.com, linux-mm@kvack.org,
linux-arm-kernel@lists.infradead.org,
linux-parisc@vger.kernel.org, christophe.de.dinechin@gmail.com,
jrdr.linux@gmail.com
Subject: [RFC PATCH V3 4/6] vhost: introduce helpers to get the size of metadata area
Date: Tue, 23 Apr 2019 01:54:18 -0400
Message-ID: <20190423055420.26408-5-jasowang@redhat.com>
In-Reply-To: <20190423055420.26408-1-jasowang@redhat.com>

Introduce helpers to compute the size of the descriptor table and of the
avail and used ring metadata. This avoids code duplication, since the same
calculation will also be needed by kernel VA prefetching.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
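As a quick sanity check of the arithmetic (assuming the standard split-ring
layout): the descriptor table takes num * 16 bytes, the available ring
4 + 2 * num bytes and the used ring 4 + 8 * num bytes. When
VIRTIO_RING_F_EVENT_IDX is negotiated, the avail and used rings each gain a
trailing 2-byte event field, which is what the "? 2 : 0" term in the new
helpers accounts for. For num = 256 that works out to 4096, 516 (518 with
EVENT_IDX) and 2052 (2054 with EVENT_IDX) bytes respectively.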
drivers/vhost/vhost.c | 51 ++++++++++++++++++++++++++++---------------
1 file changed, 33 insertions(+), 18 deletions(-)
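The same sizes can be cross-checked from userspace against the UAPI vring
structures; a minimal sketch (illustrative only, not part of the patch,
assuming the kernel's <linux/virtio_ring.h> header is available):

#include <stdio.h>
#include <linux/virtio_ring.h>

/* Recompute the metadata sizes the new vhost helpers return for a ring of
 * 'num' entries; 'event_idx' stands in for VIRTIO_RING_F_EVENT_IDX being
 * negotiated. */
static size_t desc_size(unsigned int num)
{
	return num * sizeof(struct vring_desc);
}

static size_t avail_size(unsigned int num, int event_idx)
{
	return sizeof(struct vring_avail) +
	       num * sizeof(__u16) + (event_idx ? 2 : 0);
}

static size_t used_size(unsigned int num, int event_idx)
{
	return sizeof(struct vring_used) +
	       num * sizeof(struct vring_used_elem) + (event_idx ? 2 : 0);
}

int main(void)
{
	unsigned int num = 256;

	printf("desc %zu avail %zu used %zu\n",
	       desc_size(num), avail_size(num, 1), used_size(num, 1));
	return 0;
}

For a 256-entry ring with EVENT_IDX this prints "desc 4096 avail 518
used 2054", matching the figures above.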
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index bff4d586871d..f3f86c3ed659 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -413,6 +413,32 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 		vhost_vq_free_iovecs(dev->vqs[i]);
 }
 
+static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
+				   unsigned int num)
+{
+	size_t event __maybe_unused =
+	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
+	return sizeof(*vq->avail) +
+	       sizeof(*vq->avail->ring) * num + event;
+}
+
+static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
+				  unsigned int num)
+{
+	size_t event __maybe_unused =
+	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
+	return sizeof(*vq->used) +
+	       sizeof(*vq->used->ring) * num + event;
+}
+
+static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
+				  unsigned int num)
+{
+	return sizeof(*vq->desc) * num;
+}
+
 void vhost_dev_init(struct vhost_dev *dev,
 		    struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
 {
@@ -1257,13 +1283,9 @@ static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
 			 struct vring_used __user *used)
 {
-	size_t s __maybe_unused = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
-
-	return access_ok(desc, num * sizeof *desc) &&
-	       access_ok(avail,
-			 sizeof *avail + num * sizeof *avail->ring + s) &&
-	       access_ok(used,
-			 sizeof *used + num * sizeof *used->ring + s);
+	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
+	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
+	       access_ok(used, vhost_get_used_size(vq, num));
 }
static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
@@ -1315,22 +1337,18 @@ static bool iotlb_access_ok(struct vhost_virtqueue *vq,
 int vq_meta_prefetch(struct vhost_virtqueue *vq)
 {
-	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 	unsigned int num = vq->num;
 
 	if (!vq->iotlb)
 		return 1;
 
 	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
-			       num * sizeof(*vq->desc), VHOST_ADDR_DESC) &&
+			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
 	       iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
-			       sizeof *vq->avail +
-			       num * sizeof(*vq->avail->ring) + s,
+			       vhost_get_avail_size(vq, num),
 			       VHOST_ADDR_AVAIL) &&
 	       iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
-			       sizeof *vq->used +
-			       num * sizeof(*vq->used->ring) + s,
-			       VHOST_ADDR_USED);
+			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
 }
EXPORT_SYMBOL_GPL(vq_meta_prefetch);
@@ -1347,13 +1365,10 @@ EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 static bool vq_log_access_ok(struct vhost_virtqueue *vq,
 			     void __user *log_base)
 {
-	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
-
 	return vq_memory_access_ok(log_base, vq->umem,
 				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
 		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
-					sizeof *vq->used +
-					vq->num * sizeof *vq->used->ring + s));
+					vhost_get_used_size(vq, vq->num)));
 }
/* Can we start vq? */
--
2.18.1
Thread overview (8 messages):
2019-04-23 5:54 [RFC PATCH V3 0/6] vhost: accelerate metadata access Jason Wang
2019-04-23 5:54 ` [RFC PATCH V3 1/6] vhost: generalize adding used elem Jason Wang
2019-04-23 5:54 ` [RFC PATCH V3 2/6] vhost: fine grain userspace memory accessors Jason Wang
2019-04-23 5:54 ` [RFC PATCH V3 3/6] vhost: rename vq_iotlb_prefetch() to vq_meta_prefetch() Jason Wang
2019-04-23 5:54 ` [RFC PATCH V3 4/6] vhost: introduce helpers to get the size of metadata area Jason Wang [this message]
2019-04-23 5:54 ` [RFC PATCH V3 5/6] vhost: factor out setting vring addr and num Jason Wang
2019-04-23 5:54 ` [RFC PATCH V3 6/6] vhost: access vq metadata through kernel virtual address Jason Wang
2019-05-05 9:20 ` [RFC PATCH V3 0/6] vhost: accelerate metadata access Jason Wang