From: Zhu Lingshan <lingshan.zhu@intel.com>
To: mst@redhat.com, kvm@vger.kernel.org,
virtualization@lists.linux-foundation.org,
linux-kernel@vger.kernel.org, netdev@vger.kernel.org,
jasowang@redhat.com
Cc: lulu@redhat.com, dan.daly@intel.com, cunming.liang@intel.com,
Zhu Lingshan <lingshan.zhu@intel.com>
Subject: [PATCH] vdpa: bypass waking up vhost_woker for vdpa vq kick
Date: Tue, 26 May 2020 13:32:25 +0800 [thread overview]
Message-ID: <1590471145-4436-1-git-send-email-lingshan.zhu@intel.com> (raw)
Standard vhost devices rely on waking up a vhost_worker to kick
a virtqueue. However, vdpa devices have hardware backends, so they
do not need this wakeup routine. With this commit, a vdpa device
kicks its virtqueue directly, avoiding the performance overhead
of waking up a vhost_worker.
Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
Suggested-by: Jason Wang <jasowang@redhat.com>
---
drivers/vhost/vdpa.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 100 insertions(+)
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 0968361..d3a2aca 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -287,6 +287,66 @@ static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
return 0;
}
+/* Stop polling the vq's kick eventfd. File-local helper, hence static. */
+static void vhost_vdpa_poll_stop(struct vhost_virtqueue *vq)
+{
+	vhost_poll_stop(&vq->poll);
+}
+
+/*
+ * Start polling the vq's kick eventfd so guest kicks invoke handle_kick
+ * directly. Returns 0 on success (or if already polling), -EINVAL if the
+ * file reports an error condition.
+ */
+static int vhost_vdpa_poll_start(struct vhost_virtqueue *vq)
+{
+	struct vhost_poll *poll = &vq->poll;
+	struct file *file = vq->kick;
+	__poll_t mask;
+
+	/* Already registered on a wait queue: nothing to do. */
+	if (poll->wqh)
+		return 0;
+
+	mask = vfs_poll(file, &poll->table);
+	/* Check for errors before kicking: do not kick an erroring eventfd. */
+	if (mask & EPOLLERR) {
+		vhost_poll_stop(poll);
+		return -EINVAL;
+	}
+	/* Eventfd is already readable: process the pending kick now. */
+	if (mask)
+		vq->handle_kick(&vq->poll.work);
+
+	return 0;
+}
+
+/*
+ * VHOST_SET_VRING_KICK for vdpa: install the kick eventfd and poll it so
+ * guest kicks invoke handle_kick directly, bypassing the vhost_worker.
+ * Returns 0 on success or a negative error code.
+ */
+static long vhost_vdpa_set_vring_kick(struct vhost_virtqueue *vq,
+				      void __user *argp)
+{
+	bool pollstart = false, pollstop = false;
+	struct file *eventfp, *filep = NULL;
+	struct vhost_vring_file f;
+	/* Initialize: when eventfp == vq->kick, no assignment below runs. */
+	long r = 0;
+
+	if (copy_from_user(&f, argp, sizeof(f)))
+		return -EFAULT;
+
+	/* fd == -1 means "clear the kick eventfd". */
+	eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
+	if (IS_ERR(eventfp))
+		return PTR_ERR(eventfp);
+
+	if (eventfp != vq->kick) {
+		pollstop = (filep = vq->kick) != NULL;
+		pollstart = (vq->kick = eventfp) != NULL;
+	} else {
+		filep = eventfp;
+	}
+
+	if (pollstop && vq->handle_kick)
+		vhost_vdpa_poll_stop(vq);
+
+	/* Drop the reference to the old (or duplicate) eventfd file. */
+	if (filep)
+		fput(filep);
+
+	if (pollstart && vq->handle_kick)
+		r = vhost_vdpa_poll_start(vq);
+
+	return r;
+}
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
void __user *argp)
@@ -316,6 +376,11 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
return 0;
}
+ if (cmd == VHOST_SET_VRING_KICK) {
+ r = vhost_vdpa_set_vring_kick(vq, argp);
+ return r;
+ }
+
if (cmd == VHOST_GET_VRING_BASE)
vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);
@@ -667,6 +732,39 @@ static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
v->domain = NULL;
}
+/*
+ * Wait-queue callback installed on the kick eventfd: runs handle_kick
+ * directly from the wakeup path instead of queueing work to a
+ * vhost_worker thread.
+ */
+static int vhost_vdpa_poll_worker(wait_queue_entry_t *wait, unsigned int mode,
+ int sync, void *key)
+{
+ struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
+ struct vhost_virtqueue *vq = container_of(poll, struct vhost_virtqueue,
+ poll);
+
+ /* Ignore wakeups for events this poll entry is not watching. */
+ if (!(key_to_poll(key) & poll->mask))
+ return 0;
+
+ vq->handle_kick(&vq->poll.work);
+
+ return 0;
+}
+
+/*
+ * For each vq that has a handle_kick callback, rewire its poll entry so
+ * the eventfd wakeup invokes handle_kick directly rather than waking a
+ * vhost_worker. File-local helper, hence static.
+ */
+static void vhost_vdpa_poll_init(struct vhost_dev *dev)
+{
+	struct vhost_virtqueue *vq;
+	struct vhost_poll *poll;
+	int i;
+
+	for (i = 0; i < dev->nvqs; i++) {
+		vq = dev->vqs[i];
+		poll = &vq->poll;
+		if (vq->handle_kick) {
+			init_waitqueue_func_entry(&poll->wait,
+						  vhost_vdpa_poll_worker);
+			poll->work.fn = vq->handle_kick;
+		}
+	}
+}
+
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
struct vhost_vdpa *v;
@@ -697,6 +795,8 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
vhost_dev_init(dev, vqs, nvqs, 0, 0, 0,
vhost_vdpa_process_iotlb_msg);
+ vhost_vdpa_poll_init(dev);
+
dev->iotlb = vhost_iotlb_alloc(0, 0);
if (!dev->iotlb) {
r = -ENOMEM;
--
1.8.3.1
next reply other threads:[~2020-05-26 5:35 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-05-26 5:32 Zhu Lingshan [this message]
2020-05-26 22:53 ` [PATCH] vdpa: bypass waking up vhost_woker for vdpa vq kick kbuild test robot
2020-05-27 9:16 ` [RFC PATCH] vdpa: vhost_vdpa_poll_stop() can be static kbuild test robot
2020-05-28 10:06 ` [PATCH] vdpa: bypass waking up vhost_woker for vdpa vq kick Jason Wang
2020-06-02 9:42 ` Dan Carpenter
2020-06-02 10:16 ` Jason Wang
2020-06-02 11:10 ` Jason Wang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1590471145-4436-1-git-send-email-lingshan.zhu@intel.com \
--to=lingshan.zhu@intel.com \
--cc=cunming.liang@intel.com \
--cc=dan.daly@intel.com \
--cc=jasowang@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=lulu@redhat.com \
--cc=mst@redhat.com \
--cc=netdev@vger.kernel.org \
--cc=virtualization@lists.linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).