From: Mike Christie <michael.christie@oracle.com>
To: target-devel@vger.kernel.org, linux-scsi@vger.kernel.org, stefanha@redhat.com, pbonzini@redhat.com, jasowang@redhat.com, mst@redhat.com, sgarzare@redhat.com, virtualization@lists.linux-foundation.org
Cc: Mike Christie <michael.christie@oracle.com>
Subject: [PATCH 8/9] vhost: add vhost_dev pointer to vhost_work
Date: Tue, 25 May 2021 13:05:59 -0500
Message-ID: <20210525180600.6349-9-michael.christie@oracle.com>
In-Reply-To: <20210525180600.6349-1-michael.christie@oracle.com>

The next patch allows a vhost_worker to handle different devices. To
prepare for that, this patch adds a pointer to the device on the work so
we can get to the different mms in the vhost_worker thread.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 drivers/vhost/scsi.c  |  7 ++++---
 drivers/vhost/vhost.c | 24 ++++++++++++++----------
 drivers/vhost/vhost.h |  4 +++-
 drivers/vhost/vsock.c |  3 ++-
 4 files changed, 23 insertions(+), 15 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index b607bff41074..073b20bca257 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1808,7 +1808,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 	if (!vqs)
 		goto err_vqs;
 
-	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
+	vhost_work_init(&vs->dev, &vs->vs_event_work, vhost_scsi_evt_work);
 
 	vs->vs_events_nr = 0;
 	vs->vs_events_missed = false;
@@ -1823,7 +1823,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		vqs[i] = &svq->vq;
 		svq->vs = vs;
 		init_llist_head(&svq->completion_list);
-		vhost_work_init(&svq->completion_work,
+		vhost_work_init(&vs->dev, &svq->completion_work,
 				vhost_scsi_complete_cmd_work);
 		svq->vq.handle_kick = vhost_scsi_handle_kick;
 	}
@@ -2017,7 +2017,8 @@ static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
 	if (!tmf)
 		return -ENOMEM;
 	INIT_LIST_HEAD(&tmf->queue_entry);
-	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
+	vhost_work_init(&tpg->vhost_scsi->dev, &tmf->vwork,
+			vhost_scsi_tmf_resp_work);
 
 	mutex_lock(&vhost_scsi_mutex);
 
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 981e9bac7a31..eb16eb2bbee0 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -182,10 +182,12 @@ static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
 	return 0;
 }
 
-void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+void vhost_work_init(struct vhost_dev *dev, struct vhost_work *work,
+		     vhost_work_fn_t fn)
 {
 	clear_bit(VHOST_WORK_QUEUED, &work->flags);
 	work->fn = fn;
+	work->dev = dev;
 }
 EXPORT_SYMBOL_GPL(vhost_work_init);
 
@@ -201,7 +203,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 	poll->wqh = NULL;
 	poll->vq = vq;
 
-	vhost_work_init(&poll->work, fn);
+	vhost_work_init(dev, &poll->work, fn);
 }
 EXPORT_SYMBOL_GPL(vhost_poll_init);
 
@@ -270,12 +272,13 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 }
 EXPORT_SYMBOL_GPL(vhost_work_queue);
 
-static void vhost_work_dev_flush_on(struct vhost_worker *worker)
+static void vhost_work_dev_flush_on(struct vhost_dev *dev,
+				    struct vhost_worker *worker)
 {
 	struct vhost_flush_struct flush;
 
 	init_completion(&flush.wait_event);
-	vhost_work_init(&flush.work, vhost_flush_work);
+	vhost_work_init(dev, &flush.work, vhost_flush_work);
 
 	vhost_work_queue_on(&flush.work, worker);
 	wait_for_completion(&flush.wait_event);
@@ -286,7 +289,7 @@ void vhost_work_dev_flush(struct vhost_dev *dev)
 	int i;
 
 	for (i = 0; i < dev->num_workers; i++)
-		vhost_work_dev_flush_on(dev->workers[i]);
+		vhost_work_dev_flush_on(dev, dev->workers[i]);
 }
 EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
 
@@ -306,7 +309,7 @@ EXPORT_SYMBOL_GPL(vhost_has_work);
 
 void vhost_vq_work_flush(struct vhost_virtqueue *vq)
 {
-	vhost_work_dev_flush_on(vq->worker);
+	vhost_work_dev_flush_on(vq->dev, vq->worker);
 }
 EXPORT_SYMBOL_GPL(vhost_vq_work_flush);
 
@@ -573,14 +576,15 @@ static void vhost_attach_cgroups_work(struct vhost_work *work)
 	s->ret = cgroup_attach_task_all(s->owner, current);
 }
 
-static int vhost_attach_cgroups_on(struct vhost_worker *worker)
+static int vhost_attach_cgroups_on(struct vhost_dev *dev,
+				   struct vhost_worker *worker)
 {
 	struct vhost_attach_cgroups_struct attach;
 
 	attach.owner = current;
-	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+	vhost_work_init(dev, &attach.work, vhost_attach_cgroups_work);
 	vhost_work_queue_on(&attach.work, worker);
-	vhost_work_dev_flush_on(worker);
+	vhost_work_dev_flush_on(dev, worker);
 	return attach.ret;
 }
 
@@ -675,7 +679,7 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
 	worker->task = task;
 	wake_up_process(task); /* avoid contributing to loadavg */
 
-	ret = vhost_attach_cgroups_on(worker);
+	ret = vhost_attach_cgroups_on(dev, worker);
 	if (ret)
 		goto stop_worker;
 
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 75b884ad1f17..75ad3aa5adca 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -24,6 +24,7 @@ struct vhost_work {
 	struct llist_node	node;
 	vhost_work_fn_t		fn;
 	unsigned long		flags;
+	struct vhost_dev	*dev;
 };
 
 struct vhost_worker {
@@ -47,7 +48,8 @@ struct vhost_poll {
 	struct vhost_virtqueue	*vq;
 };
 
-void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
+void vhost_work_init(struct vhost_dev *dev, struct vhost_work *work,
+		     vhost_work_fn_t fn);
 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
 bool vhost_has_work(struct vhost_dev *dev);
 void vhost_vq_work_flush(struct vhost_virtqueue *vq);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index f954f4d29c95..302415b6460b 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -648,7 +648,8 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
 	file->private_data = vsock;
 	spin_lock_init(&vsock->send_pkt_list_lock);
 	INIT_LIST_HEAD(&vsock->send_pkt_list);
-	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+	vhost_work_init(&vsock->dev, &vsock->send_pkt_work,
+			vhost_transport_send_pkt_work);
 
 	return 0;
 out:
-- 
2.25.1
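
For context, a minimal, illustrative sketch (not part of this patch) of how a worker
shared across devices could use the new work->dev field to run each work item under
the owning device's mm. It assumes vhost_dev::mm is set as in vhost_dev_set_owner()
and uses the kernel's kthread_use_mm()/kthread_unuse_mm() helpers; the
vhost_run_work() name is hypothetical, and mm refcounting and the dev->mm == NULL
case are ignored for brevity:

#include <linux/kthread.h>	/* kthread_use_mm(), kthread_unuse_mm() */
#include "vhost.h"		/* struct vhost_work, struct vhost_dev */

/* Hypothetical helper: run one queued work item under its device's mm. */
static void vhost_run_work(struct vhost_work *work)
{
	struct mm_struct *mm = work->dev->mm;

	/*
	 * work->dev is the pointer added by this patch, so a worker that
	 * serves several devices can adopt the right address space per
	 * item instead of assuming one dev->mm for the thread's lifetime.
	 */
	kthread_use_mm(mm);
	work->fn(work);
	kthread_unuse_mm(mm);
}

A shared worker's dispatch loop would call something like this for each llist entry
it pulls off the work list.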