Target-devel archive on lore.kernel.org
From: Jason Wang <jasowang@redhat.com>
To: Mike Christie <michael.christie@oracle.com>,
	martin.petersen@oracle.com, linux-scsi@vger.kernel.org,
	target-devel@vger.kernel.org, mst@redhat.com,
	pbonzini@redhat.com, stefanha@redhat.com,
	virtualization@lists.linux-foundation.org
Subject: Re: [PATCH 05/11] vhost: move vq iovec allocation to dev init time
Date: Mon, 09 Nov 2020 03:41:18 +0000
Message-ID: <347657f8-7f2a-0e47-bab7-015ad4290684@redhat.com>
In-Reply-To: <1604528804-2878-6-git-send-email-michael.christie@oracle.com>


On 2020/11/5 6:26 AM, Mike Christie wrote:
> The next patches allow us to create vqs on demand after vhost_dev_init
> and vhost_dev_set_owner have been called. For vhost-scsi we don't
> know the number of vqs we really want until the vring/vq setup
> operations have started up. For other devices we know the number of vqs
> at vhost_dev_init time, so for those devs we init the vq and allocate
> the needed iovecs. For vhost-scsi we will do it later when userspace has
> indicated to us that it's going to use a vq.
>
> Signed-off-by: Mike Christie <michael.christie@oracle.com>
> ---
>   drivers/vhost/vhost.c | 71 +++++++++++++++++++++++++++------------------------
>   1 file changed, 38 insertions(+), 33 deletions(-)
>
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index b35229e..a4a4450 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -383,29 +383,27 @@ static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
>   	vq->heads = NULL;
>   }
>   
> -/* Helper to allocate iovec buffers for all vqs. */
> -static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
> +static int vhost_vq_alloc_iovecs(struct vhost_dev *dev,
> +				 struct vhost_virtqueue *vq)
>   {
> -	struct vhost_virtqueue *vq;
> -	int i;
> +	vq->indirect = kmalloc_array(UIO_MAXIOV, sizeof(*vq->indirect),
> +				     GFP_KERNEL);
> +	if (!vq->indirect)
> +		return -ENOMEM;
> +
> +	if (!dev->iov_limit)
> +		return 0;


This looks like an optimization. Let's defer it to a separate patch.
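
For reference, a rough sketch (not from the posted series) of what the per-vq helper could look like if that early return is split out into a follow-up patch, i.e. keeping the original unconditional allocations:

static int vhost_vq_alloc_iovecs(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq)
{
	/* Same allocations as the old vhost_dev_alloc_iovecs(), but per vq. */
	vq->indirect = kmalloc_array(UIO_MAXIOV, sizeof(*vq->indirect),
				     GFP_KERNEL);
	vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), GFP_KERNEL);
	vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
				  GFP_KERNEL);
	if (!vq->indirect || !vq->log || !vq->heads)
		goto err_nomem;

	return 0;

err_nomem:
	vhost_vq_free_iovecs(vq);
	return -ENOMEM;
}

The iov_limit check could then be introduced on top, where its motivation can be explained separately.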


> +
> +	vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), GFP_KERNEL);
> +	vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
> +				  GFP_KERNEL);
> +	if (!vq->log || !vq->heads)
> +		goto err_nomem;
>   
> -	for (i = 0; i < dev->nvqs; ++i) {
> -		vq = dev->vqs[i];
> -		vq->indirect = kmalloc_array(UIO_MAXIOV,
> -					     sizeof(*vq->indirect),
> -					     GFP_KERNEL);
> -		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
> -					GFP_KERNEL);
> -		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
> -					  GFP_KERNEL);
> -		if (!vq->indirect || !vq->log || !vq->heads)
> -			goto err_nomem;
> -	}
>   	return 0;
>   
>   err_nomem:
> -	for (; i >= 0; --i)
> -		vhost_vq_free_iovecs(dev->vqs[i]);
> +	vhost_vq_free_iovecs(vq);
>   	return -ENOMEM;
>   }
>   
> @@ -458,6 +456,21 @@ static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
>   	return sizeof(*vq->desc) * num;
>   }
>   
> +static int vhost_vq_init(struct vhost_dev *dev, struct vhost_virtqueue *vq)
> +{
> +	vq->log = NULL;
> +	vq->indirect = NULL;
> +	vq->heads = NULL;
> +	vq->dev = dev;
> +	mutex_init(&vq->mutex);
> +	vhost_vq_reset(dev, vq);
> +
> +	if (vq->handle_kick)
> +		vhost_poll_init(&vq->poll, vq->handle_kick, EPOLLIN, dev);
> +
> +	return vhost_vq_alloc_iovecs(dev, vq);
> +}


If possible, I would introduce vhost_vq_init() in one patch and then add 
vhost_vq_alloc_iovecs() on top in a follow-up.
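
Something like this rough sketch (just the refactor, derived from the hunk above) could be the first patch:

static void vhost_vq_init(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Pure code motion: the per-vq init loop body from vhost_dev_init(). */
	vq->log = NULL;
	vq->indirect = NULL;
	vq->heads = NULL;
	vq->dev = dev;
	mutex_init(&vq->mutex);
	vhost_vq_reset(dev, vq);

	if (vq->handle_kick)
		vhost_poll_init(&vq->poll, vq->handle_kick, EPOLLIN, dev);
}

The second patch would then switch the return type to int, add the
vhost_vq_alloc_iovecs() call, and add the error handling in vhost_dev_init().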

Thanks


> +
>   int vhost_dev_init(struct vhost_dev *dev,
>   		   struct vhost_virtqueue **vqs, int nvqs,
>   		   int iov_limit, int weight, int byte_weight,
> @@ -465,7 +478,6 @@ int vhost_dev_init(struct vhost_dev *dev,
>   		   int (*msg_handler)(struct vhost_dev *dev,
>   				      struct vhost_iotlb_msg *msg))
>   {
> -	struct vhost_virtqueue *vq;
>   	int i;
>   
>   	dev->vqs = vqs;
> @@ -489,19 +501,16 @@ int vhost_dev_init(struct vhost_dev *dev,
>   
>   
>   	for (i = 0; i < dev->nvqs; ++i) {
> -		vq = dev->vqs[i];
> -		vq->log = NULL;
> -		vq->indirect = NULL;
> -		vq->heads = NULL;
> -		vq->dev = dev;
> -		mutex_init(&vq->mutex);
> -		vhost_vq_reset(dev, vq);
> -		if (vq->handle_kick)
> -			vhost_poll_init(&vq->poll, vq->handle_kick,
> -					EPOLLIN, dev);
> +		if (vhost_vq_init(dev, dev->vqs[i]))
> +			goto err_vq_init;
>   	}
>   
>   	return 0;
> +
> +err_vq_init:
> +	for (--i; i >= 0; --i)
> +		vhost_vq_free_iovecs(dev->vqs[i]);
> +	return -ENOMEM;
>   }
>   EXPORT_SYMBOL_GPL(vhost_dev_init);
>   
> @@ -606,10 +615,6 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
>   			goto err_cgroup;
>   	}
>   
> -	err = vhost_dev_alloc_iovecs(dev);
> -	if (err)
> -		goto err_cgroup;
> -
>   	return 0;
>   err_cgroup:
>   	if (dev->worker) {

Thread overview: 19+ messages
2020-11-04 22:26 [PATCH 00/11 V4] vhost: vhost-scsi bug fixes Mike Christie
2020-11-04 22:26 ` [PATCH 01/11] vhost scsi: add lun parser helper Mike Christie
2020-11-04 22:26 ` [PATCH 02/11] vhost: remove work arg from vhost_work_flush Mike Christie
2020-11-04 22:26 ` [PATCH 03/11] vhost net: use goto error handling in open Mike Christie
2020-11-04 22:26 ` [PATCH 04/11] vhost: prep vhost_dev_init users to handle failures Mike Christie
2020-11-04 22:26 ` [PATCH 05/11] vhost: move vq iovec allocation to dev init time Mike Christie
2020-11-09  3:41   ` Jason Wang [this message]
2020-11-04 22:26 ` [PATCH 06/11] vhost: support delayed vq creation Mike Christie
2020-11-09  4:01   ` Jason Wang
2020-11-09 18:41     ` Mike Christie
2020-11-09 20:30       ` Mike Christie
2020-11-09 22:32         ` Michael S. Tsirkin
2020-11-10  2:50         ` Jason Wang
2020-11-10  2:44       ` Jason Wang
2020-11-04 22:26 ` [PATCH 07/11] vhost scsi: support delayed IO " Mike Christie
2020-11-04 22:26 ` [PATCH 08/11] vhost scsi: alloc cmds per vq instead of session Mike Christie
2020-11-04 22:26 ` [PATCH 09/11] vhost scsi: fix cmd completion race Mike Christie
2020-11-04 22:26 ` [PATCH 10/11] vhost scsi: Add support for LUN resets Mike Christie
2020-11-04 22:26 ` [PATCH 11/11] vhost scsi: remove extra flushes Mike Christie
