From: Yongji Xie <xieyongji@bytedance.com>
To: Jason Wang <jasowang@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>,
	virtualization <virtualization@lists.linux-foundation.org>,
	linux-kernel <linux-kernel@vger.kernel.org>,
	kvm <kvm@vger.kernel.org>,
	netdev@vger.kernel.org, Stefan Hajnoczi <stefanha@redhat.com>
Subject: Re: [PATCH 1/2] vdpa: support per virtqueue max queue size
Date: Wed, 7 Jul 2021 12:04:26 +0800	[thread overview]
Message-ID: <CACycT3tMd750PQ0mgqCjHnxM4RmMcx2+Eo=2RBs2E2W3qPJang@mail.gmail.com> (raw)
In-Reply-To: <20210705071910.31965-1-jasowang@redhat.com>

On Mon, Jul 5, 2021 at 3:19 PM Jason Wang <jasowang@redhat.com> wrote:
>
> The virtio spec allows the device to specify a per-virtqueue max
> queue size. vDPA needs to adapt to this flexibility. E.g. Qemu
> advertises a small control virtqueue for virtio-net.
>
> So this patch adds an index parameter to the get_vq_num_max bus
> operation so that the device can report its per-virtqueue max queue
> size.
>
> Both VHOST_VDPA_GET_VRING_NUM and VDPA_ATTR_DEV_MAX_VQ_SIZE assume a
> global maximum size, so in this case we iterate over all the
> virtqueues and return the minimal size. Actually,
> VHOST_VDPA_GET_VRING_NUM is not a must for userspace: userspace may
> instead use VHOST_SET_VRING_NUM to probe or validate the maximum
> virtqueue size. Anyway, we can introduce a per-vq version of
> VHOST_VDPA_GET_VRING_NUM in the future if necessary.
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
>  drivers/vdpa/ifcvf/ifcvf_main.c   |  2 +-
>  drivers/vdpa/mlx5/net/mlx5_vnet.c |  2 +-
>  drivers/vdpa/vdpa.c               | 22 +++++++++++++++++++++-
>  drivers/vdpa/vdpa_sim/vdpa_sim.c  |  2 +-
>  drivers/vdpa/virtio_pci/vp_vdpa.c |  2 +-
>  drivers/vhost/vdpa.c              |  9 ++++++---
>  drivers/virtio/virtio_vdpa.c      |  2 +-
>  include/linux/vdpa.h              |  5 ++++-
>  8 files changed, 36 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
> index ab0ab5cf0f6e..646b340db2af 100644
> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
> @@ -254,7 +254,7 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
>         ifcvf_set_status(vf, status);
>  }
>
> -static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
> +static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev, u16 qid)
>  {
>         return IFCVF_QUEUE_MAX;
>  }
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index dda5dc6f7737..afd6114d07b0 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -1584,7 +1584,7 @@ static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callba
>  }
>
>  #define MLX5_VDPA_MAX_VQ_ENTRIES 256
> -static u16 mlx5_vdpa_get_vq_num_max(struct vdpa_device *vdev)
> +static u16 mlx5_vdpa_get_vq_num_max(struct vdpa_device *vdev, u16 idx)
>  {
>         return MLX5_VDPA_MAX_VQ_ENTRIES;
>  }
> diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
> index bb3f1d1f0422..d77d59811389 100644
> --- a/drivers/vdpa/vdpa.c
> +++ b/drivers/vdpa/vdpa.c
> @@ -239,6 +239,26 @@ void vdpa_unregister_driver(struct vdpa_driver *drv)
>  }
>  EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
>
> +/**
> + * vdpa_get_vq_num_max - get the maximum virtqueue size
> + * @vdev: vdpa device
> + */
> +u16 vdpa_get_vq_num_max(struct vdpa_device *vdev)
> +{
> +       const struct vdpa_config_ops *ops = vdev->config;
> +       u16 s, size = ops->get_vq_num_max(vdev, 0);
> +       int i;
> +
> +       for (i = 1; i < vdev->nvqs; i++) {
> +               s = ops->get_vq_num_max(vdev, i);
> +               if (s && s < size)
> +                       size = s;
> +       }
> +
> +       return size;
> +}
> +EXPORT_SYMBOL_GPL(vdpa_get_vq_num_max);
> +
>  /**
>   * vdpa_mgmtdev_register - register a vdpa management device
>   *
> @@ -502,7 +522,7 @@ vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq
>
>         device_id = vdev->config->get_device_id(vdev);
>         vendor_id = vdev->config->get_vendor_id(vdev);
> -       max_vq_size = vdev->config->get_vq_num_max(vdev);
> +       max_vq_size = vdpa_get_vq_num_max(vdev);
>
>         err = -EMSGSIZE;
>         if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> index 98f793bc9376..49e29056f164 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> @@ -422,7 +422,7 @@ static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
>         /* We don't support config interrupt */
>  }
>
> -static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
> +static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa, u16 idx)
>  {
>         return VDPASIM_QUEUE_MAX;
>  }
> diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
> index c76ebb531212..2926641fb586 100644
> --- a/drivers/vdpa/virtio_pci/vp_vdpa.c
> +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
> @@ -195,7 +195,7 @@ static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
>                 vp_vdpa_free_irq(vp_vdpa);
>  }
>
> -static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
> +static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa, u16 qid)
>  {
>         return VP_VDPA_QUEUE_MAX;
>  }
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index fb41db3da611..c9ec395b8c42 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -289,11 +289,14 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
>
>  static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
>  {
> -       struct vdpa_device *vdpa = v->vdpa;
> -       const struct vdpa_config_ops *ops = vdpa->config;
>         u16 num;
>
> -       num = ops->get_vq_num_max(vdpa);
> +       /*
> +        * VHOST_VDPA_GET_VRING_NUM asssumes a global max virtqueue

s/asssumes/assumes. Otherwise this looks good to me.
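
As a side note, with the new qid parameter a parent driver that
provisions a smaller control virtqueue could report per-vq maximums
along these lines (a hypothetical sketch, not part of this patch;
FOO_CTRL_VQ_INDEX, FOO_CVQ_MAX_ENTRIES and FOO_QUEUE_MAX_ENTRIES are
made-up names):

static u16 foo_vdpa_get_vq_num_max(struct vdpa_device *vdev, u16 qid)
{
	/* The control vq of this made-up device is smaller than
	 * its data vqs, which is exactly the case the commit
	 * message describes for virtio-net under Qemu.
	 */
	if (qid == FOO_CTRL_VQ_INDEX)
		return FOO_CVQ_MAX_ENTRIES;	/* e.g. 64 */

	return FOO_QUEUE_MAX_ENTRIES;		/* e.g. 256 */
}

For such a device, the new vdpa_get_vq_num_max() helper (and hence
VHOST_VDPA_GET_VRING_NUM and VDPA_ATTR_DEV_MAX_VQ_SIZE) would report
the minimum across all virtqueues, 64 in this example, until a per-vq
interface is introduced.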

Thanks,
Yongji

Thread overview: 9+ messages
2021-07-05  7:19 [PATCH 1/2] vdpa: support per virtqueue max queue size Jason Wang
2021-07-05  7:19 ` [PATCH 2/2] vdpa: vp_vdpa: don't use hard-coded maximum virtqueue size Jason Wang
2021-07-05  7:26   ` Michael S. Tsirkin
2021-07-05  7:29     ` Jason Wang
2021-07-05 17:59       ` Michael S. Tsirkin
2021-07-06  2:30         ` Jason Wang
2021-07-06 13:14 ` [PATCH 1/2] vdpa: support per virtqueue max queue size Stefan Hajnoczi
2021-07-07  4:04 ` Yongji Xie [this message]
2021-07-07  5:38   ` Jason Wang