All of lore.kernel.org
 help / color / mirror / Atom feed
* Re: [PATCH 1/2] vdpa: Add support for querying statistics
       [not found] ` <20211118055741.239639-2-elic@nvidia.com>
@ 2021-11-19  2:41   ` Jason Wang
  2021-11-19  3:08     ` Parav Pandit via Virtualization
  0 siblings, 1 reply; 10+ messages in thread
From: Jason Wang @ 2021-11-19  2:41 UTC (permalink / raw)
  To: Eli Cohen; +Cc: Laurent Vivier, mst, virtualization, eperezma

On Thu, Nov 18, 2021 at 1:58 PM Eli Cohen <elic@nvidia.com> wrote:
>
> Add support for querying virtqueue statistics. Supported statistics are:
>
> Received_desc - number of descriptors received for the virtqueue
> completed_desc - number of descriptors completed for the virtqueue
>
> A new callback was added to vdpa_config_ops which provides the means for
> the vdpa driver to return statistics results.
>
> The interface allows for reading all the supported virtqueues, including
> the control virtqueue if it exists, by returning the next queue index to
> query.
>
> Examples:
> 1. Read statistics for the virtqueue at index 1
> $ vdpa dev stats show vdpa-a index 1
> vdpa-a:
> index 1 tx received_desc 21 completed_desc 21
>
> 2. Read statistics for all the virtqueues
> vdpa dev stats show vdpa-a
> vdpa-a:
> index 0 rx received_desc 256 completed_desc 12
> index 1 tx received_desc 21 completed_desc 21
> index 2 ctrl received_desc 0 completed_desc 0

Adding Adrian and Laurent.

It's quite useful but I think it's vendor specific statistics.  I
wonder if it's better to use "vendor" prefix in the protocol, then we
use this instead:

vdpa dev vendor-stats show vdpa-a

Or if we want to make it standard is exposing virtio index better?

qid 0 last_avail_idx X avail_idx Y last_used_idx M used_idx N

Thanks

>
> Signed-off-by: Eli Cohen <elic@nvidia.com>
> ---
>  drivers/vdpa/vdpa.c       | 141 ++++++++++++++++++++++++++++++++++++++
>  include/linux/vdpa.h      |  10 +++
>  include/uapi/linux/vdpa.h |   9 +++
>  3 files changed, 160 insertions(+)
>
> diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
> index 7332a74a4b00..54bcb140af74 100644
> --- a/drivers/vdpa/vdpa.c
> +++ b/drivers/vdpa/vdpa.c
> @@ -781,6 +781,87 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
>         return err;
>  }
>
> +static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg, u16 *index)
> +{
> +       struct vdpa_vq_stats stats;
> +       u16 idx = *index;
> +       int err;
> +
> +       err = vdev->config->get_vq_stats(vdev, index, &stats);
> +       if (err)
> +               return err;
> +
> +       if (nla_put_u16(msg, VDPA_ATTR_DEV_QUEUE_INDEX, idx))
> +               return -EMSGSIZE;
> +
> +       if (nla_put_u16(msg, VDPA_ATTR_DEV_QUEUE_CTRL, !!stats.ctrl_vq))
> +               return -EMSGSIZE;
> +
> +       if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_RECEIVED_DESC, stats.received_desc, VDPA_ATTR_PAD))
> +               return -EMSGSIZE;
> +
> +       if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_COMPLETED_DESC, stats.completed_desc,
> +                             VDPA_ATTR_PAD))
> +               return -EMSGSIZE;
> +
> +       return 0;
> +}
> +
> +static int vdpa_dev_net_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg, u16 index)
> +{
> +       int err;
> +
> +       if (!vdev->config->get_vq_stats)
> +               return -EOPNOTSUPP;
> +
> +       if (index != (u16)-1) {
> +               err = vdpa_fill_stats_rec(vdev, msg, &index);
> +               if (err)
> +                       return err;
> +       } else {
> +               index = 0;
> +               do {
> +                       err = vdpa_fill_stats_rec(vdev, msg, &index);
> +               } while (index != VDPA_INVAL_QUEUE_INDEX);
> +       }
> +
> +       return 0;
> +}
> +
> +static int vdpa_dev_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
> +                              u32 seq, int flags, u16 index)
> +{
> +       u32 device_id;
> +       void *hdr;
> +       int err;
> +
> +       hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
> +                         VDPA_CMD_DEV_STATS_GET);
> +       if (!hdr)
> +               return -EMSGSIZE;
> +
> +       if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
> +               err = -EMSGSIZE;
> +               goto undo_msg;
> +       }
> +
> +       device_id = vdev->config->get_device_id(vdev);
> +       if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
> +               err = -EMSGSIZE;
> +               goto undo_msg;
> +       }
> +
> +       err = vdpa_dev_net_stats_fill(vdev, msg, index);
> +
> +       genlmsg_end(msg, hdr);
> +
> +       return err;
> +
> +undo_msg:
> +       genlmsg_cancel(msg, hdr);
> +       return err;
> +}
> +
>  static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
>  {
>         struct vdpa_device *vdev;
> @@ -862,6 +943,59 @@ vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *
>         return msg->len;
>  }
>
> +static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
> +                                         struct genl_info *info)
> +{
> +       struct vdpa_device *vdev;
> +       struct sk_buff *msg;
> +       const char *devname;
> +       struct device *dev;
> +       u16 index = -1;
> +       int err;
> +
> +       if (!info->attrs[VDPA_ATTR_DEV_NAME])
> +               return -EINVAL;
> +
> +       if (info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
> +               index = nla_get_u16(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
> +
> +       devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
> +       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
> +       if (!msg)
> +               return -ENOMEM;
> +
> +       mutex_lock(&vdpa_dev_mutex);
> +       dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
> +       if (!dev) {
> +               NL_SET_ERR_MSG_MOD(info->extack, "device not found");
> +               err = -ENODEV;
> +               goto dev_err;
> +       }
> +       vdev = container_of(dev, struct vdpa_device, dev);
> +       if (!vdev->mdev) {
> +               NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
> +               err = -EINVAL;
> +               goto mdev_err;
> +       }
> +       err = vdpa_dev_stats_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, index);
> +       if (!err)
> +               err = genlmsg_reply(msg, info);
> +
> +       put_device(dev);
> +       mutex_unlock(&vdpa_dev_mutex);
> +
> +       if (err)
> +               nlmsg_free(msg);
> +
> +       return err;
> +
> +mdev_err:
> +       put_device(dev);
> +dev_err:
> +       mutex_unlock(&vdpa_dev_mutex);
> +       return err;
> +}
> +
>  static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
>         [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
>         [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
> @@ -869,6 +1003,7 @@ static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
>         [VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
>         /* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
>         [VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
> +       [VDPA_ATTR_DEV_QUEUE_INDEX] = NLA_POLICY_RANGE(NLA_U16, 0, 32767),
>  };
>
>  static const struct genl_ops vdpa_nl_ops[] = {
> @@ -902,6 +1037,12 @@ static const struct genl_ops vdpa_nl_ops[] = {
>                 .doit = vdpa_nl_cmd_dev_config_get_doit,
>                 .dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
>         },
> +       {
> +               .cmd = VDPA_CMD_DEV_STATS_GET,
> +               .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
> +               .doit = vdpa_nl_cmd_dev_stats_get_doit,
> +               .flags = GENL_ADMIN_PERM,
> +       },
>  };
>
>  static struct genl_family vdpa_nl_family __ro_after_init = {
> diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
> index c3011ccda430..1df19a87a9cb 100644
> --- a/include/linux/vdpa.h
> +++ b/include/linux/vdpa.h
> @@ -58,6 +58,12 @@ struct vdpa_vq_state {
>         };
>  };
>
> +struct vdpa_vq_stats {
> +       u64 received_desc;
> +       u64 completed_desc;
> +       bool ctrl_vq;
> +};
> +
>  struct vdpa_mgmt_dev;
>
>  /**
> @@ -269,6 +275,8 @@ struct vdpa_config_ops {
>                             const struct vdpa_vq_state *state);
>         int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
>                             struct vdpa_vq_state *state);
> +       int (*get_vq_stats)(struct vdpa_device *vdev, u16 *idx,
> +                           struct vdpa_vq_stats *stats);
>         struct vdpa_notification_area
>         (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
>         /* vq irq is not expected to be changed once DRIVER_OK is set */
> @@ -443,4 +451,6 @@ struct vdpa_mgmt_dev {
>  int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
>  void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
>
> +#define VDPA_INVAL_QUEUE_INDEX 0xffff
> +
>  #endif /* _LINUX_VDPA_H */
> diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
> index a252f06f9dfd..be7deeae139a 100644
> --- a/include/uapi/linux/vdpa.h
> +++ b/include/uapi/linux/vdpa.h
> @@ -18,11 +18,15 @@ enum vdpa_command {
>         VDPA_CMD_DEV_DEL,
>         VDPA_CMD_DEV_GET,               /* can dump */
>         VDPA_CMD_DEV_CONFIG_GET,        /* can dump */
> +       VDPA_CMD_DEV_STATS_GET,
>  };
>
>  enum vdpa_attr {
>         VDPA_ATTR_UNSPEC,
>
> +       /* Pad attribute for 64b alignment */
> +       VDPA_ATTR_PAD = VDPA_ATTR_UNSPEC,
> +
>         /* bus name (optional) + dev name together make the parent device handle */
>         VDPA_ATTR_MGMTDEV_BUS_NAME,             /* string */
>         VDPA_ATTR_MGMTDEV_DEV_NAME,             /* string */
> @@ -40,6 +44,11 @@ enum vdpa_attr {
>         VDPA_ATTR_DEV_NET_CFG_MAX_VQP,          /* u16 */
>         VDPA_ATTR_DEV_NET_CFG_MTU,              /* u16 */
>
> +       VDPA_ATTR_DEV_QUEUE_INDEX,              /* u16 */
> +       VDPA_ATTR_DEV_QUEUE_CTRL,               /* u16 */
> +       VDPA_ATTR_DEV_RECEIVED_DESC,            /* u64 */
> +       VDPA_ATTR_DEV_COMPLETED_DESC,           /* u64 */
> +
>         /* new attributes must be added above here */
>         VDPA_ATTR_MAX,
>  };
> --
> 2.33.1
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* RE: [PATCH 1/2] vdpa: Add support for querying statistics
  2021-11-19  2:41   ` [PATCH 1/2] vdpa: Add support for querying statistics Jason Wang
@ 2021-11-19  3:08     ` Parav Pandit via Virtualization
  2021-11-22  2:30       ` Jason Wang
  0 siblings, 1 reply; 10+ messages in thread
From: Parav Pandit via Virtualization @ 2021-11-19  3:08 UTC (permalink / raw)
  To: Jason Wang, Eli Cohen; +Cc: Laurent Vivier, eperezma, virtualization, mst



> From: Jason Wang <jasowang@redhat.com>
> Sent: Friday, November 19, 2021 8:12 AM
> 
> On Thu, Nov 18, 2021 at 1:58 PM Eli Cohen <elic@nvidia.com> wrote:
> >
> > Add support for querying virtqueue statistics. Supported statistics are:
> >
> > Received_desc - number of descriptors received for the virtqueue
> > completed_desc - number of descriptors completed for the virtqueue
> >
> > A new callback was added to vdpa_config_ops which provides the means
> > for the vdpa driver to return statistics results.
> >
> > The interface allows for reading all the supported virtqueues,
> > including the control virtqueue if it exists, by returning the next
> > queue index to query.
> >
> > Examples:
> > 1. Read statisitics for the virtqueue at index 1 $ vdpa dev stats show
> > vdpa-a index 1
> > vdpa-a:
> > index 1 tx received_desc 21 completed_desc 21
> >
> > 2. Read statisitics for all the virtqueues vdpa dev stats show vdpa-a
> > vdpa-a:
> > index 0 rx received_desc 256 completed_desc 12 index 1 tx
> > received_desc 21 completed_desc 21 index 2 ctrl received_desc 0
> > completed_desc 0
> 
> Adding Adrian and Laurent.
> 
> It's quite useful but I think it's vendor specific statistics.  
These are vdpa device specific of Linux.
And are very generic of the VQ for all device types.

> I wonder if it's better
> to use "vendor" prefix in the protocol, then we use this instead:
> 
> vdpa dev vendor-stats show vdpa-a
> 
May be. Lets evaluate if stats of this patch are generic enough or not.
 
> Or if we want to make it standard is exposing virtio index better?
> 
> qid 0 last_avail_idx X avail_idx Y last_used_idx M used_idx N
> 
I did consider this option a while back. Showing indices is equally useful.
I think we should show that as vq info, along with other VQ attributes (addr, len).
$ vdpa dev show vq

But showing indices are not less statistics and more current state of the queue.
For example roll over of the indices won't cover absolute number of descriptors processed for the queue.
And even if we make them u64 (not good), a non-developer end user needs to keep using a side calculator to count the delta.

So I think useful q indices belong to q info.

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/2] vdpa: Add support for querying statistics
  2021-11-19  3:08     ` Parav Pandit via Virtualization
@ 2021-11-22  2:30       ` Jason Wang
       [not found]         ` <20211122075615.GB74340@mtl-vdi-166.wap.labs.mlnx>
  2021-11-22 10:12         ` Parav Pandit via Virtualization
  0 siblings, 2 replies; 10+ messages in thread
From: Jason Wang @ 2021-11-22  2:30 UTC (permalink / raw)
  To: Parav Pandit; +Cc: Laurent Vivier, mst, virtualization, eperezma, Eli Cohen

On Fri, Nov 19, 2021 at 11:09 AM Parav Pandit <parav@nvidia.com> wrote:
>
>
>
> > From: Jason Wang <jasowang@redhat.com>
> > Sent: Friday, November 19, 2021 8:12 AM
> >
> > On Thu, Nov 18, 2021 at 1:58 PM Eli Cohen <elic@nvidia.com> wrote:
> > >
> > > Add support for querying virtqueue statistics. Supported statistics are:
> > >
> > > Received_desc - number of descriptors received for the virtqueue
> > > completed_desc - number of descriptors completed for the virtqueue
> > >
> > > A new callback was added to vdpa_config_ops which provides the means
> > > for the vdpa driver to return statistics results.
> > >
> > > The interface allows for reading all the supported virtqueues,
> > > including the control virtqueue if it exists, by returning the next
> > > queue index to query.
> > >
> > > Examples:
> > > 1. Read statisitics for the virtqueue at index 1 $ vdpa dev stats show
> > > vdpa-a index 1
> > > vdpa-a:
> > > index 1 tx received_desc 21 completed_desc 21
> > >
> > > 2. Read statisitics for all the virtqueues vdpa dev stats show vdpa-a
> > > vdpa-a:
> > > index 0 rx received_desc 256 completed_desc 12 index 1 tx
> > > received_desc 21 completed_desc 21 index 2 ctrl received_desc 0
> > > completed_desc 0
> >
> > Adding Adrian and Laurent.
> >
> > It's quite useful but I think it's vendor specific statistics.
> These are vdpa device specific of Linux.
> And are very generic of the VQ for all device types.

The question is what happens if the parent doesn't implement those statistics.

>
> > I wonder if it's better
> > to use "vendor" prefix in the protocol, then we use this instead:
> >
> > vdpa dev vendor-stats show vdpa-a
> >
> May be. Lets evaluate if stats of this patch are generic enough or not.
>
> > Or if we want to make it standard is exposing virtio index better?
> >
> > qid 0 last_avail_idx X avail_idx Y last_used_idx M used_idx N
> >
> I did consider this option a while back. Shows indices are equally useful.
> I think we should show that as vq info, along with other VQ attributes (addr, len).

That may work but it looks to me the received_desc/completed_desc is
also per vq.

Another question is that is it more useful to use buffers instead of
descriptors? E.g how indirect descriptors are counted.

> $ vdpa dev show vq
>
> But showing indices are not less statistics and more current state of the queue.
> For example roll over of the indices won't cover absolute number of descriptors processed for the queue.
> And even if we make them u64 (not good), non_developer end user needs to keep using side calculator to count the delta.

How about exposing those raw indices via the protocol and letting the
vdpa tool calculate for us?

Thanks

>
> So I think useful q indices belong to q info.
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/2] vdpa: Add support for querying statistics
       [not found]         ` <20211122075615.GB74340@mtl-vdi-166.wap.labs.mlnx>
@ 2021-11-22  8:07           ` Jason Wang
       [not found]             ` <20211122090755.GA75489@mtl-vdi-166.wap.labs.mlnx>
  0 siblings, 1 reply; 10+ messages in thread
From: Jason Wang @ 2021-11-22  8:07 UTC (permalink / raw)
  To: Eli Cohen; +Cc: Laurent Vivier, mst, virtualization, eperezma

On Mon, Nov 22, 2021 at 3:56 PM Eli Cohen <elic@nvidia.com> wrote:
>
> On Mon, Nov 22, 2021 at 10:30:12AM +0800, Jason Wang wrote:
> > On Fri, Nov 19, 2021 at 11:09 AM Parav Pandit <parav@nvidia.com> wrote:
> > >
> > >
> > >
> > > > From: Jason Wang <jasowang@redhat.com>
> > > > Sent: Friday, November 19, 2021 8:12 AM
> > > >
> > > > On Thu, Nov 18, 2021 at 1:58 PM Eli Cohen <elic@nvidia.com> wrote:
> > > > >
> > > > > Add support for querying virtqueue statistics. Supported statistics are:
> > > > >
> > > > > Received_desc - number of descriptors received for the virtqueue
> > > > > completed_desc - number of descriptors completed for the virtqueue
> > > > >
> > > > > A new callback was added to vdpa_config_ops which provides the means
> > > > > for the vdpa driver to return statistics results.
> > > > >
> > > > > The interface allows for reading all the supported virtqueues,
> > > > > including the control virtqueue if it exists, by returning the next
> > > > > queue index to query.
> > > > >
> > > > > Examples:
> > > > > 1. Read statisitics for the virtqueue at index 1 $ vdpa dev stats show
> > > > > vdpa-a index 1
> > > > > vdpa-a:
> > > > > index 1 tx received_desc 21 completed_desc 21
> > > > >
> > > > > 2. Read statisitics for all the virtqueues vdpa dev stats show vdpa-a
> > > > > vdpa-a:
> > > > > index 0 rx received_desc 256 completed_desc 12 index 1 tx
> > > > > received_desc 21 completed_desc 21 index 2 ctrl received_desc 0
> > > > > completed_desc 0
> > > >
> > > > Adding Adrian and Laurent.
> > > >
> > > > It's quite useful but I think it's vendor specific statistics.
> > > These are vdpa device specific of Linux.
> > > And are very generic of the VQ for all device types.
> >
> > The question is what happens if the parent doesn't implement those statistics.
> >
>
> Are you suggesting that some parents may support reporting a subset of
> the fields and we should maybe indicate what is actually reported?

It's an open question. E.g do we want a stable API for the those
statistics counter? If yes, it's better to not to let the parents to
report a subset but then it forces the exact same counters to be
supported by other vendors. If not, it should be fine. For any case, I
think it's simpler to start with "vendor-stats" and we can switch it
to a standard way if it was agreed by every vendor.

>
> > >
> > > > I wonder if it's better
> > > > to use "vendor" prefix in the protocol, then we use this instead:
> > > >
> > > > vdpa dev vendor-stats show vdpa-a
> > > >
> > > May be. Lets evaluate if stats of this patch are generic enough or not.
> > >
> > > > Or if we want to make it standard is exposing virtio index better?
> > > >
> > > > qid 0 last_avail_idx X avail_idx Y last_used_idx M used_idx N
> > > >
> > > I did consider this option a while back. Shows indices are equally useful.
> > > I think we should show that as vq info, along with other VQ attributes (addr, len).
> >
> > That may work but it looks to me the receiced_desc/completed_desc is
> > also per vq.
> >
> > Another question is that is it more useful to use buffers instead of
> > descriptors? E.g how indirect descriptors are counted.
> >
>
> I think it's descriptors so indirect descriptors are counted once but I
> need to verify that.
>
> > > $ vdpa dev show vq
> > >
> > > But showing indices are not less statistics and more current state of the queue.
> > > For example roll over of the indices won't cover absolute number of descriptors processed for the queue.
> > > And even if we make them u64 (not good), non_developer end user needs to keep using side calculator to count the delta.
> >
> > How about exposing those raw indices via the protocol and letting the
> > vdpa tool calculate for us?
> >
>
> counters are 16 bit per the virtio spec so I don't know how you could
> handle rollover without losing information.

So at most 1 rollover I guess. So it's not hard by comparing the
indices. E.g. if the last_avail_idx (device avail idx) > avail_idx, we
know there's one?

Thanks

>
> > Thanks
> >
> > >
> > > So I think useful q indices belong to q info.
> > >
> >
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/2] vdpa: Add support for querying statistics
       [not found]             ` <20211122090755.GA75489@mtl-vdi-166.wap.labs.mlnx>
@ 2021-11-22  9:31               ` Jason Wang
  2021-11-22 10:15                 ` Parav Pandit via Virtualization
  0 siblings, 1 reply; 10+ messages in thread
From: Jason Wang @ 2021-11-22  9:31 UTC (permalink / raw)
  To: Eli Cohen; +Cc: Laurent Vivier, mst, virtualization, eperezma

On Mon, Nov 22, 2021 at 5:08 PM Eli Cohen <elic@nvidia.com> wrote:
>
> On Mon, Nov 22, 2021 at 04:07:24PM +0800, Jason Wang wrote:
> > On Mon, Nov 22, 2021 at 3:56 PM Eli Cohen <elic@nvidia.com> wrote:
> > >
> > > On Mon, Nov 22, 2021 at 10:30:12AM +0800, Jason Wang wrote:
> > > > On Fri, Nov 19, 2021 at 11:09 AM Parav Pandit <parav@nvidia.com> wrote:
> > > > >
> > > > >
> > > > >
> > > > > > From: Jason Wang <jasowang@redhat.com>
> > > > > > Sent: Friday, November 19, 2021 8:12 AM
> > > > > >
> > > > > > On Thu, Nov 18, 2021 at 1:58 PM Eli Cohen <elic@nvidia.com> wrote:
> > > > > > >
> > > > > > > Add support for querying virtqueue statistics. Supported statistics are:
> > > > > > >
> > > > > > > Received_desc - number of descriptors received for the virtqueue
> > > > > > > completed_desc - number of descriptors completed for the virtqueue
> > > > > > >
> > > > > > > A new callback was added to vdpa_config_ops which provides the means
> > > > > > > for the vdpa driver to return statistics results.
> > > > > > >
> > > > > > > The interface allows for reading all the supported virtqueues,
> > > > > > > including the control virtqueue if it exists, by returning the next
> > > > > > > queue index to query.
> > > > > > >
> > > > > > > Examples:
> > > > > > > 1. Read statisitics for the virtqueue at index 1 $ vdpa dev stats show
> > > > > > > vdpa-a index 1
> > > > > > > vdpa-a:
> > > > > > > index 1 tx received_desc 21 completed_desc 21
> > > > > > >
> > > > > > > 2. Read statisitics for all the virtqueues vdpa dev stats show vdpa-a
> > > > > > > vdpa-a:
> > > > > > > index 0 rx received_desc 256 completed_desc 12 index 1 tx
> > > > > > > received_desc 21 completed_desc 21 index 2 ctrl received_desc 0
> > > > > > > completed_desc 0
> > > > > >
> > > > > > Adding Adrian and Laurent.
> > > > > >
> > > > > > It's quite useful but I think it's vendor specific statistics.
> > > > > These are vdpa device specific of Linux.
> > > > > And are very generic of the VQ for all device types.
> > > >
> > > > The question is what happens if the parent doesn't implement those statistics.
> > > >
> > >
> > > Are you suggesting that some parents may support reporting a subeset of
> > > the fields and we should maybe indicate what is actually reported?
> >
> > It's an open question. E.g do we want a stable API for the those
> > statistics counter? If yes, it's better to not to let the parents to
> > report a subset but then it forces the exact same counters to be
> > supported by other vendors. If not, it should be fine. For any case, I
> > think it's simpler to start with "vendor-stats" and we can switch it
> > to a standard way if it was agreed by every vendor.
> >
>
> received and completed descriptors are very basic and I assume any
> vendor would support those.

At least it was not supported by virtio.

> If we go with vendor stats, how can we
> communicate the information to userspace? Currenlty we use netlink
> attributes defined to pass this information.

It can be done exactly as what have been done in the patch, we can
document it as vendor stats.

>
> > >
> > > > >
> > > > > > I wonder if it's better
> > > > > > to use "vendor" prefix in the protocol, then we use this instead:
> > > > > >
> > > > > > vdpa dev vendor-stats show vdpa-a
> > > > > >
> > > > > May be. Lets evaluate if stats of this patch are generic enough or not.
> > > > >
> > > > > > Or if we want to make it standard is exposing virtio index better?
> > > > > >
> > > > > > qid 0 last_avail_idx X avail_idx Y last_used_idx M used_idx N
> > > > > >
> > > > > I did consider this option a while back. Shows indices are equally useful.
> > > > > I think we should show that as vq info, along with other VQ attributes (addr, len).
> > > >
> > > > That may work but it looks to me the receiced_desc/completed_desc is
> > > > also per vq.
> > > >
> > > > Another question is that is it more useful to use buffers instead of
> > > > descriptors? E.g how indirect descriptors are counted.
> > > >
> > >
> > > I think it's descriptors so indirect descriptors are counted once but I
> > > need to verify that.
> > >
> > > > > $ vdpa dev show vq
> > > > >
> > > > > But showing indices are not less statistics and more current state of the queue.
> > > > > For example roll over of the indices won't cover absolute number of descriptors processed for the queue.
> > > > > And even if we make them u64 (not good), non_developer end user needs to keep using side calculator to count the delta.
> > > >
> > > > How about exposing those raw indices via the protocol and letting the
> > > > vdpa tool calculate for us?
> > > >
> > >
> > > counters are 16 bit per the virtio spec so I don't know how you could
> > > handle rolover without losing information.
> >
> > So at most 1 rollover I guess. So it's not hard by comparing the
> > indices. E.g the if last_avil_idx(device avail idx) > avail_idx, we
> > know there's one?
> >
>
> I am not sure I am following you. You do query twice. You can never know
> how many rounds the virtqueue has gone through so the information is
> useless.

Ok, I think I get you. So I wonder if it's more useful to use device
specific counters. For networking, it could be packets send/received
etc.

Thanks

>
> > Thanks
> >
> > >
> > > > Thanks
> > > >
> > > > >
> > > > > So I think useful q indices belong to q info.
> > > > >
> > > >
> > >
> >
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* RE: [PATCH 1/2] vdpa: Add support for querying statistics
  2021-11-22  2:30       ` Jason Wang
       [not found]         ` <20211122075615.GB74340@mtl-vdi-166.wap.labs.mlnx>
@ 2021-11-22 10:12         ` Parav Pandit via Virtualization
  1 sibling, 0 replies; 10+ messages in thread
From: Parav Pandit via Virtualization @ 2021-11-22 10:12 UTC (permalink / raw)
  To: Jason Wang; +Cc: Laurent Vivier, mst, virtualization, eperezma, Eli Cohen



> From: Jason Wang <jasowang@redhat.com>
> Sent: Monday, November 22, 2021 8:00 AM
> 
> On Fri, Nov 19, 2021 at 11:09 AM Parav Pandit <parav@nvidia.com> wrote:
> >
> >
> >
> > > From: Jason Wang <jasowang@redhat.com>
> > > Sent: Friday, November 19, 2021 8:12 AM
> > >
> > > On Thu, Nov 18, 2021 at 1:58 PM Eli Cohen <elic@nvidia.com> wrote:
> > > >
> > > > Add support for querying virtqueue statistics. Supported statistics are:
> > > >
> > > > Received_desc - number of descriptors received for the virtqueue
> > > > completed_desc - number of descriptors completed for the virtqueue
> > > >
> > > > A new callback was added to vdpa_config_ops which provides the
> > > > means for the vdpa driver to return statistics results.
> > > >
> > > > The interface allows for reading all the supported virtqueues,
> > > > including the control virtqueue if it exists, by returning the
> > > > next queue index to query.
> > > >
> > > > Examples:
> > > > 1. Read statisitics for the virtqueue at index 1 $ vdpa dev stats
> > > > show vdpa-a index 1
> > > > vdpa-a:
> > > > index 1 tx received_desc 21 completed_desc 21
> > > >
> > > > 2. Read statisitics for all the virtqueues vdpa dev stats show
> > > > vdpa-a
> > > > vdpa-a:
> > > > index 0 rx received_desc 256 completed_desc 12 index 1 tx
> > > > received_desc 21 completed_desc 21 index 2 ctrl received_desc 0
> > > > completed_desc 0
> > >
> > > Adding Adrian and Laurent.
> > >
> > > It's quite useful but I think it's vendor specific statistics.
> > These are vdpa device specific of Linux.
> > And are very generic of the VQ for all device types.
> 
> The question is what happens if the parent doesn't implement those statistics.
Only those statistics that the vendor driver implements are filled in.
If the parent doesn't implement any of it, the usual EOPNOTSUPP is returned.


> 
> >
> > > I wonder if it's better
> > > to use "vendor" prefix in the protocol, then we use this instead:
> > >
> > > vdpa dev vendor-stats show vdpa-a
> > >
> > May be. Lets evaluate if stats of this patch are generic enough or not.
> >
> > > Or if we want to make it standard is exposing virtio index better?
> > >
> > > qid 0 last_avail_idx X avail_idx Y last_used_idx M used_idx N
> > >
> > I did consider this option a while back. Shows indices are equally useful.
> > I think we should show that as vq info, along with other VQ attributes (addr,
> len).
> 
> That may work but it looks to me the receiced_desc/completed_desc is also per
> vq.
> 
It is per VQ but statistics of a VQ and VQ config are two different things and also mostly consumed by different entities.
For example, Prometheus and other similar entities will use VQ statistics.
Prometheus and similar statistics collectors often make frequent queries for it, for which a VQ address is of no use.

While VQ addr, indices etc will be more used by the developers/debuggers etc.

And it is better not to mix these different info under one netlink command.
 
> > $ vdpa dev show vq
> >
> > But showing indices are not less statistics and more current state of the
> queue.
> > For example roll over of the indices won't cover absolute number of
> descriptors processed for the queue.
> > And even if we make them u64 (not good), non_developer end user needs to
> keep using side calculator to count the delta.
> 
> How about exposing those raw indices via the protocol and letting the vdpa tool
> calculate for us?
With wrap-arounds it won't be able to calculate it.
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* RE: [PATCH 1/2] vdpa: Add support for querying statistics
  2021-11-22  9:31               ` Jason Wang
@ 2021-11-22 10:15                 ` Parav Pandit via Virtualization
       [not found]                   ` <20211122150704.GA88466@mtl-vdi-166.wap.labs.mlnx>
  0 siblings, 1 reply; 10+ messages in thread
From: Parav Pandit via Virtualization @ 2021-11-22 10:15 UTC (permalink / raw)
  To: Jason Wang, Eli Cohen; +Cc: Laurent Vivier, eperezma, virtualization, mst



> From: Jason Wang <jasowang@redhat.com>
> Sent: Monday, November 22, 2021 3:02 PM
> 
> > If we go with vendor stats, how can we communicate the information to
> > userspace? Currently we use netlink attributes defined to pass this
> > information.
> 
> It can be done exactly as what have been done in the patch, we can document
> it as vendor stats.
> 
Yes, attribute to have VENDOR_ prefix in it.
> 
> Ok, I think I get you. So I wonder if it's more useful to use device specific
> counters. For networking, it could be packets send/received etc.

Yes, I equally discussed this previously with Eli as it's more meaningful for end users.
We just return the device id of it along with queue number that helps to show tx and rx.
For ctrl q, it is just ctrl commands and ctrl completions.
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* RE: [PATCH 1/2] vdpa: Add support for querying statistics
       [not found]                   ` <20211122150704.GA88466@mtl-vdi-166.wap.labs.mlnx>
@ 2021-11-22 15:56                     ` Parav Pandit via Virtualization
  2021-11-23  2:28                       ` Jason Wang
  0 siblings, 1 reply; 10+ messages in thread
From: Parav Pandit via Virtualization @ 2021-11-22 15:56 UTC (permalink / raw)
  To: Eli Cohen; +Cc: Laurent Vivier, mst, virtualization, eperezma



> From: Eli Cohen <elic@nvidia.com>
> Sent: Monday, November 22, 2021 8:37 PM
> 
> On Mon, Nov 22, 2021 at 12:15:36PM +0200, Parav Pandit wrote:
> >
> >
> > > From: Jason Wang <jasowang@redhat.com>
> > > Sent: Monday, November 22, 2021 3:02 PM
> > >
> > > > If we go with vendor stats, how can we communicate the information
> > > > to userspace? Currently we use netlink attributes defined to pass
> > > > this information.
> > >
> > > It can be done exactly as what have been done in the patch, we can
> > > document it as vendor stats.
> > >
> > Yes, attribute to have VENDOR_ prefix in it.
> > >
> > > Ok, I think I get you. So I wonder if it's more useful to use device
> > > specific counters. For networking, it could be packets send/received etc.
> >
> > Yes, I equally discussed this previously with Eli as it's more meaningful for end
> users.
> > We just return the device id of it along with queue number that helps to show
> tx and rx.
> > For ctrl q, it is just ctrl commands and ctrl completions.
> 
> I don't think we should mix send/receive packets for descriptors statistics. The
> hardware could process a descriptor and still not transmit any packet.
> 
> We can add packets send/recv but descriptor statistics have their own value.
> 
Oh right. I read Jason's comment of _packets_ too fast. I meant to say send/receive descriptors.
I guess you already named them as tx and rx. Didn't review the patches in this series yet.

> To summarize, I can add the VENDOR_ prefix to the attributes and re-send, or is
> there anything else you think should change?
The VENDOR_ prefix, and "vstats" as the iproute2 command, look fine to me.

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/2] vdpa: Add support for querying statistics
  2021-11-22 15:56                     ` Parav Pandit via Virtualization
@ 2021-11-23  2:28                       ` Jason Wang
       [not found]                         ` <20211123064235.GA118175@mtl-vdi-166.wap.labs.mlnx>
  0 siblings, 1 reply; 10+ messages in thread
From: Jason Wang @ 2021-11-23  2:28 UTC (permalink / raw)
  To: Parav Pandit; +Cc: Laurent Vivier, mst, virtualization, eperezma, Eli Cohen

On Mon, Nov 22, 2021 at 11:56 PM Parav Pandit <parav@nvidia.com> wrote:
>
>
>
> > From: Eli Cohen <elic@nvidia.com>
> > Sent: Monday, November 22, 2021 8:37 PM
> >
> > On Mon, Nov 22, 2021 at 12:15:36PM +0200, Parav Pandit wrote:
> > >
> > >
> > > > From: Jason Wang <jasowang@redhat.com>
> > > > Sent: Monday, November 22, 2021 3:02 PM
> > > >
> > > > > If we go with vendor stats, how can we communicate the information
> > > > > to userspace? Currently we use netlink attributes defined to pass
> > > > > this information.
> > > >
> > > > It can be done exactly as what have been done in the patch, we can
> > > > document it as vendor stats.
> > > >
> > > Yes, attribute to have VENDOR_ prefix in it.
> > > >
> > > > Ok, I think I get you. So I wonder if it's more useful to use device
> > > > specific counters. For networking, it could be packets send/received etc.
> > >
> > > Yes, I equally discussed this previously with Eli as it's more meaningful for end
> > users.
> > > We just return the device id of it along with queue number that helps to show
> > tx and rx.
> > > For ctrl q, it is just ctrl commands and ctrl completions.
> >
> > I don't think we should mix send/receive packets for descriptors statistics. The
> > hardware could process a descriptor and still not transmit any packet.
> >
> > We can add packets send/recv but descriptor statistics have their own value.
> >
> Oh right. I read Jason's comment of _packets_ too fast. I meant to say send/receive descriptors.
> I guess you already named them as tx and rx. Didn't review the patches in this series yet.
>
> > To summarize, I can add the VENDOR_ prefix to the attributes and re-send, or is
> > there anything else you think should change?
> VENDOR_ prefix and command as iproute2 command as "vstats" looks fine to me.

Ack, but we need to figure out:

1) use descriptors or buffers.
2) if we use descriptors, for indirect descriptors and descriptor
chains how are they counted?

Thanks

>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/2] vdpa: Add support for querying statistics
       [not found]                         ` <20211123064235.GA118175@mtl-vdi-166.wap.labs.mlnx>
@ 2021-11-23  8:38                           ` Jason Wang
  0 siblings, 0 replies; 10+ messages in thread
From: Jason Wang @ 2021-11-23  8:38 UTC (permalink / raw)
  To: Eli Cohen; +Cc: Laurent Vivier, mst, virtualization, eperezma

On Tue, Nov 23, 2021 at 2:42 PM Eli Cohen <elic@nvidia.com> wrote:
>
> On Tue, Nov 23, 2021 at 10:28:04AM +0800, Jason Wang wrote:
> > On Mon, Nov 22, 2021 at 11:56 PM Parav Pandit <parav@nvidia.com> wrote:
> > >
> > >
> > >
> > > > From: Eli Cohen <elic@nvidia.com>
> > > > Sent: Monday, November 22, 2021 8:37 PM
> > > >
> > > > On Mon, Nov 22, 2021 at 12:15:36PM +0200, Parav Pandit wrote:
> > > > >
> > > > >
> > > > > > From: Jason Wang <jasowang@redhat.com>
> > > > > > Sent: Monday, November 22, 2021 3:02 PM
> > > > > >
> > > > > > > If we go with vendor stats, how can we communicate the information
> > > > > > > to userspace? Currently we use netlink attributes defined to pass
> > > > > > > this information.
> > > > > >
> > > > > > It can be done exactly as what have been done in the patch, we can
> > > > > > document it as vendor stats.
> > > > > >
> > > > > Yes, attribute to have VENDOR_ prefix in it.
> > > > > >
> > > > > > Ok, I think I get you. So I wonder if it's more useful to use device
> > > > > > specific counters. For networking, it could be packets send/received etc.
> > > > >
> > > > > Yes, I equally discussed this previously with Eli as it's more meaningful for end
> > > > users.
> > > > > We just return the device id of it along with queue number that helps to show
> > > > tx and rx.
> > > > > For ctrl q, it is just ctrl commands and ctrl completions.
> > > >
> > > > I don't think we should mix send/receive packets for descriptors statistics. The
> > > > hardware could process a descriptor and still not transmit any packet.
> > > >
> > > > We can add packets send/recv but descriptor statistics have their own value.
> > > >
> > > Oh right. I read Jason's comment of _packets_ too fast. I meant to say send/receive descriptors.
> > > I guess you already named them as tx and rx. Didn't review the patches in this series yet.
> > >
> > > > To summarize, I can add the VENDOR_ prefix to the attributes and re-send, or is
> > > > there anything else you think should change?
> > > The VENDOR_ prefix, and "vstats" as the iproute2 command, look fine to me.
> >
> > Ack, but we need to figure out:
> >
> > 1) use descriptors or buffers.
>
> Descriptors.
> Currently we don't support indirect buffers but when we do, we will
> preserve the semantics.

Just to confirm, if I understand correctly:

1) with indirect descriptors, only 1 descriptor is counted
2) with N descriptors chained together, it will report N descriptors

Thanks

>
> > 2) if we use descriptors, for indirect descriptors and descriptor
> > chains how are they counted?
> >
>
> We count descriptors, not buffers.
>
> > Thanks
> >
> > >
> >
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2021-11-23  8:39 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20211118055741.239639-1-elic@nvidia.com>
     [not found] ` <20211118055741.239639-2-elic@nvidia.com>
2021-11-19  2:41   ` [PATCH 1/2] vdpa: Add support for querying statistics Jason Wang
2021-11-19  3:08     ` Parav Pandit via Virtualization
2021-11-22  2:30       ` Jason Wang
     [not found]         ` <20211122075615.GB74340@mtl-vdi-166.wap.labs.mlnx>
2021-11-22  8:07           ` Jason Wang
     [not found]             ` <20211122090755.GA75489@mtl-vdi-166.wap.labs.mlnx>
2021-11-22  9:31               ` Jason Wang
2021-11-22 10:15                 ` Parav Pandit via Virtualization
     [not found]                   ` <20211122150704.GA88466@mtl-vdi-166.wap.labs.mlnx>
2021-11-22 15:56                     ` Parav Pandit via Virtualization
2021-11-23  2:28                       ` Jason Wang
     [not found]                         ` <20211123064235.GA118175@mtl-vdi-166.wap.labs.mlnx>
2021-11-23  8:38                           ` Jason Wang
2021-11-22 10:12         ` Parav Pandit via Virtualization

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.