* Re: [PATCH 1/3] vdpa/mlx5: Remove mtu field from vdpa net device
       [not found] ` <20210909123635.30884-2-elic@nvidia.com>
@ 2021-09-13 22:09   ` Michael S. Tsirkin
From: Michael S. Tsirkin @ 2021-09-13 22:09 UTC (permalink / raw)
  To: Eli Cohen; +Cc: eperezma, virtualization

On Thu, Sep 09, 2021 at 03:36:33PM +0300, Eli Cohen wrote:
> No need to save the mtu in the net device struct. We can save it in the
> config struct, which cannot be modified.
> 
> Moreover, move the initialization to mlx5_vdpa_dev_add(); the
> mlx5_vdpa_set_features() callback is not the right place for it.

The reason it's there is the endianness mess: before set_features(),
VERSION_1 is not negotiated. Does mlx5 support a transitional mode, or
modern only? If the latter, then cpu_to_mlx5vdpa16 should really be
switched to just use LE unconditionally.
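
(For context, a minimal sketch of what such a feature-dependent conversion
helper typically looks like, assuming the standard virtio byteorder helpers;
this is an illustration, not the actual mlx5 definition:)

    #include <linux/bits.h>
    #include <linux/virtio_byteorder.h>
    #include <linux/virtio_config.h>

    /*
     * Convert a CPU-order u16 into the byte order negotiated for the
     * device's config space.  Before VIRTIO_F_VERSION_1 is accepted in
     * set_features(), a transitional device must use legacy (native)
     * endianness; once VERSION_1 is negotiated, everything is little
     * endian.  This is why the original code wrote the config fields
     * only after set_features().
     */
    static inline __virtio16 example_cpu_to_vdpa16(u64 actual_features, u16 val)
    {
        bool le = actual_features & BIT_ULL(VIRTIO_F_VERSION_1);

        return __cpu_to_virtio16(le, val);
    }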


> 
> Signed-off-by: Eli Cohen <elic@nvidia.com>
> ---
>  drivers/vdpa/mlx5/net/mlx5_vnet.c | 9 +++++----
>  1 file changed, 5 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index 7784e8a5647f..08ac15b17b83 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -159,7 +159,6 @@ struct mlx5_vdpa_net {
>  	struct mlx5_fc *rx_counter;
>  	struct mlx5_flow_handle *rx_rule;
>  	bool setup;
> -	u16 mtu;
>  	u32 cur_num_vqs;
>  };
>  
> @@ -1942,8 +1941,6 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
>  		return err;
>  
>  	ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
> -	ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
> -	ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
>  	update_cvq_info(mvdev);
>  	return err;
>  }
> @@ -2405,6 +2402,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
>  	struct mlx5_vdpa_net *ndev;
>  	struct mlx5_core_dev *mdev;
>  	u32 max_vqs;
> +	u16 mtu;
>  	int err;
>  
>  	if (mgtdev->ndev)
> @@ -2432,10 +2430,13 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
>  	init_mvqs(ndev);
>  	mutex_init(&ndev->reslock);
>  	config = &ndev->config;
> -	err = query_mtu(mdev, &ndev->mtu);
> +	err = query_mtu(mdev, &mtu);
>  	if (err)
>  		goto err_mtu;
>  
> +	ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu);
> +	ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
> +
>  	err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
>  	if (err)
>  		goto err_mtu;
> -- 
> 2.31.1


* Re: [PATCH 2/3] vdpa/mlx5: Rename control VQ workqueue to vdpa wq
       [not found] ` <20210909123635.30884-3-elic@nvidia.com>
@ 2021-09-14  2:57   ` Jason Wang
From: Jason Wang @ 2021-09-14  2:57 UTC (permalink / raw)
  To: Eli Cohen; +Cc: eperezma, virtualization, mst

On Thu, Sep 9, 2021 at 8:36 PM Eli Cohen <elic@nvidia.com> wrote:
>
> A subsequent patch will use the same workqueue for executing other
> work not related to the control VQ. Rename the workqueue and the
> workqueue entry struct used to convey information to it.
>
> Signed-off-by: Eli Cohen <elic@nvidia.com>

Acked-by: Jason Wang <jasowang@redhat.com>

> ---
>  drivers/vdpa/mlx5/core/mlx5_vdpa.h | 2 +-
>  drivers/vdpa/mlx5/net/mlx5_vnet.c  | 8 ++++----
>  2 files changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> index 01a848adf590..81dc3d88d3dd 100644
> --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
> @@ -63,7 +63,7 @@ struct mlx5_control_vq {
>         unsigned short head;
>  };
>
> -struct mlx5_ctrl_wq_ent {
> +struct mlx5_vdpa_wq_ent {
>         struct work_struct work;
>         struct mlx5_vdpa_dev *mvdev;
>  };
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index 08ac15b17b83..59f1874648ae 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -1556,14 +1556,14 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
>  {
>         virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
>         struct virtio_net_ctrl_hdr ctrl;
> -       struct mlx5_ctrl_wq_ent *wqent;
> +       struct mlx5_vdpa_wq_ent *wqent;
>         struct mlx5_vdpa_dev *mvdev;
>         struct mlx5_control_vq *cvq;
>         struct mlx5_vdpa_net *ndev;
>         size_t read, write;
>         int err;
>
> -       wqent = container_of(work, struct mlx5_ctrl_wq_ent, work);
> +       wqent = container_of(work, struct mlx5_vdpa_wq_ent, work);
>         mvdev = wqent->mvdev;
>         ndev = to_mlx5_vdpa_ndev(mvdev);
>         cvq = &mvdev->cvq;
> @@ -1615,7 +1615,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
>         struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
>         struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
>         struct mlx5_vdpa_virtqueue *mvq;
> -       struct mlx5_ctrl_wq_ent *wqent;
> +       struct mlx5_vdpa_wq_ent *wqent;
>
>         if (!is_index_valid(mvdev, idx))
>                 return;
> @@ -2466,7 +2466,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
>         if (err)
>                 goto err_mr;
>
> -       mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_ctrl_wq");
> +       mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
>         if (!mvdev->wq) {
>                 err = -ENOMEM;
>                 goto err_res2;
> --
> 2.31.1
>
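
(For readers new to this pattern, a minimal sketch of how such a work entry
is allocated, filled and handed to the workqueue; type and handler names are
taken from the patch, the wrapper function is hypothetical, and error
handling is trimmed:)

    static void example_queue_cvq_work(struct mlx5_vdpa_dev *mvdev)
    {
        struct mlx5_vdpa_wq_ent *wqent;

        /* Atomic allocation: the kick path may not be allowed to sleep. */
        wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
        if (!wqent)
            return;

        /* The entry is the only payload the handler receives; the handler
         * frees it when done (compare update_carrier() in patch 3).
         */
        wqent->mvdev = mvdev;
        INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
        queue_work(mvdev->wq, &wqent->work);
    }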


* Re: [PATCH 3/3] vdpa/mlx5: Propagate link status from device to vdpa driver
       [not found] ` <20210909123635.30884-4-elic@nvidia.com>
@ 2021-09-14  3:00   ` Jason Wang
From: Jason Wang @ 2021-09-14  3:00 UTC (permalink / raw)
  To: Eli Cohen; +Cc: eperezma, virtualization, mst

On Thu, Sep 9, 2021 at 8:36 PM Eli Cohen <elic@nvidia.com> wrote:
>
> Add code to register for hardware asynchronous events. Use this
> mechanism to track link status events coming from the device and update
> the config struct.
> 
> After updating the link status, call the vdpa config callback to notify
> of the change.
>
> Signed-off-by: Eli Cohen <elic@nvidia.com>

Acked-by: Jason Wang <jasowang@redhat.com>

> ---
>  drivers/vdpa/mlx5/net/mlx5_vnet.c | 94 ++++++++++++++++++++++++++++++-
>  1 file changed, 92 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> index 59f1874648ae..c2b5c62358b8 100644
> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
> @@ -160,6 +160,8 @@ struct mlx5_vdpa_net {
>         struct mlx5_flow_handle *rx_rule;
>         bool setup;
>         u32 cur_num_vqs;
> +       struct notifier_block nb;
> +       struct vdpa_callback config_cb;
>  };
>
>  static void free_resources(struct mlx5_vdpa_net *ndev);
> @@ -1851,6 +1853,7 @@ static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
>         ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
>         ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
>         ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MQ);
> +       ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
>
>         print_features(mvdev, ndev->mvdev.mlx_features, false);
>         return ndev->mvdev.mlx_features;
> @@ -1947,8 +1950,10 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
>
>  static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
>  {
> -       /* not implemented */
> -       mlx5_vdpa_warn(to_mvdev(vdev), "set config callback not supported\n");
> +       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
> +       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
> +
> +       ndev->config_cb = *cb;
>  }
>
>  #define MLX5_VDPA_MAX_VQ_ENTRIES 256
> @@ -2393,6 +2398,82 @@ struct mlx5_vdpa_mgmtdev {
>         struct mlx5_vdpa_net *ndev;
>  };
>
> +static u8 query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
> +{
> +       u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
> +       u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
> +       int err;
> +
> +       MLX5_SET(query_vport_state_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_STATE);
> +       MLX5_SET(query_vport_state_in, in, op_mod, opmod);
> +       MLX5_SET(query_vport_state_in, in, vport_number, vport);
> +       if (vport)
> +               MLX5_SET(query_vport_state_in, in, other_vport, 1);
> +
> +       err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
> +       if (err)
> +               return 0;
> +
> +       return MLX5_GET(query_vport_state_out, out, state);
> +}
> +
> +static bool get_link_state(struct mlx5_vdpa_dev *mvdev)
> +{
> +       if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) ==
> +           VPORT_STATE_UP)
> +               return true;
> +
> +       return false;
> +}
> +
> +static void update_carrier(struct work_struct *work)
> +{
> +       struct mlx5_vdpa_wq_ent *wqent;
> +       struct mlx5_vdpa_dev *mvdev;
> +       struct mlx5_vdpa_net *ndev;
> +
> +       wqent = container_of(work, struct mlx5_vdpa_wq_ent, work);
> +       mvdev = wqent->mvdev;
> +       ndev = to_mlx5_vdpa_ndev(mvdev);
> +       if (get_link_state(mvdev))
> +               ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
> +       else
> +               ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
> +
> +       if (ndev->config_cb.callback)
> +               ndev->config_cb.callback(ndev->config_cb.private);
> +
> +       kfree(wqent);
> +}
> +
> +static int event_handler(struct notifier_block *nb, unsigned long event, void *param)
> +{
> +       struct mlx5_vdpa_net *ndev = container_of(nb, struct mlx5_vdpa_net, nb);
> +       struct mlx5_eqe *eqe = param;
> +       int ret = NOTIFY_DONE;
> +       struct mlx5_vdpa_wq_ent *wqent;
> +
> +       if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
> +               switch (eqe->sub_type) {
> +               case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
> +               case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
> +                       wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
> +                       if (!wqent)
> +                               return NOTIFY_DONE;
> +
> +                       wqent->mvdev = &ndev->mvdev;
> +                       INIT_WORK(&wqent->work, update_carrier);
> +                       queue_work(ndev->mvdev.wq, &wqent->work);
> +                       ret = NOTIFY_OK;
> +                       break;
> +               default:
> +                       return NOTIFY_DONE;
> +               }
> +               return ret;
> +       }
> +       return ret;
> +}
> +
>  static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
>  {
>         struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
> @@ -2441,6 +2522,11 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
>         if (err)
>                 goto err_mtu;
>
> +       if (get_link_state(mvdev))
> +               ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
> +       else
> +               ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
> +
>         if (!is_zero_ether_addr(config->mac)) {
>                 pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
>                 err = mlx5_mpfs_add_mac(pfmdev, config->mac);
> @@ -2472,6 +2558,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
>                 goto err_res2;
>         }
>
> +       ndev->nb.notifier_call = event_handler;
> +       mlx5_notifier_register(mdev, &ndev->nb);
>         ndev->cur_num_vqs = 2 * mlx5_vdpa_max_qps(max_vqs);
>         mvdev->vdev.mdev = &mgtdev->mgtdev;
>         err = _vdpa_register_device(&mvdev->vdev, ndev->cur_num_vqs + 1);
> @@ -2502,7 +2590,9 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
>  {
>         struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
>         struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
> +       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
>
> +       mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
>         destroy_workqueue(mvdev->wq);
>         _vdpa_unregister_device(dev);
>         mgtdev->ndev = NULL;
> --
> 2.31.1
>
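
(As a side note on where this notification ends up: the callback invoked by
update_carrier() is whatever the vdpa bus driver registered through the
set_config_cb op, in practice virtio_vdpa or vhost-vdpa. A rough sketch of
that side of the contract, with hypothetical names:)

    static void example_config_changed(void *private)
    {
        /* Typically: re-read the device config space and propagate the
         * new VIRTIO_NET_S_LINK_UP state to the guest or network stack.
         */
    }

    static void example_register_config_cb(struct vdpa_device *vdev)
    {
        struct vdpa_callback cb = {
            .callback = example_config_changed,
            .private = vdev,
        };

        vdev->config->set_config_cb(vdev, &cb);
    }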

