* [PATCH RESEND V3 3/3] vdpa/mlx5: Use consistent RQT size
From: Eli Cohen @ 2022-04-06  8:53 UTC
  To: mst, jasowang; +Cc: hdanton, virtualization, linux-kernel, Eli Cohen

The current code evaluates RQT size based on the configured number of
virtqueues. This can cause an issue in the following scenario (a
concrete model is sketched below):

Assume MQ was negotiated.
1. mlx5_vdpa_set_map() gets called.
2. handle_ctrl_mq() is called, setting cur_num_vqs to some value lower
   than the configured max VQs.
3. A second set_map gets called, but now a smaller number of VQs is used
   to evaluate the size of the RQT.
4. handle_ctrl_mq() is called with a value larger than what the RQT can
   hold. This emits errors and leaves the driver state compromised.
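
To make the failure concrete, here is a small stand-alone userspace
model (illustrative only, not part of the patch; the VQ counts are
made up):

	/* rqt_model.c - build with: cc rqt_model.c */
	#include <stdio.h>

	static unsigned int roundup_pow_of_two(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		unsigned int max_vqs = 16;	/* configured max: 8 VQ pairs */
		unsigned int cur_num_vqs = 4;	/* guest set 2 pairs via ctrl VQ */
		unsigned int rqt_size, newqps = 8;

		/* Pre-patch: a second set_map sizes the RQT from cur_num_vqs. */
		rqt_size = roundup_pow_of_two(cur_num_vqs / 2);	/* = 2 */

		/* The guest asks for 8 pairs; the old newqps check passes... */
		if (newqps <= max_vqs / 2)
			printf("check passes, but the RQT holds %u entries -> FW error\n",
			       rqt_size);

		/* Post-patch: rqt_size is fixed at feature negotiation time. */
		rqt_size = max_vqs / 2;	/* = 8, regardless of cur_num_vqs */
		printf("with the fix, the RQT always holds %u entries\n", rqt_size);
		return 0;
	}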

To fix this, we use a new field in struct mlx5_vdpa_net to hold the
required number of entries in the RQT. This value is evaluated in
mlx5_vdpa_set_driver_features() where we have the negotiated features
all set up.

In addition to that, we take the device's max RQT size capability into
consideration early, when the device is added, so we don't need to
consider it when creating the RQT.

Last, we remove the use of mlx5_vdpa_max_qps(), which just returns
max_vqs / 2, and make the code clearer.
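
In short, the sizing now follows these invariants (a summary of the
diff below, using its names):

	/*
	 * rqt_size    = MQ negotiated ? max_virtqueue_pairs : 1
	 *               (evaluated in mlx5_vdpa_set_driver_features())
	 * cur_num_vqs = 2 * rqt_size
	 * max_vqs     = min(max_num_virtio_queues, 1 << log_max_rqt_size)
	 *               (capped once at device add)
	 */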

Fixes: 52893733f2c5 ("vdpa/mlx5: Add multiqueue support")
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Eli Cohen <elic@nvidia.com>
---
V2 -> V3:
Fix typo in change log
Add acked-by Jason

 drivers/vdpa/mlx5/net/mlx5_vnet.c | 61 +++++++++++--------------------
 1 file changed, 21 insertions(+), 40 deletions(-)

diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 79001301b383..e0de44000d92 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -161,6 +161,7 @@ struct mlx5_vdpa_net {
 	struct mlx5_flow_handle *rx_rule_mcast;
 	bool setup;
 	u32 cur_num_vqs;
+	u32 rqt_size;
 	struct notifier_block nb;
 	struct vdpa_callback config_cb;
 	struct mlx5_vdpa_wq_ent cvq_ent;
@@ -204,17 +205,12 @@ static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
 	return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
 }
 
-static inline u32 mlx5_vdpa_max_qps(int max_vqs)
-{
-	return max_vqs / 2;
-}
-
 static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev)
 {
 	if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
 		return 2;
 
-	return 2 * mlx5_vdpa_max_qps(mvdev->max_vqs);
+	return mvdev->max_vqs;
 }
 
 static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx)
@@ -1236,25 +1232,13 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
 static int create_rqt(struct mlx5_vdpa_net *ndev)
 {
 	__be32 *list;
-	int max_rqt;
 	void *rqtc;
 	int inlen;
 	void *in;
 	int i, j;
 	int err;
-	int num;
-
-	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
-		num = 1;
-	else
-		num = ndev->cur_num_vqs / 2;
 
-	max_rqt = min_t(int, roundup_pow_of_two(num),
-			1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
-	if (max_rqt < 1)
-		return -EOPNOTSUPP;
-
-	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
+	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
 	in = kzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -1263,12 +1247,12 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
 
 	MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
-	MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
+	MLX5_SET(rqtc, rqtc, rqt_max_size, ndev->rqt_size);
 	list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
-	for (i = 0, j = 0; i < max_rqt; i++, j += 2)
-		list[i] = cpu_to_be32(ndev->vqs[j % (2 * num)].virtq_id);
+	for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+		list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);
 
-	MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
 	err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
 	kfree(in);
 	if (err)
@@ -1282,19 +1266,13 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
 static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
 {
 	__be32 *list;
-	int max_rqt;
 	void *rqtc;
 	int inlen;
 	void *in;
 	int i, j;
 	int err;
 
-	max_rqt = min_t(int, roundup_pow_of_two(ndev->cur_num_vqs / 2),
-			1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
-	if (max_rqt < 1)
-		return -EOPNOTSUPP;
-
-	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
+	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
 	in = kzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -1305,10 +1283,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
 	MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
 
 	list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
-	for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+	for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
 		list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
 
-	MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
 	err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
 	kfree(in);
 	if (err)
@@ -1625,7 +1603,7 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 
 		newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
 		if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
-		    newqps > mlx5_vdpa_max_qps(mvdev->max_vqs))
+		    newqps > ndev->rqt_size)
 			break;
 
 		if (ndev->cur_num_vqs == 2 * newqps) {
@@ -1989,7 +1967,7 @@ static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
 	int err;
 	int i;
 
-	for (i = 0; i < 2 * mlx5_vdpa_max_qps(mvdev->max_vqs); i++) {
+	for (i = 0; i < mvdev->max_vqs; i++) {
 		err = setup_vq(ndev, &ndev->vqs[i]);
 		if (err)
 			goto err_vq;
@@ -2060,9 +2038,11 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
 
 	ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
 	if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
-		ndev->cur_num_vqs = 2 * mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
+		ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
 	else
-		ndev->cur_num_vqs = 2;
+		ndev->rqt_size = 1;
+
+	ndev->cur_num_vqs = 2 * ndev->rqt_size;
 
 	update_cvq_info(mvdev);
 	return err;
@@ -2529,7 +2509,7 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
 	struct mlx5_vdpa_virtqueue *mvq;
 	int i;
 
-	for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
+	for (i = 0; i < ndev->mvdev.max_vqs; ++i) {
 		mvq = &ndev->vqs[i];
 		memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
 		mvq->index = i;
@@ -2671,7 +2651,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 		return -EOPNOTSUPP;
 	}
 
-	max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
+	max_vqs = min_t(int, MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues),
+			1 << MLX5_CAP_GEN(mdev, log_max_rqt_size));
 	if (max_vqs < 2) {
 		dev_warn(mdev->device,
 			 "%d virtqueues are supported. At least 2 are required\n",
@@ -2742,7 +2723,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 		ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
 	}
 
-	config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, mlx5_vdpa_max_qps(max_vqs));
+	config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2);
 	mvdev->vdev.dma_dev = &mdev->pdev->dev;
 	err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
 	if (err)
@@ -2769,7 +2750,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 	ndev->nb.notifier_call = event_handler;
 	mlx5_notifier_register(mdev, &ndev->nb);
 	mvdev->vdev.mdev = &mgtdev->mgtdev;
-	err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs) + 1);
+	err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
 	if (err)
 		goto err_reg;
 
-- 
2.35.1



* RE: [PATCH RESEND V3 3/3] vdpa/mlx5: Use consistent RQT size
From: Eli Cohen @ 2022-05-16  8:17 UTC
  To: mst, jasowang, Cindy Lu; +Cc: hdanton, virtualization, linux-kernel

Hi Michael,

When are you going to pull this fix?
It fixes a real problem and was reviewed and acked.

> -----Original Message-----
> From: Eli Cohen <elic@nvidia.com>
> Sent: Wednesday, April 6, 2022 11:53 AM
> To: mst@redhat.com; jasowang@redhat.com
> Cc: hdanton@sina.com; virtualization@lists.linux-foundation.org; linux-kernel@vger.kernel.org; Eli Cohen <elic@nvidia.com>
> Subject: [PATCH RESEND V3 3/3] vdpa/mlx5: Use consistent RQT size
> [...]



* Re: [PATCH RESEND V3 3/3] vdpa/mlx5: Use consistent RQT size
From: Michael S. Tsirkin @ 2022-05-16  8:22 UTC
  To: Eli Cohen; +Cc: jasowang, Cindy Lu, hdanton, virtualization, linux-kernel

On Mon, May 16, 2022 at 08:17:18AM +0000, Eli Cohen wrote:
> Hi Michael,
> 
> When are you going to pull this fix?
> It fixes a real problem and was reviewed and acked.

Do I understand it correctly that this is a stand-alone patch?
Sorry, my process has been thrown off by it being labeled 3/3 but not
being part of a thread. Do not do this for single patches please.
And I suspect 0-day machinery didn't process it either.
Can you repost as a stand-alone patch, please?
I will then process ASAP.

Thanks!

> > -----Original Message-----
> > From: Eli Cohen <elic@nvidia.com>
> > Sent: Wednesday, April 6, 2022 11:53 AM
> > To: mst@redhat.com; jasowang@redhat.com
> > Cc: hdanton@sina.com; virtualization@lists.linux-foundation.org; linux-kernel@vger.kernel.org; Eli Cohen <elic@nvidia.com>
> > Subject: [PATCH RESEND V3 3/3] vdpa/mlx5: Use consistent RQT size
> > [...]


* RE: [PATCH RESEND V3 3/3] vdpa/mlx5: Use consistent RQT size
From: Eli Cohen @ 2022-05-16  8:39 UTC
  To: Michael S. Tsirkin
  Cc: jasowang, Cindy Lu, hdanton, virtualization, linux-kernel

> From: Michael S. Tsirkin <mst@redhat.com>
> Sent: Monday, May 16, 2022 11:23 AM
> To: Eli Cohen <elic@nvidia.com>
> Cc: jasowang@redhat.com; Cindy Lu <lulu@redhat.com>; hdanton@sina.com; virtualization@lists.linux-foundation.org; linux-
> kernel@vger.kernel.org
> Subject: Re: [PATCH RESEND V3 3/3] vdpa/mlx5: Use consistent RQT size
> 
> On Mon, May 16, 2022 at 08:17:18AM +0000, Eli Cohen wrote:
> > Hi Michael,
> >
> > When are you going to pull this fix?
> > It fixes a real problem and was reviewed and acked.
> 
> Do I understand it correctly that this is a stand-alone patch?
> Sorry, my process has been thrown off by it being labeled 3/3 but not
> being part of a thread. Do not do this for single patches please.
> And I suspect 0-day machinery didn't process it either.
> Can you repost as a stand-alone patch, please?
> I will then process ASAP.

Sure.

> 
> Thanks!
> 
> > > -----Original Message-----
> > > From: Eli Cohen <elic@nvidia.com>
> > > Sent: Wednesday, April 6, 2022 11:53 AM
> > > To: mst@redhat.com; jasowang@redhat.com
> > > Cc: hdanton@sina.com; virtualization@lists.linux-foundation.org; linux-kernel@vger.kernel.org; Eli Cohen <elic@nvidia.com>
> > > Subject: [PATCH RESEND V3 3/3] vdpa/mlx5: Use consistent RQT size
> > > [...]


