* [PATCH net-next v2 0/6] Remove RTNL lock protection of CVQ
@ 2024-03-28  4:47 Daniel Jurgens
  2024-03-28  4:47 ` [PATCH net-next v2 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
                   ` (5 more replies)
  0 siblings, 6 replies; 10+ messages in thread
From: Daniel Jurgens @ 2024-03-28  4:47 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

Currently the buffer used for control VQ commands is protected by the
RTNL lock. Previously this wasn't a major concern because the control
VQ was only used during device setup and user interaction. With the
recent addition of dynamic interrupt moderation, the control VQ may be
used frequently during normal operation.

This series removes the RTNL lock dependency by introducing a spin lock
to protect the control buffer and the writing of SGs to the control VQ.
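
For readers skimming the series, the end state is roughly the following
pattern (a simplified sketch assembled from the patches below, not a
verbatim excerpt):

	/* virtnet_info grows a lock covering the ctrl buffer and the CVQ */
	spinlock_t cvq_lock;

	/* virtnet_send_command() then serializes every CVQ user: */
	guard(spinlock)(&vi->cvq_lock);	/* released automatically at scope exit */
	vi->ctrl->status = ~0;
	vi->ctrl->hdr.class = class;
	vi->ctrl->hdr.cmd = cmd;
	/* ... build the sgs, virtqueue_add_sgs(), kick, poll for status ... */

Per-command payloads move out of the shared control_buf and into
short-lived heap allocations, so only the header and status remain under
the new lock.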

v2:
	- New patch to only process the provided queue in
	  virtnet_rx_dim_work
	- New patch to lock per queue rx coalescing structure.

Daniel Jurgens (6):
  virtio_net: Store RSS setting in virtnet_info
  virtio_net: Remove command data from control_buf
  virtio_net: Add a lock for the command VQ.
  virtio_net: Do DIM update for specified queue only
  virtio_net: Add a lock for per queue RX coalesce
  virtio_net: Remove rtnl lock protection of command buffers

 drivers/net/virtio_net.c | 243 +++++++++++++++++++++------------------
 1 file changed, 134 insertions(+), 109 deletions(-)

-- 
2.42.0



* [PATCH net-next v2 1/6] virtio_net: Store RSS setting in virtnet_info
  2024-03-28  4:47 [PATCH net-next v2 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
@ 2024-03-28  4:47 ` Daniel Jurgens
  2024-03-28  4:47 ` [PATCH net-next v2 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 10+ messages in thread
From: Daniel Jurgens @ 2024-03-28  4:47 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

Stop storing RSS setting in the control buffer. This is prep work for
removing RTNL lock protection of the control buffer.

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
 drivers/net/virtio_net.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c22d1118a133..44525e9b09c5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -245,7 +245,6 @@ struct control_buf {
 	u8 allmulti;
 	__virtio16 vid;
 	__virtio64 offloads;
-	struct virtio_net_ctrl_rss rss;
 	struct virtio_net_ctrl_coal_tx coal_tx;
 	struct virtio_net_ctrl_coal_rx coal_rx;
 	struct virtio_net_ctrl_coal_vq coal_vq;
@@ -287,6 +286,7 @@ struct virtnet_info {
 	u16 rss_indir_table_size;
 	u32 rss_hash_types_supported;
 	u32 rss_hash_types_saved;
+	struct virtio_net_ctrl_rss rss;
 
 	/* Has control virtqueue */
 	bool has_cvq;
@@ -3087,17 +3087,17 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
 	sg_init_table(sgs, 4);
 
 	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
-	sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
+	sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
 
-	sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
-	sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
+	sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1);
+	sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
 
 	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
 			- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
-	sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
+	sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
 
 	sg_buf_size = vi->rss_key_size;
-	sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
+	sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
@@ -3113,21 +3113,21 @@ static void virtnet_init_default_rss(struct virtnet_info *vi)
 	u32 indir_val = 0;
 	int i = 0;
 
-	vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
+	vi->rss.hash_types = vi->rss_hash_types_supported;
 	vi->rss_hash_types_saved = vi->rss_hash_types_supported;
-	vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
+	vi->rss.indirection_table_mask = vi->rss_indir_table_size
 						? vi->rss_indir_table_size - 1 : 0;
-	vi->ctrl->rss.unclassified_queue = 0;
+	vi->rss.unclassified_queue = 0;
 
 	for (; i < vi->rss_indir_table_size; ++i) {
 		indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
-		vi->ctrl->rss.indirection_table[i] = indir_val;
+		vi->rss.indirection_table[i] = indir_val;
 	}
 
-	vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
-	vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+	vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
+	vi->rss.hash_key_length = vi->rss_key_size;
 
-	netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+	netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
 }
 
 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
@@ -3238,7 +3238,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
 
 	if (new_hashtypes != vi->rss_hash_types_saved) {
 		vi->rss_hash_types_saved = new_hashtypes;
-		vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+		vi->rss.hash_types = vi->rss_hash_types_saved;
 		if (vi->dev->features & NETIF_F_RXHASH)
 			return virtnet_commit_rss_command(vi);
 	}
@@ -3791,11 +3791,11 @@ static int virtnet_get_rxfh(struct net_device *dev,
 
 	if (rxfh->indir) {
 		for (i = 0; i < vi->rss_indir_table_size; ++i)
-			rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
+			rxfh->indir[i] = vi->rss.indirection_table[i];
 	}
 
 	if (rxfh->key)
-		memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
+		memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
 
 	rxfh->hfunc = ETH_RSS_HASH_TOP;
 
@@ -3815,10 +3815,10 @@ static int virtnet_set_rxfh(struct net_device *dev,
 
 	if (rxfh->indir) {
 		for (i = 0; i < vi->rss_indir_table_size; ++i)
-			vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
+			vi->rss.indirection_table[i] = rxfh->indir[i];
 	}
 	if (rxfh->key)
-		memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
+		memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
 
 	virtnet_commit_rss_command(vi);
 
@@ -4140,9 +4140,9 @@ static int virtnet_set_features(struct net_device *dev,
 
 	if ((dev->features ^ features) & NETIF_F_RXHASH) {
 		if (features & NETIF_F_RXHASH)
-			vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+			vi->rss.hash_types = vi->rss_hash_types_saved;
 		else
-			vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+			vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
 
 		if (!virtnet_commit_rss_command(vi))
 			return -EINVAL;
-- 
2.42.0



* [PATCH net-next v2 2/6] virtio_net: Remove command data from control_buf
  2024-03-28  4:47 [PATCH net-next v2 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
  2024-03-28  4:47 ` [PATCH net-next v2 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
@ 2024-03-28  4:47 ` Daniel Jurgens
  2024-03-28 21:58   ` kernel test robot
  2024-03-28  4:47 ` [PATCH net-next v2 3/6] virtio_net: Add a lock for the command VQ Daniel Jurgens
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 10+ messages in thread
From: Daniel Jurgens @ 2024-03-28  4:47 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

Allocate memory for the data when it's used. Ideally it could be on the
stack, but we can't DMA stack memory. With this change only the header
and status memory are shared between commands, which will allow using a
tighter lock than RTNL.
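
The allocations below rely on the scope-based cleanup attribute
__free(kfree) from <linux/cleanup.h>: kfree() runs automatically when
the pointer goes out of scope, so no explicit free is needed on either
the success or the error path. A minimal sketch of the idiom, using the
mq command from this patch as the example:

	struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		return -ENOMEM;		/* nothing allocated, nothing to free */

	mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, mq, sizeof(*mq));
	/* ... send the command; kfree(mq) is emitted when mq leaves scope ... */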

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
 drivers/net/virtio_net.c | 111 ++++++++++++++++++++++++++-------------
 1 file changed, 75 insertions(+), 36 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 44525e9b09c5..ff93d18992e4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -240,14 +240,6 @@ struct virtio_net_ctrl_rss {
 struct control_buf {
 	struct virtio_net_ctrl_hdr hdr;
 	virtio_net_ctrl_ack status;
-	struct virtio_net_ctrl_mq mq;
-	u8 promisc;
-	u8 allmulti;
-	__virtio16 vid;
-	__virtio64 offloads;
-	struct virtio_net_ctrl_coal_tx coal_tx;
-	struct virtio_net_ctrl_coal_rx coal_rx;
-	struct virtio_net_ctrl_coal_vq coal_vq;
 };
 
 struct virtnet_info {
@@ -2672,14 +2664,19 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
 
 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 {
+	struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
 	struct scatterlist sg;
 	struct net_device *dev = vi->dev;
 
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
 
-	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
-	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
+	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+	if (!mq)
+		return -ENOMEM;
+
+	mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+	sg_init_one(&sg, mq, sizeof(*mq));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -2708,6 +2705,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 
 static int virtnet_close(struct net_device *dev)
 {
+	u8 *promisc_allmulti  __free(kfree) = NULL;
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i;
 
@@ -2732,6 +2730,7 @@ static void virtnet_rx_mode_work(struct work_struct *work)
 	struct scatterlist sg[2];
 	struct virtio_net_ctrl_mac *mac_data;
 	struct netdev_hw_addr *ha;
+	u8 *promisc_allmulti;
 	int uc_count;
 	int mc_count;
 	void *buf;
@@ -2743,22 +2742,27 @@ static void virtnet_rx_mode_work(struct work_struct *work)
 
 	rtnl_lock();
 
-	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
-	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+	promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_ATOMIC);
+	if (!promisc_allmulti) {
+		dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
+		return;
+	}
 
-	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
+	*promisc_allmulti = !!(dev->flags & IFF_PROMISC);
+	sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
-			 vi->ctrl->promisc ? "en" : "dis");
+			 *promisc_allmulti ? "en" : "dis");
 
-	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
+	*promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
+	sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
-			 vi->ctrl->allmulti ? "en" : "dis");
+			 *promisc_allmulti ? "en" : "dis");
 
 	netif_addr_lock_bh(dev);
 
@@ -2819,10 +2823,15 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
 				   __be16 proto, u16 vid)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	__virtio16 *_vid __free(kfree) = NULL;
 	struct scatterlist sg;
 
-	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
-	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+	_vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+	if (!_vid)
+		return -ENOMEM;
+
+	*_vid = cpu_to_virtio16(vi->vdev, vid);
+	sg_init_one(&sg, _vid, sizeof(*_vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -2834,10 +2843,15 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
 				    __be16 proto, u16 vid)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	__virtio16 *_vid __free(kfree) = NULL;
 	struct scatterlist sg;
 
-	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
-	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+	_vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+	if (!_vid)
+		return -ENOMEM;
+
+	*_vid = cpu_to_virtio16(vi->vdev, vid);
+	sg_init_one(&sg, _vid, sizeof(*_vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -2950,12 +2964,17 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
 					 u16 vqn, u32 max_usecs, u32 max_packets)
 {
+	struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
 	struct scatterlist sgs;
 
-	vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
-	vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
-	vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
-	sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+	coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
+	if (!coal_vq)
+		return -ENOMEM;
+
+	coal_vq->vqn = cpu_to_le16(vqn);
+	coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
+	coal_vq->coal.max_packets = cpu_to_le32(max_packets);
+	sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
@@ -3101,11 +3120,15 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
-				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
-		dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
-		return false;
-	}
+				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
+		goto err;
+
 	return true;
+
+err:
+	dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+	return false;
+
 }
 
 static void virtnet_init_default_rss(struct virtnet_info *vi)
@@ -3410,12 +3433,17 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
 					  struct ethtool_coalesce *ec)
 {
+	struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
 	struct scatterlist sgs_tx;
 	int i;
 
-	vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
-	vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
-	sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
+	coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
+	if (!coal_tx)
+		return -ENOMEM;
+
+	coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+	coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+	sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@@ -3435,6 +3463,7 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 					  struct ethtool_coalesce *ec)
 {
+	struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
 	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
 	struct scatterlist sgs_rx;
 	int i;
@@ -3453,6 +3482,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 		return 0;
 	}
 
+	coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
+	if (!coal_rx)
+		return -ENOMEM;
+
 	if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
 		vi->rx_dim_enabled = false;
 		for (i = 0; i < vi->max_queue_pairs; i++)
@@ -3463,9 +3496,9 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 	 * we need apply the global new params even if they
 	 * are not updated.
 	 */
-	vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
-	vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
-	sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
+	coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+	coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+	sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
@@ -3935,10 +3968,16 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 
 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
 {
+	u64 *_offloads __free(kfree) = NULL;
 	struct scatterlist sg;
-	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
 
-	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
+	_offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
+	if (!_offloads)
+		return -ENOMEM;
+
+	*_offloads = cpu_to_virtio64(vi->vdev, offloads);
+
+	sg_init_one(&sg, _offloads, sizeof(*_offloads));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
 				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
-- 
2.42.0



* [PATCH net-next v2 3/6] virtio_net: Add a lock for the command VQ.
  2024-03-28  4:47 [PATCH net-next v2 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
  2024-03-28  4:47 ` [PATCH net-next v2 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
  2024-03-28  4:47 ` [PATCH net-next v2 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
@ 2024-03-28  4:47 ` Daniel Jurgens
  2024-03-28  4:47 ` [PATCH net-next v2 4/6] virtio_net: Do DIM update for specified queue only Daniel Jurgens
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 10+ messages in thread
From: Daniel Jurgens @ 2024-03-28  4:47 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

The command VQ will no longer be protected by the RTNL lock. Use a
spinlock to protect the control buffer header and the VQ.
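
For readers not yet used to the cleanup helpers, guard(spinlock)(&vi->cvq_lock)
from <linux/cleanup.h> takes the lock at the point of declaration and
releases it on every exit from the enclosing scope, so the hunk below is
roughly equivalent to the following open-coded sketch:

	spin_lock(&vi->cvq_lock);
	/* ... rest of virtnet_send_command() ... */
	spin_unlock(&vi->cvq_lock);	/* implied on every return path */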

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
 drivers/net/virtio_net.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ff93d18992e4..b9298544b1b5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -282,6 +282,7 @@ struct virtnet_info {
 
 	/* Has control virtqueue */
 	bool has_cvq;
+	spinlock_t cvq_lock;
 
 	/* Host can handle any s/g split between our header and packet data */
 	bool any_header_sg;
@@ -2529,6 +2530,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 	/* Caller should know better */
 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
 
+	guard(spinlock)(&vi->cvq_lock);
 	vi->ctrl->status = ~0;
 	vi->ctrl->hdr.class = class;
 	vi->ctrl->hdr.cmd = cmd;
@@ -4800,8 +4802,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
 		vi->any_header_sg = true;
 
-	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
 		vi->has_cvq = true;
+		spin_lock_init(&vi->cvq_lock);
+	}
 
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
 		mtu = virtio_cread16(vdev,
-- 
2.42.0



* [PATCH net-next v2 4/6] virtio_net: Do DIM update for specified queue only
  2024-03-28  4:47 [PATCH net-next v2 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
                   ` (2 preceding siblings ...)
  2024-03-28  4:47 ` [PATCH net-next v2 3/6] virtio_net: Add a lock for the command VQ Daniel Jurgens
@ 2024-03-28  4:47 ` Daniel Jurgens
  2024-03-28  4:57   ` Heng Qi
  2024-03-28  4:47 ` [PATCH net-next v2 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
  2024-03-28  4:47 ` [PATCH net-next v2 6/6] virtio_net: Remove rtnl lock protection of command buffers Daniel Jurgens
  5 siblings, 1 reply; 10+ messages in thread
From: Daniel Jurgens @ 2024-03-28  4:47 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

Since we no longer have to hold the RTNL lock here just do updates for
the specified queue.
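
The intended per-queue flow is roughly the sketch below; note that the
hunk that follows still carries a stray `continue` left over from the
removed loop, which is raised in the review further down the thread and
corrected later in the series:

	qnum = rq - vi->rq;

	if (!rq->dim_enabled)
		goto out;	/* rather than the leftover continue */

	/* ... send updated moderation params for this rxq only ... */
out:
	rtnl_unlock();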

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
---
 drivers/net/virtio_net.c | 38 ++++++++++++++------------------------
 1 file changed, 14 insertions(+), 24 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b9298544b1b5..9c4bfb1eb15c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3596,36 +3596,26 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct net_device *dev = vi->dev;
 	struct dim_cq_moder update_moder;
-	int i, qnum, err;
+	int qnum, err;
 
 	if (!rtnl_trylock())
 		return;
 
-	/* Each rxq's work is queued by "net_dim()->schedule_work()"
-	 * in response to NAPI traffic changes. Note that dim->profile_ix
-	 * for each rxq is updated prior to the queuing action.
-	 * So we only need to traverse and update profiles for all rxqs
-	 * in the work which is holding rtnl_lock.
-	 */
-	for (i = 0; i < vi->curr_queue_pairs; i++) {
-		rq = &vi->rq[i];
-		dim = &rq->dim;
-		qnum = rq - vi->rq;
+	qnum = rq - vi->rq;
 
-		if (!rq->dim_enabled)
-			continue;
+	if (!rq->dim_enabled)
+		continue;
 
-		update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
-		if (update_moder.usec != rq->intr_coal.max_usecs ||
-		    update_moder.pkts != rq->intr_coal.max_packets) {
-			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
-							       update_moder.usec,
-							       update_moder.pkts);
-			if (err)
-				pr_debug("%s: Failed to send dim parameters on rxq%d\n",
-					 dev->name, qnum);
-			dim->state = DIM_START_MEASURE;
-		}
+	update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+	if (update_moder.usec != rq->intr_coal.max_usecs ||
+	    update_moder.pkts != rq->intr_coal.max_packets) {
+		err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
+						       update_moder.usec,
+						       update_moder.pkts);
+		if (err)
+			pr_debug("%s: Failed to send dim parameters on rxq%d\n",
+				 dev->name, qnum);
+		dim->state = DIM_START_MEASURE;
 	}
 
 	rtnl_unlock();
-- 
2.42.0



* [PATCH net-next v2 5/6] virtio_net: Add a lock for per queue RX coalesce
  2024-03-28  4:47 [PATCH net-next v2 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
                   ` (3 preceding siblings ...)
  2024-03-28  4:47 ` [PATCH net-next v2 4/6] virtio_net: Do DIM update for specified queue only Daniel Jurgens
@ 2024-03-28  4:47 ` Daniel Jurgens
  2024-03-28  4:47 ` [PATCH net-next v2 6/6] virtio_net: Remove rtnl lock protection of command buffers Daniel Jurgens
  5 siblings, 0 replies; 10+ messages in thread
From: Daniel Jurgens @ 2024-03-28  4:47 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

Once the RTNL locking around the control buffer is removed, there can be
contention on the per-queue RX interrupt coalescing data. Use a spin
lock per queue.
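
The scoped_guard(spinlock, ...) blocks used below hold the lock only for
the braced region, and the lock is dropped even on an early return from
inside the block; a minimal sketch of the semantics, using the ringparam
path from this patch:

	scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
		err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
						       vi->intr_coal_rx.max_usecs,
						       vi->intr_coal_rx.max_packets);
		if (err)
			return err;	/* intr_coal_lock is released here too */
	}
	/* lock already released at this point */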

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
---
 drivers/net/virtio_net.c | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9c4bfb1eb15c..859d767411f8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -190,6 +190,7 @@ struct receive_queue {
 	u32 packets_in_napi;
 
 	struct virtnet_interrupt_coalesce intr_coal;
+	spinlock_t intr_coal_lock;
 
 	/* Chain pages by the private ptr. */
 	struct page *pages;
@@ -3087,11 +3088,13 @@ static int virtnet_set_ringparam(struct net_device *dev,
 				return err;
 
 			/* The reason is same as the transmit virtqueue reset */
-			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
-							       vi->intr_coal_rx.max_usecs,
-							       vi->intr_coal_rx.max_packets);
-			if (err)
-				return err;
+			scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
+				err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
+								       vi->intr_coal_rx.max_usecs,
+								       vi->intr_coal_rx.max_packets);
+				if (err)
+					return err;
+			}
 		}
 	}
 
@@ -3510,8 +3513,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
 	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
 	for (i = 0; i < vi->max_queue_pairs; i++) {
-		vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
-		vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+		scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
+			vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
+			vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+		}
 	}
 
 	return 0;
@@ -3542,6 +3547,7 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
 	u32 max_usecs, max_packets;
 	int err;
 
+	guard(spinlock)(&vi->rq[queue].intr_coal_lock);
 	max_usecs = vi->rq[queue].intr_coal.max_usecs;
 	max_packets = vi->rq[queue].intr_coal.max_packets;
 
@@ -3604,8 +3610,9 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 	qnum = rq - vi->rq;
 
 	if (!rq->dim_enabled)
-		continue;
+		goto out;
 
+	guard(spinlock)(&rq->intr_coal_lock);
 	update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
 	if (update_moder.usec != rq->intr_coal.max_usecs ||
 	    update_moder.pkts != rq->intr_coal.max_packets) {
@@ -3617,7 +3624,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 				 dev->name, qnum);
 		dim->state = DIM_START_MEASURE;
 	}
-
+out:
 	rtnl_unlock();
 }
 
@@ -3756,6 +3763,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
 		return -EINVAL;
 
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+		guard(spinlock)(&vi->rq[queue].intr_coal_lock);
 		ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
 		ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
 		ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
@@ -4485,6 +4493,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 
 		u64_stats_init(&vi->rq[i].stats.syncp);
 		u64_stats_init(&vi->sq[i].stats.syncp);
+		spin_lock_init(&vi->rq[i].intr_coal_lock);
 	}
 
 	return 0;
-- 
2.42.0



* [PATCH net-next v2 6/6] virtio_net: Remove rtnl lock protection of command buffers
  2024-03-28  4:47 [PATCH net-next v2 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
                   ` (4 preceding siblings ...)
  2024-03-28  4:47 ` [PATCH net-next v2 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
@ 2024-03-28  4:47 ` Daniel Jurgens
  5 siblings, 0 replies; 10+ messages in thread
From: Daniel Jurgens @ 2024-03-28  4:47 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

The rtnl lock is no longer needed to protect the control buffer and
command VQ.

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
 drivers/net/virtio_net.c | 27 +++++----------------------
 1 file changed, 5 insertions(+), 22 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 859d767411f8..351d9107f472 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2658,14 +2658,12 @@ static void virtnet_stats(struct net_device *dev,
 
 static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
-	rtnl_lock();
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
 				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
-	rtnl_unlock();
 }
 
-static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 {
 	struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
 	struct scatterlist sg;
@@ -2696,16 +2694,6 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	return 0;
 }
 
-static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
-{
-	int err;
-
-	rtnl_lock();
-	err = _virtnet_set_queues(vi, queue_pairs);
-	rtnl_unlock();
-	return err;
-}
-
 static int virtnet_close(struct net_device *dev)
 {
 	u8 *promisc_allmulti  __free(kfree) = NULL;
@@ -3311,7 +3299,7 @@ static int virtnet_set_channels(struct net_device *dev,
 		return -EINVAL;
 
 	cpus_read_lock();
-	err = _virtnet_set_queues(vi, queue_pairs);
+	err = virtnet_set_queues(vi, queue_pairs);
 	if (err) {
 		cpus_read_unlock();
 		goto err;
@@ -3604,13 +3592,10 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 	struct dim_cq_moder update_moder;
 	int qnum, err;
 
-	if (!rtnl_trylock())
-		return;
-
 	qnum = rq - vi->rq;
 
 	if (!rq->dim_enabled)
-		goto out;
+		return;
 
 	guard(spinlock)(&rq->intr_coal_lock);
 	update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
@@ -3624,8 +3609,6 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 				 dev->name, qnum);
 		dim->state = DIM_START_MEASURE;
 	}
-out:
-	rtnl_unlock();
 }
 
 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
@@ -4077,7 +4060,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		synchronize_net();
 	}
 
-	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
+	err = virtnet_set_queues(vi, curr_qp + xdp_qp);
 	if (err)
 		goto err;
 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
@@ -4897,7 +4880,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
-	_virtnet_set_queues(vi, vi->curr_queue_pairs);
+	virtnet_set_queues(vi, vi->curr_queue_pairs);
 
 	/* a random MAC address has been assigned, notify the device.
 	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
-- 
2.42.0



* Re: [PATCH net-next v2 4/6] virtio_net: Do DIM update for specified queue only
  2024-03-28  4:47 ` [PATCH net-next v2 4/6] virtio_net: Do DIM update for specified queue only Daniel Jurgens
@ 2024-03-28  4:57   ` Heng Qi
  2024-03-28  5:12     ` Dan Jurgens
  0 siblings, 1 reply; 10+ messages in thread
From: Heng Qi @ 2024-03-28  4:57 UTC (permalink / raw)
  To: Daniel Jurgens, netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri



On 2024/3/28 12:47 PM, Daniel Jurgens wrote:
> Since we no longer have to hold the RTNL lock here just do updates for
> the specified queue.
>
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> ---
>   drivers/net/virtio_net.c | 38 ++++++++++++++------------------------
>   1 file changed, 14 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index b9298544b1b5..9c4bfb1eb15c 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -3596,36 +3596,26 @@ static void virtnet_rx_dim_work(struct work_struct *work)
>   	struct virtnet_info *vi = rq->vq->vdev->priv;
>   	struct net_device *dev = vi->dev;
>   	struct dim_cq_moder update_moder;
> -	int i, qnum, err;
> +	int qnum, err;
>   
>   	if (!rtnl_trylock())
>   		return;
>   
> -	/* Each rxq's work is queued by "net_dim()->schedule_work()"
> -	 * in response to NAPI traffic changes. Note that dim->profile_ix
> -	 * for each rxq is updated prior to the queuing action.
> -	 * So we only need to traverse and update profiles for all rxqs
> -	 * in the work which is holding rtnl_lock.
> -	 */
> -	for (i = 0; i < vi->curr_queue_pairs; i++) {
> -		rq = &vi->rq[i];
> -		dim = &rq->dim;
> -		qnum = rq - vi->rq;
> +	qnum = rq - vi->rq;
>   
> -		if (!rq->dim_enabled)
> -			continue;
> +	if (!rq->dim_enabled)
> +		continue;

?

continue what?

For the lock code, please make sure it passes testing. It's important.

Regards,
Heng

>   
> -		update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
> -		if (update_moder.usec != rq->intr_coal.max_usecs ||
> -		    update_moder.pkts != rq->intr_coal.max_packets) {
> -			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
> -							       update_moder.usec,
> -							       update_moder.pkts);
> -			if (err)
> -				pr_debug("%s: Failed to send dim parameters on rxq%d\n",
> -					 dev->name, qnum);
> -			dim->state = DIM_START_MEASURE;
> -		}
> +	update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
> +	if (update_moder.usec != rq->intr_coal.max_usecs ||
> +	    update_moder.pkts != rq->intr_coal.max_packets) {
> +		err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
> +						       update_moder.usec,
> +						       update_moder.pkts);
> +		if (err)
> +			pr_debug("%s: Failed to send dim parameters on rxq%d\n",
> +				 dev->name, qnum);
> +		dim->state = DIM_START_MEASURE;
>   	}
>   
>   	rtnl_unlock();



* RE: [PATCH net-next v2 4/6] virtio_net: Do DIM update for specified queue only
  2024-03-28  4:57   ` Heng Qi
@ 2024-03-28  5:12     ` Dan Jurgens
  0 siblings, 0 replies; 10+ messages in thread
From: Dan Jurgens @ 2024-03-28  5:12 UTC (permalink / raw)
  To: Heng Qi, netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, Jiri Pirko

> From: Heng Qi <hengqi@linux.alibaba.com>
> Sent: Wednesday, March 27, 2024 11:57 PM
> To: Dan Jurgens <danielj@nvidia.com>; netdev@vger.kernel.org
> Cc: mst@redhat.com; jasowang@redhat.com; xuanzhuo@linux.alibaba.com;
> virtualization@lists.linux.dev; davem@davemloft.net;
> edumazet@google.com; kuba@kernel.org; pabeni@redhat.com; Jiri Pirko
> <jiri@nvidia.com>
> Subject: Re: [PATCH net-next v2 4/6] virtio_net: Do DIM update for specified
> queue only
> 
> 
> 
> On 2024/3/28 12:47 PM, Daniel Jurgens wrote:
> > Since we no longer have to hold the RTNL lock here just do updates for
> > the specified queue.
> >
> > Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> > ---
> >   drivers/net/virtio_net.c | 38 ++++++++++++++------------------------
> >   1 file changed, 14 insertions(+), 24 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index
> > b9298544b1b5..9c4bfb1eb15c 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -3596,36 +3596,26 @@ static void virtnet_rx_dim_work(struct
> work_struct *work)
> >   	struct virtnet_info *vi = rq->vq->vdev->priv;
> >   	struct net_device *dev = vi->dev;
> >   	struct dim_cq_moder update_moder;
> > -	int i, qnum, err;
> > +	int qnum, err;
> >
> >   	if (!rtnl_trylock())
> >   		return;
> >
> > -	/* Each rxq's work is queued by "net_dim()->schedule_work()"
> > -	 * in response to NAPI traffic changes. Note that dim->profile_ix
> > -	 * for each rxq is updated prior to the queuing action.
> > -	 * So we only need to traverse and update profiles for all rxqs
> > -	 * in the work which is holding rtnl_lock.
> > -	 */
> > -	for (i = 0; i < vi->curr_queue_pairs; i++) {
> > -		rq = &vi->rq[i];
> > -		dim = &rq->dim;
> > -		qnum = rq - vi->rq;
> > +	qnum = rq - vi->rq;
> >
> > -		if (!rq->dim_enabled)
> > -			continue;
> > +	if (!rq->dim_enabled)
> > +		continue;
> 
> ?
> 
> continue what?
> 

Sorry, messed this up when I was testing the patches and put the fix for the continue in the lock patch.

> For the lock code, please pass the test. It's important.

I did some bench testing. I'll do more and send a new set early next week.

> 
> Regards,
> Heng
> 
> >
> > -		update_moder = net_dim_get_rx_moderation(dim->mode,
> dim->profile_ix);
> > -		if (update_moder.usec != rq->intr_coal.max_usecs ||
> > -		    update_moder.pkts != rq->intr_coal.max_packets) {
> > -			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
> > -
> update_moder.usec,
> > -
> update_moder.pkts);
> > -			if (err)
> > -				pr_debug("%s: Failed to send dim
> parameters on rxq%d\n",
> > -					 dev->name, qnum);
> > -			dim->state = DIM_START_MEASURE;
> > -		}
> > +	update_moder = net_dim_get_rx_moderation(dim->mode, dim-
> >profile_ix);
> > +	if (update_moder.usec != rq->intr_coal.max_usecs ||
> > +	    update_moder.pkts != rq->intr_coal.max_packets) {
> > +		err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
> > +						       update_moder.usec,
> > +						       update_moder.pkts);
> > +		if (err)
> > +			pr_debug("%s: Failed to send dim parameters on
> rxq%d\n",
> > +				 dev->name, qnum);
> > +		dim->state = DIM_START_MEASURE;
> >   	}
> >
> >   	rtnl_unlock();



* Re: [PATCH net-next v2 2/6] virtio_net: Remove command data from control_buf
  2024-03-28  4:47 ` [PATCH net-next v2 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
@ 2024-03-28 21:58   ` kernel test robot
  0 siblings, 0 replies; 10+ messages in thread
From: kernel test robot @ 2024-03-28 21:58 UTC (permalink / raw)
  To: Daniel Jurgens, netdev
  Cc: oe-kbuild-all, mst, jasowang, xuanzhuo, virtualization, davem,
	edumazet, kuba, pabeni, jiri, Daniel Jurgens

Hi Daniel,

kernel test robot noticed the following build warnings:

[auto build test WARNING on net-next/main]

url:    https://github.com/intel-lab-lkp/linux/commits/Daniel-Jurgens/virtio_net-Store-RSS-setting-in-virtnet_info/20240328-125022
base:   net-next/main
patch link:    https://lore.kernel.org/r/20240328044715.266641-3-danielj%40nvidia.com
patch subject: [PATCH net-next v2 2/6] virtio_net: Remove command data from control_buf
config: x86_64-randconfig-122-20240328 (https://download.01.org/0day-ci/archive/20240329/202403290542.gM5D7hMG-lkp@intel.com/config)
compiler: clang version 17.0.6 (https://github.com/llvm/llvm-project 6009708b4367171ccdbf4b5905cb6a803753fe18)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240329/202403290542.gM5D7hMG-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202403290542.gM5D7hMG-lkp@intel.com/

sparse warnings: (new ones prefixed by >>)
>> drivers/net/virtio_net.c:3978:20: sparse: sparse: incorrect type in assignment (different base types) @@     expected unsigned long long [usertype] @@     got restricted __virtio64 @@
   drivers/net/virtio_net.c:3978:20: sparse:     expected unsigned long long [usertype]
   drivers/net/virtio_net.c:3978:20: sparse:     got restricted __virtio64

vim +3978 drivers/net/virtio_net.c

  3968	
  3969	static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
  3970	{
  3971		u64 *_offloads __free(kfree) = NULL;
  3972		struct scatterlist sg;
  3973	
  3974		_offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
  3975		if (!_offloads)
  3976			return -ENOMEM;
  3977	
> 3978		*_offloads = cpu_to_virtio64(vi->vdev, offloads);
  3979	
  3980		sg_init_one(&sg, _offloads, sizeof(*_offloads));
  3981	
  3982		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
  3983					  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
  3984			dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
  3985			return -EINVAL;
  3986		}
  3987	
  3988		return 0;
  3989	}
  3990	
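
For context, sparse complains because *_offloads is declared as a plain
u64 while cpu_to_virtio64() returns the bitwise type __virtio64. One way
this could be resolved (an assumption on my part, not something proposed
in this message) is to give the temporary buffer the bitwise type:

	__virtio64 *_offloads __free(kfree) = NULL;

	_offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
	if (!_offloads)
		return -ENOMEM;

	*_offloads = cpu_to_virtio64(vi->vdev, offloads);	/* both sides __virtio64 */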

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki


