* [PATCH net-next v4 0/6] Remove RTNL lock protection of CVQ
@ 2024-04-16 19:30 Daniel Jurgens
  2024-04-16 19:30 ` [PATCH net-next v4 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
                   ` (5 more replies)
  0 siblings, 6 replies; 16+ messages in thread
From: Daniel Jurgens @ 2024-04-16 19:30 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

Currently the buffer used for control VQ commands is protected by the
RTNL lock. Previously this wasn't a major concern because the control
VQ was only used during device setup and user interaction. With the
recent addition of dynamic interrupt moderation the control VQ may be
used frequently during normal operation.

This series removes the RTNL lock dependency by introducing a spin lock
to protect the control buffer and the writing of SGs to the control VQ.
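
A condensed sketch of the resulting send path (simplified from the
patches below; SG setup, the kick and the reply-wait loop are omitted):

	static bool virtnet_send_command(struct virtnet_info *vi, u8 class,
					 u8 cmd, struct scatterlist *out)
	{
		/* Serialize the control buffer header and the control VQ. */
		guard(spinlock)(&vi->cvq_lock);
		vi->ctrl->status = ~0;
		vi->ctrl->hdr.class = class;
		vi->ctrl->hdr.cmd = cmd;

		/* ... add hdr/out/status SGs, kick the VQ and poll for the
		 * reply; the lock is released when the scope exits ...
		 */
		return vi->ctrl->status == VIRTIO_NET_OK;
	}

Per-command data is allocated on demand and freed with __free(kfree), so
only the header and status in the control buffer remain shared.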

v4:
	- Protect dim_enabled with the same lock as intr_coal.
	- Rename intr_coal_lock to dim_lock.
	- Remove some scoped_guard usage where the error path doesn't
	  need to hold the lock.
v3:
	- Changed type of _offloads to __virtio16 to fix static
	  analysis warning.
	- Moved a misplaced hunk to the correct patch.
v2:
	- New patch to only process the provided queue in
	  virtnet_dim_work
	- New patch to lock per queue rx coalescing structure.

Daniel Jurgens (6):
  virtio_net: Store RSS setting in virtnet_info
  virtio_net: Remove command data from control_buf
  virtio_net: Add a lock for the command VQ.
  virtio_net: Do DIM update for specified queue only
  virtio_net: Add a lock for per queue RX coalesce
  virtio_net: Remove rtnl lock protection of command buffers

 drivers/net/virtio_net.c | 256 +++++++++++++++++++++++----------------
 1 file changed, 149 insertions(+), 107 deletions(-)

-- 
2.34.1



* [PATCH net-next v4 1/6] virtio_net: Store RSS setting in virtnet_info
  2024-04-16 19:30 [PATCH net-next v4 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
@ 2024-04-16 19:30 ` Daniel Jurgens
  2024-04-16 19:30 ` [PATCH net-next v4 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 16+ messages in thread
From: Daniel Jurgens @ 2024-04-16 19:30 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

Stop storing RSS setting in the control buffer. This is prep work for
removing RTNL lock protection of the control buffer.

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
 drivers/net/virtio_net.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 115c3c5414f2..7248dae54e1c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -245,7 +245,6 @@ struct control_buf {
 	u8 allmulti;
 	__virtio16 vid;
 	__virtio64 offloads;
-	struct virtio_net_ctrl_rss rss;
 	struct virtio_net_ctrl_coal_tx coal_tx;
 	struct virtio_net_ctrl_coal_rx coal_rx;
 	struct virtio_net_ctrl_coal_vq coal_vq;
@@ -287,6 +286,7 @@ struct virtnet_info {
 	u16 rss_indir_table_size;
 	u32 rss_hash_types_supported;
 	u32 rss_hash_types_saved;
+	struct virtio_net_ctrl_rss rss;
 
 	/* Has control virtqueue */
 	bool has_cvq;
@@ -3087,17 +3087,17 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
 	sg_init_table(sgs, 4);
 
 	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
-	sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
+	sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
 
-	sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
-	sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
+	sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1);
+	sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
 
 	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
 			- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
-	sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
+	sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
 
 	sg_buf_size = vi->rss_key_size;
-	sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
+	sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
@@ -3113,21 +3113,21 @@ static void virtnet_init_default_rss(struct virtnet_info *vi)
 	u32 indir_val = 0;
 	int i = 0;
 
-	vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
+	vi->rss.hash_types = vi->rss_hash_types_supported;
 	vi->rss_hash_types_saved = vi->rss_hash_types_supported;
-	vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
+	vi->rss.indirection_table_mask = vi->rss_indir_table_size
 						? vi->rss_indir_table_size - 1 : 0;
-	vi->ctrl->rss.unclassified_queue = 0;
+	vi->rss.unclassified_queue = 0;
 
 	for (; i < vi->rss_indir_table_size; ++i) {
 		indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
-		vi->ctrl->rss.indirection_table[i] = indir_val;
+		vi->rss.indirection_table[i] = indir_val;
 	}
 
-	vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
-	vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+	vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
+	vi->rss.hash_key_length = vi->rss_key_size;
 
-	netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+	netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
 }
 
 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
@@ -3238,7 +3238,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
 
 	if (new_hashtypes != vi->rss_hash_types_saved) {
 		vi->rss_hash_types_saved = new_hashtypes;
-		vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+		vi->rss.hash_types = vi->rss_hash_types_saved;
 		if (vi->dev->features & NETIF_F_RXHASH)
 			return virtnet_commit_rss_command(vi);
 	}
@@ -3791,11 +3791,11 @@ static int virtnet_get_rxfh(struct net_device *dev,
 
 	if (rxfh->indir) {
 		for (i = 0; i < vi->rss_indir_table_size; ++i)
-			rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
+			rxfh->indir[i] = vi->rss.indirection_table[i];
 	}
 
 	if (rxfh->key)
-		memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
+		memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
 
 	rxfh->hfunc = ETH_RSS_HASH_TOP;
 
@@ -3819,7 +3819,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
 			return -EOPNOTSUPP;
 
 		for (i = 0; i < vi->rss_indir_table_size; ++i)
-			vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
+			vi->rss.indirection_table[i] = rxfh->indir[i];
 		update = true;
 	}
 
@@ -3831,7 +3831,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
 		if (!vi->has_rss && !vi->has_rss_hash_report)
 			return -EOPNOTSUPP;
 
-		memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
+		memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
 		update = true;
 	}
 
@@ -4156,9 +4156,9 @@ static int virtnet_set_features(struct net_device *dev,
 
 	if ((dev->features ^ features) & NETIF_F_RXHASH) {
 		if (features & NETIF_F_RXHASH)
-			vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+			vi->rss.hash_types = vi->rss_hash_types_saved;
 		else
-			vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+			vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
 
 		if (!virtnet_commit_rss_command(vi))
 			return -EINVAL;
-- 
2.34.1



* [PATCH net-next v4 2/6] virtio_net: Remove command data from control_buf
  2024-04-16 19:30 [PATCH net-next v4 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
  2024-04-16 19:30 ` [PATCH net-next v4 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
@ 2024-04-16 19:30 ` Daniel Jurgens
  2024-04-16 19:30 ` [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ Daniel Jurgens
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 16+ messages in thread
From: Daniel Jurgens @ 2024-04-16 19:30 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

Allocate memory for the data when it's used. Ideally the data could live
on the stack, but we can't DMA stack memory. With this change only the
header and status memory are shared between commands, which will allow
using a tighter lock than RTNL.
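
The pattern, condensed here from the MQ command below (the allocation is
freed automatically when it goes out of scope):

	struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
	struct scatterlist sg;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		return -ENOMEM;

	mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, mq, sizeof(*mq));
	/* mq is kfree()d on every return path from here on */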

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
 drivers/net/virtio_net.c | 111 ++++++++++++++++++++++++++-------------
 1 file changed, 75 insertions(+), 36 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7248dae54e1c..0ee192b45e1e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -240,14 +240,6 @@ struct virtio_net_ctrl_rss {
 struct control_buf {
 	struct virtio_net_ctrl_hdr hdr;
 	virtio_net_ctrl_ack status;
-	struct virtio_net_ctrl_mq mq;
-	u8 promisc;
-	u8 allmulti;
-	__virtio16 vid;
-	__virtio64 offloads;
-	struct virtio_net_ctrl_coal_tx coal_tx;
-	struct virtio_net_ctrl_coal_rx coal_rx;
-	struct virtio_net_ctrl_coal_vq coal_vq;
 };
 
 struct virtnet_info {
@@ -2672,14 +2664,19 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
 
 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 {
+	struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
 	struct scatterlist sg;
 	struct net_device *dev = vi->dev;
 
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
 
-	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
-	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
+	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+	if (!mq)
+		return -ENOMEM;
+
+	mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+	sg_init_one(&sg, mq, sizeof(*mq));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -2708,6 +2705,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 
 static int virtnet_close(struct net_device *dev)
 {
+	u8 *promisc_allmulti  __free(kfree) = NULL;
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i;
 
@@ -2732,6 +2730,7 @@ static void virtnet_rx_mode_work(struct work_struct *work)
 	struct scatterlist sg[2];
 	struct virtio_net_ctrl_mac *mac_data;
 	struct netdev_hw_addr *ha;
+	u8 *promisc_allmulti;
 	int uc_count;
 	int mc_count;
 	void *buf;
@@ -2743,22 +2742,27 @@ static void virtnet_rx_mode_work(struct work_struct *work)
 
 	rtnl_lock();
 
-	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
-	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+	promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_ATOMIC);
+	if (!promisc_allmulti) {
+		dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
+		return;
+	}
 
-	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
+	*promisc_allmulti = !!(dev->flags & IFF_PROMISC);
+	sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
-			 vi->ctrl->promisc ? "en" : "dis");
+			 *promisc_allmulti ? "en" : "dis");
 
-	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
+	*promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
+	sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
-			 vi->ctrl->allmulti ? "en" : "dis");
+			 *promisc_allmulti ? "en" : "dis");
 
 	netif_addr_lock_bh(dev);
 
@@ -2819,10 +2823,15 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
 				   __be16 proto, u16 vid)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	__virtio16 *_vid __free(kfree) = NULL;
 	struct scatterlist sg;
 
-	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
-	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+	_vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+	if (!_vid)
+		return -ENOMEM;
+
+	*_vid = cpu_to_virtio16(vi->vdev, vid);
+	sg_init_one(&sg, _vid, sizeof(*_vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -2834,10 +2843,15 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
 				    __be16 proto, u16 vid)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	__virtio16 *_vid __free(kfree) = NULL;
 	struct scatterlist sg;
 
-	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
-	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+	_vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+	if (!_vid)
+		return -ENOMEM;
+
+	*_vid = cpu_to_virtio16(vi->vdev, vid);
+	sg_init_one(&sg, _vid, sizeof(*_vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -2950,12 +2964,17 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
 					 u16 vqn, u32 max_usecs, u32 max_packets)
 {
+	struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
 	struct scatterlist sgs;
 
-	vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
-	vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
-	vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
-	sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+	coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
+	if (!coal_vq)
+		return -ENOMEM;
+
+	coal_vq->vqn = cpu_to_le16(vqn);
+	coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
+	coal_vq->coal.max_packets = cpu_to_le32(max_packets);
+	sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
@@ -3101,11 +3120,15 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
-				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
-		dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
-		return false;
-	}
+				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
+		goto err;
+
 	return true;
+
+err:
+	dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+	return false;
+
 }
 
 static void virtnet_init_default_rss(struct virtnet_info *vi)
@@ -3410,12 +3433,17 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
 					  struct ethtool_coalesce *ec)
 {
+	struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
 	struct scatterlist sgs_tx;
 	int i;
 
-	vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
-	vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
-	sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
+	coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
+	if (!coal_tx)
+		return -ENOMEM;
+
+	coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+	coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+	sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@@ -3435,6 +3463,7 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 					  struct ethtool_coalesce *ec)
 {
+	struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
 	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
 	struct scatterlist sgs_rx;
 	int i;
@@ -3453,6 +3482,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 		return 0;
 	}
 
+	coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
+	if (!coal_rx)
+		return -ENOMEM;
+
 	if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
 		vi->rx_dim_enabled = false;
 		for (i = 0; i < vi->max_queue_pairs; i++)
@@ -3463,9 +3496,9 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 	 * we need apply the global new params even if they
 	 * are not updated.
 	 */
-	vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
-	vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
-	sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
+	coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+	coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+	sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
@@ -3951,10 +3984,16 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 
 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
 {
+	__virtio64 *_offloads __free(kfree) = NULL;
 	struct scatterlist sg;
-	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
 
-	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
+	_offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
+	if (!_offloads)
+		return -ENOMEM;
+
+	*_offloads = cpu_to_virtio64(vi->vdev, offloads);
+
+	sg_init_one(&sg, _offloads, sizeof(*_offloads));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
 				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
-- 
2.34.1



* [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ.
  2024-04-16 19:30 [PATCH net-next v4 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
  2024-04-16 19:30 ` [PATCH net-next v4 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
  2024-04-16 19:30 ` [PATCH net-next v4 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
@ 2024-04-16 19:30 ` Daniel Jurgens
  2024-04-18  6:42   ` Jason Wang
  2024-04-16 19:30 ` [PATCH net-next v4 4/6] virtio_net: Do DIM update for specified queue only Daniel Jurgens
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 16+ messages in thread
From: Daniel Jurgens @ 2024-04-16 19:30 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

The command VQ will no longer be protected by the RTNL lock. Use a
spinlock to protect the control buffer header and the VQ.

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
 drivers/net/virtio_net.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0ee192b45e1e..d02f83a919a7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -282,6 +282,7 @@ struct virtnet_info {
 
 	/* Has control virtqueue */
 	bool has_cvq;
+	spinlock_t cvq_lock;
 
 	/* Host can handle any s/g split between our header and packet data */
 	bool any_header_sg;
@@ -2529,6 +2530,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 	/* Caller should know better */
 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
 
+	guard(spinlock)(&vi->cvq_lock);
 	vi->ctrl->status = ~0;
 	vi->ctrl->hdr.class = class;
 	vi->ctrl->hdr.cmd = cmd;
@@ -4818,8 +4820,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
 		vi->any_header_sg = true;
 
-	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
 		vi->has_cvq = true;
+		spin_lock_init(&vi->cvq_lock);
+	}
 
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
 		mtu = virtio_cread16(vdev,
-- 
2.34.1



* [PATCH net-next v4 4/6] virtio_net: Do DIM update for specified queue only
  2024-04-16 19:30 [PATCH net-next v4 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
                   ` (2 preceding siblings ...)
  2024-04-16 19:30 ` [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ Daniel Jurgens
@ 2024-04-16 19:30 ` Daniel Jurgens
  2024-04-16 19:30 ` [PATCH net-next v4 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
  2024-04-16 19:30 ` [PATCH net-next v4 6/6] virtio_net: Remove rtnl lock protection of command buffers Daniel Jurgens
  5 siblings, 0 replies; 16+ messages in thread
From: Daniel Jurgens @ 2024-04-16 19:30 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

Since we no longer have to hold the RTNL lock here, just do updates for
the specified queue.

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
---
 drivers/net/virtio_net.c | 40 +++++++++++++++-------------------------
 1 file changed, 15 insertions(+), 25 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d02f83a919a7..b3aa4d2a15e9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3596,38 +3596,28 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct net_device *dev = vi->dev;
 	struct dim_cq_moder update_moder;
-	int i, qnum, err;
+	int qnum, err;
 
 	if (!rtnl_trylock())
 		return;
 
-	/* Each rxq's work is queued by "net_dim()->schedule_work()"
-	 * in response to NAPI traffic changes. Note that dim->profile_ix
-	 * for each rxq is updated prior to the queuing action.
-	 * So we only need to traverse and update profiles for all rxqs
-	 * in the work which is holding rtnl_lock.
-	 */
-	for (i = 0; i < vi->curr_queue_pairs; i++) {
-		rq = &vi->rq[i];
-		dim = &rq->dim;
-		qnum = rq - vi->rq;
+	qnum = rq - vi->rq;
 
-		if (!rq->dim_enabled)
-			continue;
+	if (!rq->dim_enabled)
+		goto out;
 
-		update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
-		if (update_moder.usec != rq->intr_coal.max_usecs ||
-		    update_moder.pkts != rq->intr_coal.max_packets) {
-			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
-							       update_moder.usec,
-							       update_moder.pkts);
-			if (err)
-				pr_debug("%s: Failed to send dim parameters on rxq%d\n",
-					 dev->name, qnum);
-			dim->state = DIM_START_MEASURE;
-		}
+	update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+	if (update_moder.usec != rq->intr_coal.max_usecs ||
+	    update_moder.pkts != rq->intr_coal.max_packets) {
+		err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
+						       update_moder.usec,
+						       update_moder.pkts);
+		if (err)
+			pr_debug("%s: Failed to send dim parameters on rxq%d\n",
+				 dev->name, qnum);
+		dim->state = DIM_START_MEASURE;
 	}
-
+out:
 	rtnl_unlock();
 }
 
-- 
2.34.1



* [PATCH net-next v4 5/6] virtio_net: Add a lock for per queue RX coalesce
  2024-04-16 19:30 [PATCH net-next v4 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
                   ` (3 preceding siblings ...)
  2024-04-16 19:30 ` [PATCH net-next v4 4/6] virtio_net: Do DIM update for specified queue only Daniel Jurgens
@ 2024-04-16 19:30 ` Daniel Jurgens
  2024-04-16 19:30 ` [PATCH net-next v4 6/6] virtio_net: Remove rtnl lock protection of command buffers Daniel Jurgens
  5 siblings, 0 replies; 16+ messages in thread
From: Daniel Jurgens @ 2024-04-16 19:30 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

Once the RTNL locking around the control buffer is removed, there can be
contention on the per-queue RX interrupt coalescing data. Use a spin lock
per queue.

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
---
 drivers/net/virtio_net.c | 40 +++++++++++++++++++++++++++++++++-------
 1 file changed, 33 insertions(+), 7 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b3aa4d2a15e9..bae5beafe1a1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -184,6 +184,9 @@ struct receive_queue {
 	/* Is dynamic interrupt moderation enabled? */
 	bool dim_enabled;
 
+	/* Used to protect dim_enabled and intr_coal */
+	spinlock_t dim_lock;
+
 	/* Dynamic Interrupt Moderation */
 	struct dim dim;
 
@@ -2218,6 +2221,10 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	/* Out of packets? */
 	if (received < budget) {
 		napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
+		/* Intentionally not taking dim_lock here. This could result
+		 * in a net_dim call with dim now disabled. But virtnet_rx_dim_work
+		 * will take the lock and not update settings if dim is now disabled.
+		 */
 		if (napi_complete && rq->dim_enabled)
 			virtnet_rx_dim_update(vi, rq);
 	}
@@ -3087,9 +3094,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
 				return err;
 
 			/* The reason is same as the transmit virtqueue reset */
+			spin_lock(&vi->rq[i].dim_lock);
 			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
 							       vi->intr_coal_rx.max_usecs,
 							       vi->intr_coal_rx.max_packets);
+			spin_unlock(&vi->rq[i].dim_lock);
 			if (err)
 				return err;
 		}
@@ -3468,6 +3477,7 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 	struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
 	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
 	struct scatterlist sgs_rx;
+	int ret = 0;
 	int i;
 
 	if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
@@ -3477,16 +3487,22 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 			       ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
 		return -EINVAL;
 
+	/* Acquire all queues dim_locks */
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		spin_lock(&vi->rq[i].dim_lock);
+
 	if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
 		vi->rx_dim_enabled = true;
 		for (i = 0; i < vi->max_queue_pairs; i++)
 			vi->rq[i].dim_enabled = true;
-		return 0;
+		goto unlock;
 	}
 
 	coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
-	if (!coal_rx)
-		return -ENOMEM;
+	if (!coal_rx) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
 
 	if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
 		vi->rx_dim_enabled = false;
@@ -3504,8 +3520,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
-				  &sgs_rx))
-		return -EINVAL;
+				  &sgs_rx)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
 
 	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
 	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
@@ -3513,8 +3531,11 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 		vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
 		vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
 	}
+unlock:
+	for (i = vi->max_queue_pairs - 1; i >= 0; i--)
+		spin_unlock(&vi->rq[i].dim_lock);
 
-	return 0;
+	return ret;
 }
 
 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
@@ -3538,10 +3559,12 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
 					     u16 queue)
 {
 	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
-	bool cur_rx_dim = vi->rq[queue].dim_enabled;
 	u32 max_usecs, max_packets;
+	bool cur_rx_dim;
 	int err;
 
+	guard(spinlock)(&vi->rq[queue].dim_lock);
+	cur_rx_dim = vi->rq[queue].dim_enabled;
 	max_usecs = vi->rq[queue].intr_coal.max_usecs;
 	max_packets = vi->rq[queue].intr_coal.max_packets;
 
@@ -3603,6 +3626,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 
 	qnum = rq - vi->rq;
 
+	guard(spinlock)(&rq->dim_lock);
 	if (!rq->dim_enabled)
 		goto out;
 
@@ -3756,6 +3780,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
 		return -EINVAL;
 
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+		guard(spinlock)(&vi->rq[queue].dim_lock);
 		ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
 		ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
 		ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
@@ -4501,6 +4526,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 
 		u64_stats_init(&vi->rq[i].stats.syncp);
 		u64_stats_init(&vi->sq[i].stats.syncp);
+		spin_lock_init(&vi->rq[i].dim_lock);
 	}
 
 	return 0;
-- 
2.34.1



* [PATCH net-next v4 6/6] virtio_net: Remove rtnl lock protection of command buffers
  2024-04-16 19:30 [PATCH net-next v4 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
                   ` (4 preceding siblings ...)
  2024-04-16 19:30 ` [PATCH net-next v4 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
@ 2024-04-16 19:30 ` Daniel Jurgens
  5 siblings, 0 replies; 16+ messages in thread
From: Daniel Jurgens @ 2024-04-16 19:30 UTC (permalink / raw)
  To: netdev
  Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri, Daniel Jurgens

The rtnl lock is no longer needed to protect the control buffer and
command VQ.

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
 drivers/net/virtio_net.c | 27 +++++----------------------
 1 file changed, 5 insertions(+), 22 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bae5beafe1a1..5825775af8f8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2664,14 +2664,12 @@ static void virtnet_stats(struct net_device *dev,
 
 static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
-	rtnl_lock();
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
 				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
-	rtnl_unlock();
 }
 
-static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 {
 	struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
 	struct scatterlist sg;
@@ -2702,16 +2700,6 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	return 0;
 }
 
-static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
-{
-	int err;
-
-	rtnl_lock();
-	err = _virtnet_set_queues(vi, queue_pairs);
-	rtnl_unlock();
-	return err;
-}
-
 static int virtnet_close(struct net_device *dev)
 {
 	u8 *promisc_allmulti  __free(kfree) = NULL;
@@ -3317,7 +3305,7 @@ static int virtnet_set_channels(struct net_device *dev,
 		return -EINVAL;
 
 	cpus_read_lock();
-	err = _virtnet_set_queues(vi, queue_pairs);
+	err = virtnet_set_queues(vi, queue_pairs);
 	if (err) {
 		cpus_read_unlock();
 		goto err;
@@ -3621,14 +3609,11 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 	struct dim_cq_moder update_moder;
 	int qnum, err;
 
-	if (!rtnl_trylock())
-		return;
-
 	qnum = rq - vi->rq;
 
 	guard(spinlock)(&rq->dim_lock);
 	if (!rq->dim_enabled)
-		goto out;
+		return;
 
 	update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
 	if (update_moder.usec != rq->intr_coal.max_usecs ||
@@ -3641,8 +3626,6 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 				 dev->name, qnum);
 		dim->state = DIM_START_MEASURE;
 	}
-out:
-	rtnl_unlock();
 }
 
 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
@@ -4110,7 +4093,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		synchronize_net();
 	}
 
-	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
+	err = virtnet_set_queues(vi, curr_qp + xdp_qp);
 	if (err)
 		goto err;
 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
@@ -4932,7 +4915,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
-	_virtnet_set_queues(vi, vi->curr_queue_pairs);
+	virtnet_set_queues(vi, vi->curr_queue_pairs);
 
 	/* a random MAC address has been assigned, notify the device.
 	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
-- 
2.34.1



* Re: [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ.
  2024-04-16 19:30 ` [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ Daniel Jurgens
@ 2024-04-18  6:42   ` Jason Wang
  2024-04-18  7:36     ` Heng Qi
  0 siblings, 1 reply; 16+ messages in thread
From: Jason Wang @ 2024-04-18  6:42 UTC (permalink / raw)
  To: Daniel Jurgens
  Cc: netdev, mst, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri

On Wed, Apr 17, 2024 at 3:31 AM Daniel Jurgens <danielj@nvidia.com> wrote:
>
> The command VQ will no longer be protected by the RTNL lock. Use a
> spinlock to protect the control buffer header and the VQ.
>
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> ---
>  drivers/net/virtio_net.c | 6 +++++-
>  1 file changed, 5 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 0ee192b45e1e..d02f83a919a7 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -282,6 +282,7 @@ struct virtnet_info {
>
>         /* Has control virtqueue */
>         bool has_cvq;
> +       spinlock_t cvq_lock;

Using a spinlock instead of a mutex is problematic, as there's no
guarantee on when the driver will get a reply. And it became even more
serious after 0d197a147164 ("virtio-net: add cond_resched() to the
command waiting loop").

Any reason we can't use mutex?

Thanks

>
>         /* Host can handle any s/g split between our header and packet data */
>         bool any_header_sg;
> @@ -2529,6 +2530,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
>         /* Caller should know better */
>         BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
>
> +       guard(spinlock)(&vi->cvq_lock);
>         vi->ctrl->status = ~0;
>         vi->ctrl->hdr.class = class;
>         vi->ctrl->hdr.cmd = cmd;
> @@ -4818,8 +4820,10 @@ static int virtnet_probe(struct virtio_device *vdev)
>             virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
>                 vi->any_header_sg = true;
>
> -       if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
> +       if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
>                 vi->has_cvq = true;
> +               spin_lock_init(&vi->cvq_lock);
> +       }
>
>         if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
>                 mtu = virtio_cread16(vdev,
> --
> 2.34.1
>



* Re: [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ.
  2024-04-18  6:42   ` Jason Wang
@ 2024-04-18  7:36     ` Heng Qi
  2024-04-18 10:56       ` Paolo Abeni
  0 siblings, 1 reply; 16+ messages in thread
From: Heng Qi @ 2024-04-18  7:36 UTC (permalink / raw)
  To: Jason Wang, Daniel Jurgens
  Cc: netdev, mst, xuanzhuo, virtualization, davem, edumazet, kuba,
	pabeni, jiri



On 2024/4/18 at 2:42 PM, Jason Wang wrote:
> On Wed, Apr 17, 2024 at 3:31 AM Daniel Jurgens <danielj@nvidia.com> wrote:
>> The command VQ will no longer be protected by the RTNL lock. Use a
>> spinlock to protect the control buffer header and the VQ.
>>
>> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
>> Reviewed-by: Jiri Pirko <jiri@nvidia.com>
>> ---
>>   drivers/net/virtio_net.c | 6 +++++-
>>   1 file changed, 5 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index 0ee192b45e1e..d02f83a919a7 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -282,6 +282,7 @@ struct virtnet_info {
>>
>>          /* Has control virtqueue */
>>          bool has_cvq;
>> +       spinlock_t cvq_lock;
> Spinlock is instead of mutex which is problematic as there's no
> guarantee on when the driver will get a reply. And it became even more
> serious after 0d197a147164 ("virtio-net: add cond_resched() to the
> command waiting loop").
>
> Any reason we can't use mutex?

Hi Jason,

I made a patch set to enable ctrlq's irq on top of this patch set, which 
removes cond_resched().

But I need a little time to test; it should not take long. So could the
topic of cond_resched + spin lock versus mutex lock wait?

Thank you very much!

>
> Thanks
>
>>          /* Host can handle any s/g split between our header and packet data */
>>          bool any_header_sg;
>> @@ -2529,6 +2530,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
>>          /* Caller should know better */
>>          BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
>>
>> +       guard(spinlock)(&vi->cvq_lock);
>>          vi->ctrl->status = ~0;
>>          vi->ctrl->hdr.class = class;
>>          vi->ctrl->hdr.cmd = cmd;
>> @@ -4818,8 +4820,10 @@ static int virtnet_probe(struct virtio_device *vdev)
>>              virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
>>                  vi->any_header_sg = true;
>>
>> -       if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
>> +       if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
>>                  vi->has_cvq = true;
>> +               spin_lock_init(&vi->cvq_lock);
>> +       }
>>
>>          if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
>>                  mtu = virtio_cread16(vdev,
>> --
>> 2.34.1
>>



* Re: [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ.
  2024-04-18  7:36     ` Heng Qi
@ 2024-04-18 10:56       ` Paolo Abeni
  2024-04-18 15:38         ` Dan Jurgens
  0 siblings, 1 reply; 16+ messages in thread
From: Paolo Abeni @ 2024-04-18 10:56 UTC (permalink / raw)
  To: Heng Qi, Jason Wang, Daniel Jurgens
  Cc: netdev, mst, xuanzhuo, virtualization, davem, edumazet, kuba, jiri

On Thu, 2024-04-18 at 15:36 +0800, Heng Qi wrote:
> 
> On 2024/4/18 at 2:42 PM, Jason Wang wrote:
> > On Wed, Apr 17, 2024 at 3:31 AM Daniel Jurgens <danielj@nvidia.com> wrote:
> > > The command VQ will no longer be protected by the RTNL lock. Use a
> > > spinlock to protect the control buffer header and the VQ.
> > > 
> > > Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> > > Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> > > ---
> > >   drivers/net/virtio_net.c | 6 +++++-
> > >   1 file changed, 5 insertions(+), 1 deletion(-)
> > > 
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index 0ee192b45e1e..d02f83a919a7 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -282,6 +282,7 @@ struct virtnet_info {
> > > 
> > >          /* Has control virtqueue */
> > >          bool has_cvq;
> > > +       spinlock_t cvq_lock;
> > Spinlock is instead of mutex which is problematic as there's no
> > guarantee on when the driver will get a reply. And it became even more
> > serious after 0d197a147164 ("virtio-net: add cond_resched() to the
> > command waiting loop").
> > 
> > Any reason we can't use mutex?
> 
> Hi Jason,
> 
> I made a patch set to enable ctrlq's irq on top of this patch set, which 
> removes cond_resched().
> 
> But I need a little time to test, this is close to fast. So could the 
> topic about cond_resched +
> spin lock or mutex lock be wait?

The big problem is that, as long as the cond_resched() is there,
replacing the mutex with a spinlock can/will lead to scheduling-while-atomic
splats. We can't intentionally introduce such a scenario.
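
Roughly, the waiting loop in virtnet_send_command() (condensed) would
then run under the new spinlock:

	guard(spinlock)(&vi->cvq_lock);
	...
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq)) {
		cond_resched();	/* may sleep while the spinlock is held */
		cpu_relax();
	}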

Side note: the compiler apparently does not like the guard() construct,
leading to new warnings here and in later patches. I'm unsure if the
code simplification is worth it.

Cheers,

Paolo



* RE: [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ.
  2024-04-18 10:56       ` Paolo Abeni
@ 2024-04-18 15:38         ` Dan Jurgens
  2024-04-18 15:48           ` Paolo Abeni
  2024-04-18 16:06           ` Heng Qi
  0 siblings, 2 replies; 16+ messages in thread
From: Dan Jurgens @ 2024-04-18 15:38 UTC (permalink / raw)
  To: Paolo Abeni, Heng Qi, Jason Wang
  Cc: netdev, mst, xuanzhuo, virtualization, davem, edumazet, kuba, Jiri Pirko

> From: Paolo Abeni <pabeni@redhat.com>
> Sent: Thursday, April 18, 2024 5:57 AM
> On Thu, 2024-04-18 at 15:36 +0800, Heng Qi wrote:
> >
> > On 2024/4/18 at 2:42 PM, Jason Wang wrote:
> > > On Wed, Apr 17, 2024 at 3:31 AM Daniel Jurgens <danielj@nvidia.com>
> wrote:
> > > > The command VQ will no longer be protected by the RTNL lock. Use a
> > > > spinlock to protect the control buffer header and the VQ.
> > > >
> > > > Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> > > > Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> > > > ---
> > > >   drivers/net/virtio_net.c | 6 +++++-
> > > >   1 file changed, 5 insertions(+), 1 deletion(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index 0ee192b45e1e..d02f83a919a7 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -282,6 +282,7 @@ struct virtnet_info {
> > > >
> > > >          /* Has control virtqueue */
> > > >          bool has_cvq;
> > > > +       spinlock_t cvq_lock;
> > > Spinlock is instead of mutex which is problematic as there's no
> > > guarantee on when the driver will get a reply. And it became even
> > > more serious after 0d197a147164 ("virtio-net: add cond_resched() to
> > > the command waiting loop").
> > >
> > > Any reason we can't use mutex?
> >
> > Hi Jason,
> >
> > I made a patch set to enable ctrlq's irq on top of this patch set,
> > which removes cond_resched().
> >
> > But I need a little time to test, this is close to fast. So could the
> > topic about cond_resched + spin lock or mutex lock be wait?
> 
> The big problem is that until the cond_resched() is there, replacing the
> mutex with a spinlock can/will lead to scheduling while atomic splats. We
> can't intentionally introduce such scenario.

When I created the series, set_rx_mode hadn't been moved to a work queue and the cond_resched() wasn't there, so a mutex wasn't possible then. If the CVQ is made event driven, the lock can be released right after posting the work to the VQ.

> 
> Side note: the compiler apparently does not like guard() construct, leading to
> new warning, here and in later patches. I'm unsure if the code simplification
> is worthy.

I didn't see any warnings with GCC or clang. This is used in other places in the kernel as well.
gcc version 13.2.1 20230918 (Red Hat 13.2.1-3) (GCC)
clang version 17.0.6 (Fedora 17.0.6-2.fc39)
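
For reference, guard(spinlock)(&vi->cvq_lock) from the kernel's cleanup.h
infrastructure behaves roughly like the following (a conceptual sketch,
not the exact macro expansion):

	spin_lock(&vi->cvq_lock);
	/* ... remainder of the enclosing scope ... */
	spin_unlock(&vi->cvq_lock);	/* runs automatically at scope exit */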
> 
> Cheers,
> 
> Paolo



* Re: [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ.
  2024-04-18 15:38         ` Dan Jurgens
@ 2024-04-18 15:48           ` Paolo Abeni
  2024-04-18 16:01             ` Jakub Kicinski
  2024-04-18 16:12             ` Heng Qi
  2024-04-18 16:06           ` Heng Qi
  1 sibling, 2 replies; 16+ messages in thread
From: Paolo Abeni @ 2024-04-18 15:48 UTC (permalink / raw)
  To: Dan Jurgens, Heng Qi, Jason Wang
  Cc: netdev, mst, xuanzhuo, virtualization, davem, edumazet, kuba, Jiri Pirko

On Thu, 2024-04-18 at 15:38 +0000, Dan Jurgens wrote:
> > From: Paolo Abeni <pabeni@redhat.com>
> > Sent: Thursday, April 18, 2024 5:57 AM
> > On Thu, 2024-04-18 at 15:36 +0800, Heng Qi wrote:
> > > 
> > > On 2024/4/18 at 2:42 PM, Jason Wang wrote:
> > > > On Wed, Apr 17, 2024 at 3:31 AM Daniel Jurgens <danielj@nvidia.com>
> > wrote:
> > > > > The command VQ will no longer be protected by the RTNL lock. Use a
> > > > > spinlock to protect the control buffer header and the VQ.
> > > > > 
> > > > > Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> > > > > Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> > > > > ---
> > > > >   drivers/net/virtio_net.c | 6 +++++-
> > > > >   1 file changed, 5 insertions(+), 1 deletion(-)
> > > > > 
> > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > index 0ee192b45e1e..d02f83a919a7 100644
> > > > > --- a/drivers/net/virtio_net.c
> > > > > +++ b/drivers/net/virtio_net.c
> > > > > @@ -282,6 +282,7 @@ struct virtnet_info {
> > > > > 
> > > > >          /* Has control virtqueue */
> > > > >          bool has_cvq;
> > > > > +       spinlock_t cvq_lock;
> > > > Spinlock is instead of mutex which is problematic as there's no
> > > > guarantee on when the driver will get a reply. And it became even
> > > > more serious after 0d197a147164 ("virtio-net: add cond_resched() to
> > > > the command waiting loop").
> > > > 
> > > > Any reason we can't use mutex?
> > > 
> > > Hi Jason,
> > > 
> > > I made a patch set to enable ctrlq's irq on top of this patch set,
> > > which removes cond_resched().
> > > 
> > > But I need a little time to test, this is close to fast. So could the
> > > topic about cond_resched + spin lock or mutex lock be wait?
> > 
> > The big problem is that until the cond_resched() is there, replacing the
> > mutex with a spinlock can/will lead to scheduling while atomic splats. We
> > can't intentionally introduce such scenario.
> 
> When I created the series set_rx_mode wasn't moved to a work queue, 
> and the cond_resched wasn't there. 

Unfortunately cond_resched() is there right now.

> Mutex wasn't possible, then. If the CVQ is made to be event driven, then 
> the lock can be released right after posting the work to the VQ.

That should work.

> > Side note: the compiler apparently does not like guard() construct, leading to
> > new warning, here and in later patches. I'm unsure if the code simplification
> > is worthy.
> 
> I didn't see any warnings with GCC or clang. This is used other places in the kernel as well.
> gcc version 13.2.1 20230918 (Red Hat 13.2.1-3) (GCC)
> clang version 17.0.6 (Fedora 17.0.6-2.fc39)
> 

See:

https://patchwork.kernel.org/project/netdevbpf/patch/20240416193039.272997-4-danielj@nvidia.com/
https://netdev.bots.linux.dev/static/nipa/845178/13632442/build_32bit/stderr
https://netdev.bots.linux.dev/static/nipa/845178/13632442/build_allmodconfig_warn/stderr

Cheers,

Paolo



* Re: [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ.
  2024-04-18 15:48           ` Paolo Abeni
@ 2024-04-18 16:01             ` Jakub Kicinski
  2024-04-18 16:12             ` Heng Qi
  1 sibling, 0 replies; 16+ messages in thread
From: Jakub Kicinski @ 2024-04-18 16:01 UTC (permalink / raw)
  To: Paolo Abeni
  Cc: Dan Jurgens, Heng Qi, Jason Wang, netdev, mst, xuanzhuo,
	virtualization, davem, edumazet, Jiri Pirko

On Thu, 18 Apr 2024 17:48:57 +0200 Paolo Abeni wrote:
> > > Side note: the compiler apparently does not like guard() construct, leading to
> > > new warning, here and in later patches. I'm unsure if the code simplification
> > > is worthy.  
> > 
> > I didn't see any warnings with GCC or clang. This is used other places in the kernel as well.
> > gcc version 13.2.1 20230918 (Red Hat 13.2.1-3) (GCC)
> > clang version 17.0.6 (Fedora 17.0.6-2.fc39)
> >   
> 
> See:
> 
> https://patchwork.kernel.org/project/netdevbpf/patch/20240416193039.272997-4-danielj@nvidia.com/
> https://netdev.bots.linux.dev/static/nipa/845178/13632442/build_32bit/stderr
> https://netdev.bots.linux.dev/static/nipa/845178/13632442/build_allmodconfig_warn/stderr

These are sparse errors, I think, but I agree that there's little gain
here and a clear cost in wasted time, since the standard kernel
tooling has not caught up with this ugly invention.


* Re: [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ.
  2024-04-18 15:38         ` Dan Jurgens
  2024-04-18 15:48           ` Paolo Abeni
@ 2024-04-18 16:06           ` Heng Qi
  1 sibling, 0 replies; 16+ messages in thread
From: Heng Qi @ 2024-04-18 16:06 UTC (permalink / raw)
  To: Dan Jurgens
  Cc: netdev, mst, xuanzhuo, virtualization, davem, edumazet, kuba,
	Jiri Pirko, Jason Wang, Paolo Abeni

> I didn't see any warnings with GCC or clang. This is used other places in the kernel as well.
> gcc version 13.2.1 20230918 (Red Hat 13.2.1-3) (GCC)
> clang version 17.0.6 (Fedora 17.0.6-2.fc39)
>

I think Paolo is referring to this[1][2]: guard() will confuse the sparse
check and cause a warning:

[1] 
https://patchwork.kernel.org/project/netdevbpf/patch/20240416193039.272997-4-danielj@nvidia.com/
[2] 
https://patchwork.kernel.org/project/netdevbpf/patch/20240416193039.272997-6-danielj@nvidia.com/




* Re: [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ.
  2024-04-18 15:48           ` Paolo Abeni
  2024-04-18 16:01             ` Jakub Kicinski
@ 2024-04-18 16:12             ` Heng Qi
  2024-04-19  0:28               ` Jason Wang
  1 sibling, 1 reply; 16+ messages in thread
From: Heng Qi @ 2024-04-18 16:12 UTC (permalink / raw)
  To: Paolo Abeni
  Cc: netdev, mst, xuanzhuo, virtualization, davem, edumazet, kuba,
	Jiri Pirko, Dan Jurgens, Jason Wang



On 2024/4/18 at 11:48 PM, Paolo Abeni wrote:
> On Thu, 2024-04-18 at 15:38 +0000, Dan Jurgens wrote:
>>> From: Paolo Abeni <pabeni@redhat.com>
>>> Sent: Thursday, April 18, 2024 5:57 AM
>>> On Thu, 2024-04-18 at 15:36 +0800, Heng Qi wrote:
>>>> On 2024/4/18 at 2:42 PM, Jason Wang wrote:
>>>>> On Wed, Apr 17, 2024 at 3:31 AM Daniel Jurgens <danielj@nvidia.com>
>>> wrote:
>>>>>> The command VQ will no longer be protected by the RTNL lock. Use a
>>>>>> spinlock to protect the control buffer header and the VQ.
>>>>>>
>>>>>> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
>>>>>> Reviewed-by: Jiri Pirko <jiri@nvidia.com>
>>>>>> ---
>>>>>>    drivers/net/virtio_net.c | 6 +++++-
>>>>>>    1 file changed, 5 insertions(+), 1 deletion(-)
>>>>>>
>>>>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>>>>> index 0ee192b45e1e..d02f83a919a7 100644
>>>>>> --- a/drivers/net/virtio_net.c
>>>>>> +++ b/drivers/net/virtio_net.c
>>>>>> @@ -282,6 +282,7 @@ struct virtnet_info {
>>>>>>
>>>>>>           /* Has control virtqueue */
>>>>>>           bool has_cvq;
>>>>>> +       spinlock_t cvq_lock;
>>>>> Spinlock is instead of mutex which is problematic as there's no
>>>>> guarantee on when the driver will get a reply. And it became even
>>>>> more serious after 0d197a147164 ("virtio-net: add cond_resched() to
>>>>> the command waiting loop").
>>>>>
>>>>> Any reason we can't use mutex?
>>>> Hi Jason,
>>>>
>>>> I made a patch set to enable ctrlq's irq on top of this patch set,
>>>> which removes cond_resched().
>>>>
>>>> But I need a little time to test, this is close to fast. So could the
>>>> topic about cond_resched + spin lock or mutex lock be wait?
>>> The big problem is that until the cond_resched() is there, replacing the
>>> mutex with a spinlock can/will lead to scheduling while atomic splats. We
>>> can't intentionally introduce such scenario.
>> When I created the series set_rx_mode wasn't moved to a work queue,
>> and the cond_resched wasn't there.
> Unfortunately cond_resched() is there right now.

YES.

>
>> Mutex wasn't possible, then. If the CVQ is made to be event driven, then
>> the lock can be released right after posting the work to the VQ.
> That should work.

Yes, I will test my new patches (ctrlq with irq enabled) soon; then the
combination of this set and mine MAY make deciding between mutex and
spin lock easier.

Thanks.

>
>>> Side note: the compiler apparently does not like guard() construct, leading to
>>> new warning, here and in later patches. I'm unsure if the code simplification
>>> is worthy.
>> I didn't see any warnings with GCC or clang. This is used other places in the kernel as well.
>> gcc version 13.2.1 20230918 (Red Hat 13.2.1-3) (GCC)
>> clang version 17.0.6 (Fedora 17.0.6-2.fc39)
>>
> See:
>
> https://patchwork.kernel.org/project/netdevbpf/patch/20240416193039.272997-4-danielj@nvidia.com/
> https://netdev.bots.linux.dev/static/nipa/845178/13632442/build_32bit/stderr
> https://netdev.bots.linux.dev/static/nipa/845178/13632442/build_allmodconfig_warn/stderr
>
> Cheers,
>
> Paolo



* Re: [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ.
  2024-04-18 16:12             ` Heng Qi
@ 2024-04-19  0:28               ` Jason Wang
  0 siblings, 0 replies; 16+ messages in thread
From: Jason Wang @ 2024-04-19  0:28 UTC (permalink / raw)
  To: Heng Qi
  Cc: Paolo Abeni, netdev, mst, xuanzhuo, virtualization, davem,
	edumazet, kuba, Jiri Pirko, Dan Jurgens

On Fri, Apr 19, 2024 at 12:12 AM Heng Qi <hengqi@linux.alibaba.com> wrote:
>
>
>
> On 2024/4/18 at 11:48 PM, Paolo Abeni wrote:
> > On Thu, 2024-04-18 at 15:38 +0000, Dan Jurgens wrote:
> >>> From: Paolo Abeni <pabeni@redhat.com>
> >>> Sent: Thursday, April 18, 2024 5:57 AM
> >>> On Thu, 2024-04-18 at 15:36 +0800, Heng Qi wrote:
> >>>> On 2024/4/18 at 2:42 PM, Jason Wang wrote:
> >>>>> On Wed, Apr 17, 2024 at 3:31 AM Daniel Jurgens <danielj@nvidia.com>
> >>> wrote:
> >>>>>> The command VQ will no longer be protected by the RTNL lock. Use a
> >>>>>> spinlock to protect the control buffer header and the VQ.
> >>>>>>
> >>>>>> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> >>>>>> Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> >>>>>> ---
> >>>>>>    drivers/net/virtio_net.c | 6 +++++-
> >>>>>>    1 file changed, 5 insertions(+), 1 deletion(-)
> >>>>>>
> >>>>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> >>>>>> index 0ee192b45e1e..d02f83a919a7 100644
> >>>>>> --- a/drivers/net/virtio_net.c
> >>>>>> +++ b/drivers/net/virtio_net.c
> >>>>>> @@ -282,6 +282,7 @@ struct virtnet_info {
> >>>>>>
> >>>>>>           /* Has control virtqueue */
> >>>>>>           bool has_cvq;
> >>>>>> +       spinlock_t cvq_lock;
> >>>>> Spinlock is instead of mutex which is problematic as there's no
> >>>>> guarantee on when the driver will get a reply. And it became even
> >>>>> more serious after 0d197a147164 ("virtio-net: add cond_resched() to
> >>>>> the command waiting loop").
> >>>>>
> >>>>> Any reason we can't use mutex?
> >>>> Hi Jason,
> >>>>
> >>>> I made a patch set to enable ctrlq's irq on top of this patch set,
> >>>> which removes cond_resched().
> >>>>
> >>>> But I need a little time to test, this is close to fast. So could the
> >>>> topic about cond_resched + spin lock or mutex lock be wait?
> >>> The big problem is that until the cond_resched() is there, replacing the
> >>> mutex with a spinlock can/will lead to scheduling while atomic splats. We
> >>> can't intentionally introduce such scenario.
> >> When I created the series set_rx_mode wasn't moved to a work queue,
> >> and the cond_resched wasn't there.
> > Unfortunately cond_resched() is there right now.
>
> YES.
>
> >
> >> Mutex wasn't possible, then. If the CVQ is made to be event driven, then
> >> the lock can be released right after posting the work to the VQ.
> > That should work.
>
> Yes, I will test my new patches (ctrlq with irq enabled) soon, then the
> combination
> of the this set and mine MAY make deciding between mutex or spin lock
> easier.
>
> Thanks.

So I guess the plan is to let your series come first?

Thanks

>
> >
> >>> Side note: the compiler apparently does not like guard() construct, leading to
> >>> new warning, here and in later patches. I'm unsure if the code simplification
> >>> is worthy.
> >> I didn't see any warnings with GCC or clang. This is used other places in the kernel as well.
> >> gcc version 13.2.1 20230918 (Red Hat 13.2.1-3) (GCC)
> >> clang version 17.0.6 (Fedora 17.0.6-2.fc39)
> >>
> > See:
> >
> > https://patchwork.kernel.org/project/netdevbpf/patch/20240416193039.272997-4-danielj@nvidia.com/
> > https://netdev.bots.linux.dev/static/nipa/845178/13632442/build_32bit/stderr
> > https://netdev.bots.linux.dev/static/nipa/845178/13632442/build_allmodconfig_warn/stderr
> >
> > Cheers,
> >
> > Paolo
>



end of thread

Thread overview: 16+ messages
2024-04-16 19:30 [PATCH net-next v4 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
2024-04-16 19:30 ` [PATCH net-next v4 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
2024-04-16 19:30 ` [PATCH net-next v4 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
2024-04-16 19:30 ` [PATCH net-next v4 3/6] virtio_net: Add a lock for the command VQ Daniel Jurgens
2024-04-18  6:42   ` Jason Wang
2024-04-18  7:36     ` Heng Qi
2024-04-18 10:56       ` Paolo Abeni
2024-04-18 15:38         ` Dan Jurgens
2024-04-18 15:48           ` Paolo Abeni
2024-04-18 16:01             ` Jakub Kicinski
2024-04-18 16:12             ` Heng Qi
2024-04-19  0:28               ` Jason Wang
2024-04-18 16:06           ` Heng Qi
2024-04-16 19:30 ` [PATCH net-next v4 4/6] virtio_net: Do DIM update for specified queue only Daniel Jurgens
2024-04-16 19:30 ` [PATCH net-next v4 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
2024-04-16 19:30 ` [PATCH net-next v4 6/6] virtio_net: Remove rtnl lock protection of command buffers Daniel Jurgens
