virtualization.lists.linux-foundation.org archive mirror
 help / color / mirror / Atom feed
* [PATCH net-next V2 0/2] virtio-net: don't busy poll for cvq command
@ 2023-04-13  6:40 Jason Wang
  2023-04-13  6:40 ` [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue Jason Wang
                   ` (3 more replies)
  0 siblings, 4 replies; 25+ messages in thread
From: Jason Wang @ 2023-04-13  6:40 UTC (permalink / raw)
  To: mst, jasowang
  Cc: xuanzhuo, linux-kernel, virtualization, eperezma, edumazet,
	maxime.coquelin, kuba, pabeni, david.marchand, davem

Hi all:

The code used to busy poll for cvq command which turns out to have
several side effects:

1) infinite poll for buggy devices
2) bad interaction with scheduler

So this series tries to use sleep instead of busy polling. In this
version, I take a step back: the hardening part is not implemented and
is left for future investigation. We used to agree to use interruptible
sleep, but it doesn't work for a general workqueue.

Please review.

Thanks

Changes since V1:
- use RTNL to synchronize rx mode worker
- use completion for simplicity
- don't try to harden CVQ command

Changes since RFC:

- switch to use BAD_RING in virtio_break_device()
- check virtqueue_is_broken() after being woken up
- use more_used() instead of virtqueue_get_buf() to allow caller to
  get buffers afterwards
  - break the virtio-net device when timeout
  - get buffer manually since the virtio core check more_used() instead

Jason Wang (2):
  virtio-net: convert rx mode setting to use workqueue
  virtio-net: sleep instead of busy waiting for cvq command

 drivers/net/virtio_net.c | 76 ++++++++++++++++++++++++++++++++++------
 1 file changed, 66 insertions(+), 10 deletions(-)

-- 
2.25.1

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-04-13  6:40 [PATCH net-next V2 0/2] virtio-net: don't busy poll for cvq command Jason Wang
@ 2023-04-13  6:40 ` Jason Wang
  2023-04-13 16:25   ` Michael S. Tsirkin
  2023-04-13  6:40 ` [PATCH net-next V2 2/2] virtio-net: sleep instead of busy waiting for cvq command Jason Wang
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 25+ messages in thread
From: Jason Wang @ 2023-04-13  6:40 UTC (permalink / raw)
  To: mst, jasowang
  Cc: xuanzhuo, linux-kernel, virtualization, eperezma, edumazet,
	maxime.coquelin, kuba, pabeni, david.marchand, davem

This patch converts rx mode setting to be done in a workqueue; this is
a must to allow sleeping while waiting for the cvq command to
respond, since the current code is executed under the addr spin lock.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
Changes since V1:
- use RTNL to synchronize rx mode worker
---
 drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 52 insertions(+), 3 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e2560b6f7980..2e56bbf86894 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -265,6 +265,12 @@ struct virtnet_info {
 	/* Work struct for config space updates */
 	struct work_struct config_work;
 
+	/* Work struct for config rx mode */
+	struct work_struct rx_mode_work;
+
+	/* Is rx mode work enabled? */
+	bool rx_mode_work_enabled;
+
 	/* Does the affinity hint is set for virtqueues? */
 	bool affinity_hint_set;
 
@@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
 	spin_unlock_bh(&vi->refill_lock);
 }
 
+static void enable_rx_mode_work(struct virtnet_info *vi)
+{
+	rtnl_lock();
+	vi->rx_mode_work_enabled = true;
+	rtnl_unlock();
+}
+
+static void disable_rx_mode_work(struct virtnet_info *vi)
+{
+	rtnl_lock();
+	vi->rx_mode_work_enabled = false;
+	rtnl_unlock();
+}
+
 static void virtqueue_napi_schedule(struct napi_struct *napi,
 				    struct virtqueue *vq)
 {
@@ -2310,9 +2330,11 @@ static int virtnet_close(struct net_device *dev)
 	return 0;
 }
 
-static void virtnet_set_rx_mode(struct net_device *dev)
+static void virtnet_rx_mode_work(struct work_struct *work)
 {
-	struct virtnet_info *vi = netdev_priv(dev);
+	struct virtnet_info *vi =
+		container_of(work, struct virtnet_info, rx_mode_work);
+	struct net_device *dev = vi->dev;
 	struct scatterlist sg[2];
 	struct virtio_net_ctrl_mac *mac_data;
 	struct netdev_hw_addr *ha;
@@ -2325,6 +2347,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
 		return;
 
+	rtnl_lock();
+
 	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
 	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
 
@@ -2342,14 +2366,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
 			 vi->ctrl->allmulti ? "en" : "dis");
 
+	netif_addr_lock_bh(dev);
+
 	uc_count = netdev_uc_count(dev);
 	mc_count = netdev_mc_count(dev);
 	/* MAC filter - use one buffer for both lists */
 	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
 		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
 	mac_data = buf;
-	if (!buf)
+	if (!buf) {
+		netif_addr_unlock_bh(dev);
+		rtnl_unlock();
 		return;
+	}
 
 	sg_init_table(sg, 2);
 
@@ -2370,6 +2399,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	netdev_for_each_mc_addr(ha, dev)
 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
 
+	netif_addr_unlock_bh(dev);
+
 	sg_set_buf(&sg[1], mac_data,
 		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
 
@@ -2377,9 +2408,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
 		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
 
+	rtnl_unlock();
+
 	kfree(buf);
 }
 
+static void virtnet_set_rx_mode(struct net_device *dev)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+
+	if (vi->rx_mode_work_enabled)
+		schedule_work(&vi->rx_mode_work);
+}
+
 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
 				   __be16 proto, u16 vid)
 {
@@ -3150,6 +3191,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
 
 	/* Make sure no work handler is accessing the device */
 	flush_work(&vi->config_work);
+	disable_rx_mode_work(vi);
+	flush_work(&vi->rx_mode_work);
 
 	netif_tx_lock_bh(vi->dev);
 	netif_device_detach(vi->dev);
@@ -3172,6 +3215,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 	virtio_device_ready(vdev);
 
 	enable_delayed_refill(vi);
+	enable_rx_mode_work(vi);
 
 	if (netif_running(vi->dev)) {
 		err = virtnet_open(vi->dev);
@@ -3969,6 +4013,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vdev->priv = vi;
 
 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+	INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
 	spin_lock_init(&vi->refill_lock);
 
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
@@ -4077,6 +4122,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (vi->has_rss || vi->has_rss_hash_report)
 		virtnet_init_default_rss(vi);
 
+	enable_rx_mode_work(vi);
+
 	/* serialize netdev register + virtio_device_ready() with ndo_open() */
 	rtnl_lock();
 
@@ -4174,6 +4221,8 @@ static void virtnet_remove(struct virtio_device *vdev)
 
 	/* Make sure no work handler is accessing the device. */
 	flush_work(&vi->config_work);
+	disable_rx_mode_work(vi);
+	flush_work(&vi->rx_mode_work);
 
 	unregister_netdev(vi->dev);
 
-- 
2.25.1

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [PATCH net-next V2 2/2] virtio-net: sleep instead of busy waiting for cvq command
  2023-04-13  6:40 [PATCH net-next V2 0/2] virtio-net: don't busy poll for cvq command Jason Wang
  2023-04-13  6:40 ` [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue Jason Wang
@ 2023-04-13  6:40 ` Jason Wang
  2023-04-13  7:27   ` Xuan Zhuo
  2023-05-16 20:54   ` Michael S. Tsirkin
  2023-04-13 13:02 ` [PATCH net-next V2 0/2] virtio-net: don't busy poll " Maxime Coquelin
       [not found] ` <20230413070408.630fa731@kernel.org>
  3 siblings, 2 replies; 25+ messages in thread
From: Jason Wang @ 2023-04-13  6:40 UTC (permalink / raw)
  To: mst, jasowang
  Cc: xuanzhuo, linux-kernel, virtualization, eperezma, edumazet,
	maxime.coquelin, kuba, pabeni, david.marchand, davem

We used to busy wait on the cvq command; this tends to be
problematic since there is no way to schedule another process which
may serve the control virtqueue. This might be the case when the
control virtqueue is emulated by software. This patch switches to
using a completion to allow the CPU to sleep instead of busy waiting
for the cvq command.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
Changes since V1:
- use completion for simplicity
- don't try to harden the CVQ command which requires more thought
Changes since RFC:
- break the device when timeout
- get buffer manually since the virtio core check more_used() instead
---
 drivers/net/virtio_net.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2e56bbf86894..d3eb8fd6c9dc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -19,6 +19,7 @@
 #include <linux/average.h>
 #include <linux/filter.h>
 #include <linux/kernel.h>
+#include <linux/completion.h>
 #include <net/route.h>
 #include <net/xdp.h>
 #include <net/net_failover.h>
@@ -295,6 +296,8 @@ struct virtnet_info {
 
 	/* failover when STANDBY feature enabled */
 	struct failover *failover;
+
+	struct completion completion;
 };
 
 struct padded_vnet_hdr {
@@ -1709,6 +1712,13 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 	return !oom;
 }
 
+static void virtnet_cvq_done(struct virtqueue *cvq)
+{
+	struct virtnet_info *vi = cvq->vdev->priv;
+
+	complete(&vi->completion);
+}
+
 static void skb_recv_done(struct virtqueue *rvq)
 {
 	struct virtnet_info *vi = rvq->vdev->priv;
@@ -2169,12 +2179,8 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 	if (unlikely(!virtqueue_kick(vi->cvq)))
 		return vi->ctrl->status == VIRTIO_NET_OK;
 
-	/* Spin for a response, the kick causes an ioport write, trapping
-	 * into the hypervisor, so the request should be handled immediately.
-	 */
-	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
-	       !virtqueue_is_broken(vi->cvq))
-		cpu_relax();
+	wait_for_completion(&vi->completion);
+	virtqueue_get_buf(vi->cvq, &tmp);
 
 	return vi->ctrl->status == VIRTIO_NET_OK;
 }
@@ -3672,7 +3678,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
 
 	/* Parameters for control virtqueue, if any */
 	if (vi->has_cvq) {
-		callbacks[total_vqs - 1] = NULL;
+		callbacks[total_vqs - 1] = virtnet_cvq_done;
 		names[total_vqs - 1] = "control";
 	}
 
@@ -4122,6 +4128,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (vi->has_rss || vi->has_rss_hash_report)
 		virtnet_init_default_rss(vi);
 
+	init_completion(&vi->completion);
 	enable_rx_mode_work(vi);
 
 	/* serialize netdev register + virtio_device_ready() with ndo_open() */
-- 
2.25.1

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply related	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 2/2] virtio-net: sleep instead of busy waiting for cvq command
  2023-04-13  6:40 ` [PATCH net-next V2 2/2] virtio-net: sleep instead of busy waiting for cvq command Jason Wang
@ 2023-04-13  7:27   ` Xuan Zhuo
  2023-04-14  5:09     ` Jason Wang
  2023-05-16 20:54   ` Michael S. Tsirkin
  1 sibling, 1 reply; 25+ messages in thread
From: Xuan Zhuo @ 2023-04-13  7:27 UTC (permalink / raw)
  To: Jason Wang
  Cc: mst, linux-kernel, virtualization, eperezma, edumazet,
	maxime.coquelin, kuba, pabeni, david.marchand, davem

On Thu, 13 Apr 2023 14:40:27 +0800, Jason Wang <jasowang@redhat.com> wrote:
> We used to busy waiting on the cvq command this tends to be
> problematic since there no way for to schedule another process which
> may serve for the control virtqueue. This might be the case when the
> control virtqueue is emulated by software. This patch switches to use
> completion to allow the CPU to sleep instead of busy waiting for the
> cvq command.
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
> Changes since V1:
> - use completion for simplicity
> - don't try to harden the CVQ command which requires more thought
> Changes since RFC:
> - break the device when timeout
> - get buffer manually since the virtio core check more_used() instead
> ---
>  drivers/net/virtio_net.c | 21 ++++++++++++++-------
>  1 file changed, 14 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 2e56bbf86894..d3eb8fd6c9dc 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -19,6 +19,7 @@
>  #include <linux/average.h>
>  #include <linux/filter.h>
>  #include <linux/kernel.h>
> +#include <linux/completion.h>
>  #include <net/route.h>
>  #include <net/xdp.h>
>  #include <net/net_failover.h>
> @@ -295,6 +296,8 @@ struct virtnet_info {
>
>  	/* failover when STANDBY feature enabled */
>  	struct failover *failover;
> +
> +	struct completion completion;
>  };
>
>  struct padded_vnet_hdr {
> @@ -1709,6 +1712,13 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
>  	return !oom;
>  }
>
> +static void virtnet_cvq_done(struct virtqueue *cvq)
> +{
> +	struct virtnet_info *vi = cvq->vdev->priv;
> +
> +	complete(&vi->completion);
> +}
> +
>  static void skb_recv_done(struct virtqueue *rvq)
>  {
>  	struct virtnet_info *vi = rvq->vdev->priv;
> @@ -2169,12 +2179,8 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
>  	if (unlikely(!virtqueue_kick(vi->cvq)))
>  		return vi->ctrl->status == VIRTIO_NET_OK;
>
> -	/* Spin for a response, the kick causes an ioport write, trapping
> -	 * into the hypervisor, so the request should be handled immediately.
> -	 */
> -	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
> -	       !virtqueue_is_broken(vi->cvq))
> -		cpu_relax();
> +	wait_for_completion(&vi->completion);
> +	virtqueue_get_buf(vi->cvq, &tmp);
>
>  	return vi->ctrl->status == VIRTIO_NET_OK;
>  }
> @@ -3672,7 +3678,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
>
>  	/* Parameters for control virtqueue, if any */
>  	if (vi->has_cvq) {
> -		callbacks[total_vqs - 1] = NULL;
> +		callbacks[total_vqs - 1] = virtnet_cvq_done;

This depends on the interrupt, right?

I worry that there may be some devices that do not support interrupts on cvq.
Although this may not be in line with the SPEC, it may cause problems on the
devices that can work normally at present.

Thanks.


>  		names[total_vqs - 1] = "control";
>  	}
>
> @@ -4122,6 +4128,7 @@ static int virtnet_probe(struct virtio_device *vdev)
>  	if (vi->has_rss || vi->has_rss_hash_report)
>  		virtnet_init_default_rss(vi);
>
> +	init_completion(&vi->completion);
>  	enable_rx_mode_work(vi);
>
>  	/* serialize netdev register + virtio_device_ready() with ndo_open() */
> --
> 2.25.1
>
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 0/2] virtio-net: don't busy poll for cvq command
  2023-04-13  6:40 [PATCH net-next V2 0/2] virtio-net: don't busy poll for cvq command Jason Wang
  2023-04-13  6:40 ` [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue Jason Wang
  2023-04-13  6:40 ` [PATCH net-next V2 2/2] virtio-net: sleep instead of busy waiting for cvq command Jason Wang
@ 2023-04-13 13:02 ` Maxime Coquelin
  2023-04-13 13:05   ` Maxime Coquelin
       [not found] ` <20230413070408.630fa731@kernel.org>
  3 siblings, 1 reply; 25+ messages in thread
From: Maxime Coquelin @ 2023-04-13 13:02 UTC (permalink / raw)
  To: Jason Wang, mst
  Cc: xuanzhuo, linux-kernel, virtualization, eperezma, edumazet, kuba,
	pabeni, david.marchand, davem

Hi Jason,

On 4/13/23 08:40, Jason Wang wrote:
> Hi all:
> 
> The code used to busy poll for cvq command which turns out to have
> several side effects:
> 
> 1) infinite poll for buggy devices
> 2) bad interaction with scheduler
> 
> So this series tries to use sleep instead of busy polling. In this
> version, I take a step back: the hardening part is not implemented and
> leave for future investigation. We use to aggree to use interruptible
> sleep but it doesn't work for a general workqueue.
> 
> Please review.

Thanks for working on this.
My DPDK VDUSE RFC missed setting up the interrupt; as Xuan Zhuo highlighted,
it makes the vdpa dev add/del commands freeze:
[<0>] device_del+0x37/0x3d0
[<0>] device_unregister+0x13/0x60
[<0>] unregister_virtio_device+0x11/0x20
[<0>] device_release_driver_internal+0x193/0x200
[<0>] bus_remove_device+0xbf/0x130
[<0>] device_del+0x174/0x3d0
[<0>] device_unregister+0x13/0x60
[<0>] vdpa_nl_cmd_dev_del_set_doit+0x66/0xe0 [vdpa]
[<0>] genl_family_rcv_msg_doit.isra.0+0xb8/0x100
[<0>] genl_rcv_msg+0x151/0x290
[<0>] netlink_rcv_skb+0x54/0x100
[<0>] genl_rcv+0x24/0x40
[<0>] netlink_unicast+0x217/0x340
[<0>] netlink_sendmsg+0x23e/0x4a0
[<0>] sock_sendmsg+0x8f/0xa0
[<0>] __sys_sendto+0xfc/0x170
[<0>] __x64_sys_sendto+0x20/0x30
[<0>] do_syscall_64+0x59/0x90
[<0>] entry_SYSCALL_64_after_hwframe+0x72/0xdc

Once fixed on DPDK side (you can use my vduse_v1 branch [0] for
testing), it works fine:

Tested-by: Maxime Coquelin <maxime.coquelin@redhat.com>

For the potential missing interrupt with non-compliant devices, I guess
it could be handled with the hardening work as same thing could happen
if the VDUSE application crashed for example.

Regards,
Maxime

[0]:
> Thanks
> 
> Changes since V1:
> - use RTNL to synchronize rx mode worker
> - use completion for simplicity
> - don't try to harden CVQ command
> 
> Changes since RFC:
> 
> - switch to use BAD_RING in virtio_break_device()
> - check virtqueue_is_broken() after being woken up
> - use more_used() instead of virtqueue_get_buf() to allow caller to
>    get buffers afterwards
>    - break the virtio-net device when timeout
>    - get buffer manually since the virtio core check more_used() instead
> 
> Jason Wang (2):
>    virtio-net: convert rx mode setting to use workqueue
>    virtio-net: sleep instead of busy waiting for cvq command
> 
>   drivers/net/virtio_net.c | 76 ++++++++++++++++++++++++++++++++++------
>   1 file changed, 66 insertions(+), 10 deletions(-)
> 

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 0/2] virtio-net: don't busy poll for cvq command
  2023-04-13 13:02 ` [PATCH net-next V2 0/2] virtio-net: don't busy poll " Maxime Coquelin
@ 2023-04-13 13:05   ` Maxime Coquelin
  0 siblings, 0 replies; 25+ messages in thread
From: Maxime Coquelin @ 2023-04-13 13:05 UTC (permalink / raw)
  To: Jason Wang, mst
  Cc: xuanzhuo, linux-kernel, virtualization, eperezma, edumazet, kuba,
	pabeni, david.marchand, davem



On 4/13/23 15:02, Maxime Coquelin wrote:
> Hi Jason,
> 
> On 4/13/23 08:40, Jason Wang wrote:
>> Hi all:
>>
>> The code used to busy poll for cvq command which turns out to have
>> several side effects:
>>
>> 1) infinite poll for buggy devices
>> 2) bad interaction with scheduler
>>
>> So this series tries to use sleep instead of busy polling. In this
>> version, I take a step back: the hardening part is not implemented and
>> leave for future investigation. We use to aggree to use interruptible
>> sleep but it doesn't work for a general workqueue.
>>
>> Please review.
> 
> Thanks for working on this.
> My DPDK VDUSE RFC missed to set the interrupt, as Xuan Zhou highlighted
> it makes the vdpa dev add/del commands to freeze:
> [<0>] device_del+0x37/0x3d0
> [<0>] device_unregister+0x13/0x60
> [<0>] unregister_virtio_device+0x11/0x20
> [<0>] device_release_driver_internal+0x193/0x200
> [<0>] bus_remove_device+0xbf/0x130
> [<0>] device_del+0x174/0x3d0
> [<0>] device_unregister+0x13/0x60
> [<0>] vdpa_nl_cmd_dev_del_set_doit+0x66/0xe0 [vdpa]
> [<0>] genl_family_rcv_msg_doit.isra.0+0xb8/0x100
> [<0>] genl_rcv_msg+0x151/0x290
> [<0>] netlink_rcv_skb+0x54/0x100
> [<0>] genl_rcv+0x24/0x40
> [<0>] netlink_unicast+0x217/0x340
> [<0>] netlink_sendmsg+0x23e/0x4a0
> [<0>] sock_sendmsg+0x8f/0xa0
> [<0>] __sys_sendto+0xfc/0x170
> [<0>] __x64_sys_sendto+0x20/0x30
> [<0>] do_syscall_64+0x59/0x90
> [<0>] entry_SYSCALL_64_after_hwframe+0x72/0xdc
> 
> Once fixed on DPDK side (you can use my vduse_v1 branch [0] for
> testing), it works fine:
> 
> Tested-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> 
> For the potential missing interrupt with non-compliant devices, I guess
> it could be handled with the hardening work as same thing could happen
> if the VDUSE application crashed for example.
> 
> Regards,
> Maxime
> 
> [0]:

Better with the link...

[0]: https://gitlab.com/mcoquelin/dpdk-next-virtio/-/commits/vduse_v1/

>> Thanks
>>
>> Changes since V1:
>> - use RTNL to synchronize rx mode worker
>> - use completion for simplicity
>> - don't try to harden CVQ command
>>
>> Changes since RFC:
>>
>> - switch to use BAD_RING in virtio_break_device()
>> - check virtqueue_is_broken() after being woken up
>> - use more_used() instead of virtqueue_get_buf() to allow caller to
>>    get buffers afterwards
>>    - break the virtio-net device when timeout
>>    - get buffer manually since the virtio core check more_used() instead
>>
>> Jason Wang (2):
>>    virtio-net: convert rx mode setting to use workqueue
>>    virtio-net: sleep instead of busy waiting for cvq command
>>
>>   drivers/net/virtio_net.c | 76 ++++++++++++++++++++++++++++++++++------
>>   1 file changed, 66 insertions(+), 10 deletions(-)
>>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-04-13  6:40 ` [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue Jason Wang
@ 2023-04-13 16:25   ` Michael S. Tsirkin
  2023-04-14  5:04     ` Jason Wang
  0 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2023-04-13 16:25 UTC (permalink / raw)
  To: Jason Wang
  Cc: xuanzhuo, linux-kernel, virtualization, eperezma, edumazet,
	maxime.coquelin, kuba, pabeni, david.marchand, davem

On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> This patch convert rx mode setting to be done in a workqueue, this is
> a must for allow to sleep when waiting for the cvq command to
> response since current code is executed under addr spin lock.
> 
> Signed-off-by: Jason Wang <jasowang@redhat.com>

Frankly, I don't like this. It means that setting the RX mode, which was
previously reliable, now becomes unreliable.
- first of all, the configuration is no longer immediate,
  and there is no way for the driver to find out when
  it actually took effect
- second, if the device fails the command, this is also not
  propagated to the driver; again, no way for the driver to find out

VDUSE needs to be fixed to do tricks to fix this
without breaking normal drivers.


> ---
> Changes since V1:
> - use RTNL to synchronize rx mode worker
> ---
>  drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++---
>  1 file changed, 52 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index e2560b6f7980..2e56bbf86894 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -265,6 +265,12 @@ struct virtnet_info {
>  	/* Work struct for config space updates */
>  	struct work_struct config_work;
>  
> +	/* Work struct for config rx mode */
> +	struct work_struct rx_mode_work;
> +
> +	/* Is rx mode work enabled? */
> +	bool rx_mode_work_enabled;
> +
>  	/* Does the affinity hint is set for virtqueues? */
>  	bool affinity_hint_set;
>  
> @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
>  	spin_unlock_bh(&vi->refill_lock);
>  }
>  
> +static void enable_rx_mode_work(struct virtnet_info *vi)
> +{
> +	rtnl_lock();
> +	vi->rx_mode_work_enabled = true;
> +	rtnl_unlock();
> +}
> +
> +static void disable_rx_mode_work(struct virtnet_info *vi)
> +{
> +	rtnl_lock();
> +	vi->rx_mode_work_enabled = false;
> +	rtnl_unlock();
> +}
> +
>  static void virtqueue_napi_schedule(struct napi_struct *napi,
>  				    struct virtqueue *vq)
>  {
> @@ -2310,9 +2330,11 @@ static int virtnet_close(struct net_device *dev)
>  	return 0;
>  }
>  
> -static void virtnet_set_rx_mode(struct net_device *dev)
> +static void virtnet_rx_mode_work(struct work_struct *work)
>  {
> -	struct virtnet_info *vi = netdev_priv(dev);
> +	struct virtnet_info *vi =
> +		container_of(work, struct virtnet_info, rx_mode_work);
> +	struct net_device *dev = vi->dev;
>  	struct scatterlist sg[2];
>  	struct virtio_net_ctrl_mac *mac_data;
>  	struct netdev_hw_addr *ha;
> @@ -2325,6 +2347,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
>  	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
>  		return;
>  
> +	rtnl_lock();
> +
>  	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
>  	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
>  
> @@ -2342,14 +2366,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
>  		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
>  			 vi->ctrl->allmulti ? "en" : "dis");
>  
> +	netif_addr_lock_bh(dev);
> +
>  	uc_count = netdev_uc_count(dev);
>  	mc_count = netdev_mc_count(dev);
>  	/* MAC filter - use one buffer for both lists */
>  	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
>  		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
>  	mac_data = buf;
> -	if (!buf)
> +	if (!buf) {
> +		netif_addr_unlock_bh(dev);
> +		rtnl_unlock();
>  		return;
> +	}
>  
>  	sg_init_table(sg, 2);
>  
> @@ -2370,6 +2399,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
>  	netdev_for_each_mc_addr(ha, dev)
>  		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
>  
> +	netif_addr_unlock_bh(dev);
> +
>  	sg_set_buf(&sg[1], mac_data,
>  		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
>  
> @@ -2377,9 +2408,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
>  				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
>  		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
>  
> +	rtnl_unlock();
> +
>  	kfree(buf);
>  }
>  
> +static void virtnet_set_rx_mode(struct net_device *dev)
> +{
> +	struct virtnet_info *vi = netdev_priv(dev);
> +
> +	if (vi->rx_mode_work_enabled)
> +		schedule_work(&vi->rx_mode_work);
> +}
> +
>  static int virtnet_vlan_rx_add_vid(struct net_device *dev,
>  				   __be16 proto, u16 vid)
>  {
> @@ -3150,6 +3191,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
>  
>  	/* Make sure no work handler is accessing the device */
>  	flush_work(&vi->config_work);
> +	disable_rx_mode_work(vi);
> +	flush_work(&vi->rx_mode_work);
>  
>  	netif_tx_lock_bh(vi->dev);
>  	netif_device_detach(vi->dev);

So now configuration is not propagated to device.
Won't device later wake up in wrong state?


> @@ -3172,6 +3215,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
>  	virtio_device_ready(vdev);
>  
>  	enable_delayed_refill(vi);
> +	enable_rx_mode_work(vi);
>  
>  	if (netif_running(vi->dev)) {
>  		err = virtnet_open(vi->dev);
> @@ -3969,6 +4013,7 @@ static int virtnet_probe(struct virtio_device *vdev)
>  	vdev->priv = vi;
>  
>  	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
> +	INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
>  	spin_lock_init(&vi->refill_lock);
>  
>  	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
> @@ -4077,6 +4122,8 @@ static int virtnet_probe(struct virtio_device *vdev)
>  	if (vi->has_rss || vi->has_rss_hash_report)
>  		virtnet_init_default_rss(vi);
>  
> +	enable_rx_mode_work(vi);
> +
>  	/* serialize netdev register + virtio_device_ready() with ndo_open() */
>  	rtnl_lock();
>  
> @@ -4174,6 +4221,8 @@ static void virtnet_remove(struct virtio_device *vdev)
>  
>  	/* Make sure no work handler is accessing the device. */
>  	flush_work(&vi->config_work);
> +	disable_rx_mode_work(vi);
> +	flush_work(&vi->rx_mode_work);
>  
>  	unregister_netdev(vi->dev);
>  
> -- 
> 2.25.1

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 0/2] virtio-net: don't busy poll for cvq command
       [not found] ` <20230413070408.630fa731@kernel.org>
@ 2023-04-14  4:53   ` Jason Wang
  0 siblings, 0 replies; 25+ messages in thread
From: Jason Wang @ 2023-04-14  4:53 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: xuanzhuo, mst, linux-kernel, virtualization, eperezma, edumazet,
	maxime.coquelin, pabeni, david.marchand, davem

On Thu, Apr 13, 2023 at 10:04 PM Jakub Kicinski <kuba@kernel.org> wrote:
>
> On Thu, 13 Apr 2023 14:40:25 +0800 Jason Wang wrote:
> > The code used to busy poll for cvq command which turns out to have
> > several side effects:
> >
> > 1) infinite poll for buggy devices
> > 2) bad interaction with scheduler
> >
> > So this series tries to use sleep instead of busy polling. In this
> > version, I take a step back: the hardening part is not implemented and
> > leave for future investigation. We use to aggree to use interruptible
> > sleep but it doesn't work for a general workqueue.
>
> CC: netdev missing?

My bad. Will cc netdev for any discussion.

Thanks

>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-04-13 16:25   ` Michael S. Tsirkin
@ 2023-04-14  5:04     ` Jason Wang
  2023-04-14  7:21       ` Michael S. Tsirkin
  0 siblings, 1 reply; 25+ messages in thread
From: Jason Wang @ 2023-04-14  5:04 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, kuba, pabeni, david.marchand, davem

Forgot to cc netdev; adding.

On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> > This patch convert rx mode setting to be done in a workqueue, this is
> > a must for allow to sleep when waiting for the cvq command to
> > response since current code is executed under addr spin lock.
> >
> > Signed-off-by: Jason Wang <jasowang@redhat.com>
>
> I don't like this frankly. This means that setting RX mode which would
> previously be reliable, now becomes unreliable.

It is "unreliable" by design:

      void                    (*ndo_set_rx_mode)(struct net_device *dev);

> - first of all configuration is no longer immediate

Is immediacy a hard requirement? I can see a workqueue being used in at least:

mlx5e, ipoib, efx, ...

>   and there is no way for driver to find out when
>   it actually took effect

But we know rx mode is best effort, e.g. it doesn't support vhost, and we
have survived this for years.

> - second, if device fails command, this is also not
>   propagated to driver, again no way for driver to find out
>
> VDUSE needs to be fixed to do tricks to fix this
> without breaking normal drivers.

It's not specific to VDUSE. For example, when using virtio-net in the
UP environment with any software cvq (like mlx5 via vDPA or cma
transport).

Thanks

>
>
> > ---
> > Changes since V1:
> > - use RTNL to synchronize rx mode worker
> > ---
> >  drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++---
> >  1 file changed, 52 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index e2560b6f7980..2e56bbf86894 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -265,6 +265,12 @@ struct virtnet_info {
> >       /* Work struct for config space updates */
> >       struct work_struct config_work;
> >
> > +     /* Work struct for config rx mode */
> > +     struct work_struct rx_mode_work;
> > +
> > +     /* Is rx mode work enabled? */
> > +     bool rx_mode_work_enabled;
> > +
> >       /* Does the affinity hint is set for virtqueues? */
> >       bool affinity_hint_set;
> >
> > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
> >       spin_unlock_bh(&vi->refill_lock);
> >  }
> >
> > +static void enable_rx_mode_work(struct virtnet_info *vi)
> > +{
> > +     rtnl_lock();
> > +     vi->rx_mode_work_enabled = true;
> > +     rtnl_unlock();
> > +}
> > +
> > +static void disable_rx_mode_work(struct virtnet_info *vi)
> > +{
> > +     rtnl_lock();
> > +     vi->rx_mode_work_enabled = false;
> > +     rtnl_unlock();
> > +}
> > +
> >  static void virtqueue_napi_schedule(struct napi_struct *napi,
> >                                   struct virtqueue *vq)
> >  {
> > @@ -2310,9 +2330,11 @@ static int virtnet_close(struct net_device *dev)
> >       return 0;
> >  }
> >
> > -static void virtnet_set_rx_mode(struct net_device *dev)
> > +static void virtnet_rx_mode_work(struct work_struct *work)
> >  {
> > -     struct virtnet_info *vi = netdev_priv(dev);
> > +     struct virtnet_info *vi =
> > +             container_of(work, struct virtnet_info, rx_mode_work);
> > +     struct net_device *dev = vi->dev;
> >       struct scatterlist sg[2];
> >       struct virtio_net_ctrl_mac *mac_data;
> >       struct netdev_hw_addr *ha;
> > @@ -2325,6 +2347,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> >       if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
> >               return;
> >
> > +     rtnl_lock();
> > +
> >       vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
> >       vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
> >
> > @@ -2342,14 +2366,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> >               dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
> >                        vi->ctrl->allmulti ? "en" : "dis");
> >
> > +     netif_addr_lock_bh(dev);
> > +
> >       uc_count = netdev_uc_count(dev);
> >       mc_count = netdev_mc_count(dev);
> >       /* MAC filter - use one buffer for both lists */
> >       buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
> >                     (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
> >       mac_data = buf;
> > -     if (!buf)
> > +     if (!buf) {
> > +             netif_addr_unlock_bh(dev);
> > +             rtnl_unlock();
> >               return;
> > +     }
> >
> >       sg_init_table(sg, 2);
> >
> > @@ -2370,6 +2399,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> >       netdev_for_each_mc_addr(ha, dev)
> >               memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
> >
> > +     netif_addr_unlock_bh(dev);
> > +
> >       sg_set_buf(&sg[1], mac_data,
> >                  sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
> >
> > @@ -2377,9 +2408,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> >                                 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
> >               dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
> >
> > +     rtnl_unlock();
> > +
> >       kfree(buf);
> >  }
> >
> > +static void virtnet_set_rx_mode(struct net_device *dev)
> > +{
> > +     struct virtnet_info *vi = netdev_priv(dev);
> > +
> > +     if (vi->rx_mode_work_enabled)
> > +             schedule_work(&vi->rx_mode_work);
> > +}
> > +
> >  static int virtnet_vlan_rx_add_vid(struct net_device *dev,
> >                                  __be16 proto, u16 vid)
> >  {
> > @@ -3150,6 +3191,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
> >
> >       /* Make sure no work handler is accessing the device */
> >       flush_work(&vi->config_work);
> > +     disable_rx_mode_work(vi);
> > +     flush_work(&vi->rx_mode_work);
> >
> >       netif_tx_lock_bh(vi->dev);
> >       netif_device_detach(vi->dev);
>
> So now configuration is not propagated to device.
> Won't device later wake up in wrong state?
>
>
> > @@ -3172,6 +3215,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
> >       virtio_device_ready(vdev);
> >
> >       enable_delayed_refill(vi);
> > +     enable_rx_mode_work(vi);
> >
> >       if (netif_running(vi->dev)) {
> >               err = virtnet_open(vi->dev);
> > @@ -3969,6 +4013,7 @@ static int virtnet_probe(struct virtio_device *vdev)
> >       vdev->priv = vi;
> >
> >       INIT_WORK(&vi->config_work, virtnet_config_changed_work);
> > +     INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
> >       spin_lock_init(&vi->refill_lock);
> >
> >       if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
> > @@ -4077,6 +4122,8 @@ static int virtnet_probe(struct virtio_device *vdev)
> >       if (vi->has_rss || vi->has_rss_hash_report)
> >               virtnet_init_default_rss(vi);
> >
> > +     enable_rx_mode_work(vi);
> > +
> >       /* serialize netdev register + virtio_device_ready() with ndo_open() */
> >       rtnl_lock();
> >
> > @@ -4174,6 +4221,8 @@ static void virtnet_remove(struct virtio_device *vdev)
> >
> >       /* Make sure no work handler is accessing the device. */
> >       flush_work(&vi->config_work);
> > +     disable_rx_mode_work(vi);
> > +     flush_work(&vi->rx_mode_work);
> >
> >       unregister_netdev(vi->dev);
> >
> > --
> > 2.25.1
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 2/2] virtio-net: sleep instead of busy waiting for cvq command
  2023-04-13  7:27   ` Xuan Zhuo
@ 2023-04-14  5:09     ` Jason Wang
  2023-04-14  5:10       ` Jason Wang
  0 siblings, 1 reply; 25+ messages in thread
From: Jason Wang @ 2023-04-14  5:09 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: mst, linux-kernel, virtualization, eperezma, edumazet,
	maxime.coquelin, kuba, pabeni, david.marchand, davem

On Thu, Apr 13, 2023 at 3:31 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Thu, 13 Apr 2023 14:40:27 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > We used to busy waiting on the cvq command this tends to be
> > problematic since there no way for to schedule another process which
> > may serve for the control virtqueue. This might be the case when the
> > control virtqueue is emulated by software. This patch switches to use
> > completion to allow the CPU to sleep instead of busy waiting for the
> > cvq command.
> >
> > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > ---
> > Changes since V1:
> > - use completion for simplicity
> > - don't try to harden the CVQ command which requires more thought
> > Changes since RFC:
> > - break the device when timeout
> > - get buffer manually since the virtio core check more_used() instead
> > ---
> >  drivers/net/virtio_net.c | 21 ++++++++++++++-------
> >  1 file changed, 14 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 2e56bbf86894..d3eb8fd6c9dc 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -19,6 +19,7 @@
> >  #include <linux/average.h>
> >  #include <linux/filter.h>
> >  #include <linux/kernel.h>
> > +#include <linux/completion.h>
> >  #include <net/route.h>
> >  #include <net/xdp.h>
> >  #include <net/net_failover.h>
> > @@ -295,6 +296,8 @@ struct virtnet_info {
> >
> >       /* failover when STANDBY feature enabled */
> >       struct failover *failover;
> > +
> > +     struct completion completion;
> >  };
> >
> >  struct padded_vnet_hdr {
> > @@ -1709,6 +1712,13 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
> >       return !oom;
> >  }
> >
> > +static void virtnet_cvq_done(struct virtqueue *cvq)
> > +{
> > +     struct virtnet_info *vi = cvq->vdev->priv;
> > +
> > +     complete(&vi->completion);
> > +}
> > +
> >  static void skb_recv_done(struct virtqueue *rvq)
> >  {
> >       struct virtnet_info *vi = rvq->vdev->priv;
> > @@ -2169,12 +2179,8 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
> >       if (unlikely(!virtqueue_kick(vi->cvq)))
> >               return vi->ctrl->status == VIRTIO_NET_OK;
> >
> > -     /* Spin for a response, the kick causes an ioport write, trapping
> > -      * into the hypervisor, so the request should be handled immediately.
> > -      */
> > -     while (!virtqueue_get_buf(vi->cvq, &tmp) &&
> > -            !virtqueue_is_broken(vi->cvq))
> > -             cpu_relax();
> > +     wait_for_completion(&vi->completion);
> > +     virtqueue_get_buf(vi->cvq, &tmp);
> >
> >       return vi->ctrl->status == VIRTIO_NET_OK;
> >  }
> > @@ -3672,7 +3678,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
> >
> >       /* Parameters for control virtqueue, if any */
> >       if (vi->has_cvq) {
> > -             callbacks[total_vqs - 1] = NULL;
> > +             callbacks[total_vqs - 1] = virtnet_cvq_done;
>
> This depends the interrupt, right?

Not necessarily; we have an ISR for at least PCI:

static irqreturn_t vp_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        u8 isr;

        /* reading the ISR has the effect of also clearing it so it's very
         * important to save off the value. */
        isr = ioread8(vp_dev->isr);
...
}

>
> I worry that there may be some devices that may not support interruption on cvq.

Is the device using INTX or MSI-X?

> Although this may not be in line with SPEC, it may cause problem on the devices
> that can work normally at present.

Then the implementation is buggy; it might not work for drivers other
than Linux. Working around such a buggy implementation is suboptimal.

Thanks

>
> Thanks.
>
>
> >               names[total_vqs - 1] = "control";
> >       }
> >
> > @@ -4122,6 +4128,7 @@ static int virtnet_probe(struct virtio_device *vdev)
> >       if (vi->has_rss || vi->has_rss_hash_report)
> >               virtnet_init_default_rss(vi);
> >
> > +     init_completion(&vi->completion);
> >       enable_rx_mode_work(vi);
> >
> >       /* serialize netdev register + virtio_device_ready() with ndo_open() */
> > --
> > 2.25.1
> >
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 2/2] virtio-net: sleep instead of busy waiting for cvq command
  2023-04-14  5:09     ` Jason Wang
@ 2023-04-14  5:10       ` Jason Wang
  0 siblings, 0 replies; 25+ messages in thread
From: Jason Wang @ 2023-04-14  5:10 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: mst, netdev, linux-kernel, virtualization, eperezma, edumazet,
	maxime.coquelin, kuba, pabeni, david.marchand, davem

Adding netdev.

On Fri, Apr 14, 2023 at 1:09 PM Jason Wang <jasowang@redhat.com> wrote:
>
> On Thu, Apr 13, 2023 at 3:31 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Thu, 13 Apr 2023 14:40:27 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > We used to busy waiting on the cvq command this tends to be
> > > problematic since there no way for to schedule another process which
> > > may serve for the control virtqueue. This might be the case when the
> > > control virtqueue is emulated by software. This patch switches to use
> > > completion to allow the CPU to sleep instead of busy waiting for the
> > > cvq command.
> > >
> > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > ---
> > > Changes since V1:
> > > - use completion for simplicity
> > > - don't try to harden the CVQ command which requires more thought
> > > Changes since RFC:
> > > - break the device when timeout
> > > - get buffer manually since the virtio core check more_used() instead
> > > ---
> > >  drivers/net/virtio_net.c | 21 ++++++++++++++-------
> > >  1 file changed, 14 insertions(+), 7 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index 2e56bbf86894..d3eb8fd6c9dc 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -19,6 +19,7 @@
> > >  #include <linux/average.h>
> > >  #include <linux/filter.h>
> > >  #include <linux/kernel.h>
> > > +#include <linux/completion.h>
> > >  #include <net/route.h>
> > >  #include <net/xdp.h>
> > >  #include <net/net_failover.h>
> > > @@ -295,6 +296,8 @@ struct virtnet_info {
> > >
> > >       /* failover when STANDBY feature enabled */
> > >       struct failover *failover;
> > > +
> > > +     struct completion completion;
> > >  };
> > >
> > >  struct padded_vnet_hdr {
> > > @@ -1709,6 +1712,13 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
> > >       return !oom;
> > >  }
> > >
> > > +static void virtnet_cvq_done(struct virtqueue *cvq)
> > > +{
> > > +     struct virtnet_info *vi = cvq->vdev->priv;
> > > +
> > > +     complete(&vi->completion);
> > > +}
> > > +
> > >  static void skb_recv_done(struct virtqueue *rvq)
> > >  {
> > >       struct virtnet_info *vi = rvq->vdev->priv;
> > > @@ -2169,12 +2179,8 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
> > >       if (unlikely(!virtqueue_kick(vi->cvq)))
> > >               return vi->ctrl->status == VIRTIO_NET_OK;
> > >
> > > -     /* Spin for a response, the kick causes an ioport write, trapping
> > > -      * into the hypervisor, so the request should be handled immediately.
> > > -      */
> > > -     while (!virtqueue_get_buf(vi->cvq, &tmp) &&
> > > -            !virtqueue_is_broken(vi->cvq))
> > > -             cpu_relax();
> > > +     wait_for_completion(&vi->completion);
> > > +     virtqueue_get_buf(vi->cvq, &tmp);
> > >
> > >       return vi->ctrl->status == VIRTIO_NET_OK;
> > >  }
> > > @@ -3672,7 +3678,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
> > >
> > >       /* Parameters for control virtqueue, if any */
> > >       if (vi->has_cvq) {
> > > -             callbacks[total_vqs - 1] = NULL;
> > > +             callbacks[total_vqs - 1] = virtnet_cvq_done;
> >
> > This depends the interrupt, right?
>
> Not necessarily, we have ISR for at last PCI:
>
> static irqreturn_t vp_interrupt(int irq, void *opaque)
> {
>         struct virtio_pci_device *vp_dev = opaque;
>         u8 isr;
>
>         /* reading the ISR has the effect of also clearing it so it's very
>          * important to save off the value. */
>         isr = ioread8(vp_dev->isr);
> ...
> }
>
> >
> > I worry that there may be some devices that may not support interruption on cvq.
>
> Is the device using INTX or MSI-X?
>
> > Although this may not be in line with SPEC, it may cause problem on the devices
> > that can work normally at present.
>
> Then the implementation is buggy, it might not work for drivers other
> than Linux. Working around such buggy implementation is suboptimal.
>
> Thanks
>
> >
> > Thanks.
> >
> >
> > >               names[total_vqs - 1] = "control";
> > >       }
> > >
> > > @@ -4122,6 +4128,7 @@ static int virtnet_probe(struct virtio_device *vdev)
> > >       if (vi->has_rss || vi->has_rss_hash_report)
> > >               virtnet_init_default_rss(vi);
> > >
> > > +     init_completion(&vi->completion);
> > >       enable_rx_mode_work(vi);
> > >
> > >       /* serialize netdev register + virtio_device_ready() with ndo_open() */
> > > --
> > > 2.25.1
> > >
> >

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-04-14  5:04     ` Jason Wang
@ 2023-04-14  7:21       ` Michael S. Tsirkin
  2023-04-17  3:40         ` Jason Wang
  0 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2023-04-14  7:21 UTC (permalink / raw)
  To: Jason Wang
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, kuba, pabeni, david.marchand, davem

On Fri, Apr 14, 2023 at 01:04:15PM +0800, Jason Wang wrote:
> Forget to cc netdev, adding.
> 
> On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> > > This patch convert rx mode setting to be done in a workqueue, this is
> > > a must for allow to sleep when waiting for the cvq command to
> > > response since current code is executed under addr spin lock.
> > >
> > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> >
> > I don't like this frankly. This means that setting RX mode which would
> > previously be reliable, now becomes unreliable.
> 
> It is "unreliable" by design:
> 
>       void                    (*ndo_set_rx_mode)(struct net_device *dev);
> 
> > - first of all configuration is no longer immediate
> 
> Is immediate a hard requirement? I can see a workqueue is used at least:
> 
> mlx5e, ipoib, efx, ...
> 
> >   and there is no way for driver to find out when
> >   it actually took effect
> 
> But we know rx mode is best effort e.g it doesn't support vhost and we
> survive from this for years.
> 
> > - second, if device fails command, this is also not
> >   propagated to driver, again no way for driver to find out
> >
> > VDUSE needs to be fixed to do tricks to fix this
> > without breaking normal drivers.
> 
> It's not specific to VDUSE. For example, when using virtio-net in the
> UP environment with any software cvq (like mlx5 via vDPA or cma
> transport).
> 
> Thanks

Hmm. Can we differentiate between these use-cases?

> >
> >
> > > ---
> > > Changes since V1:
> > > - use RTNL to synchronize rx mode worker
> > > ---
> > >  drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++---
> > >  1 file changed, 52 insertions(+), 3 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index e2560b6f7980..2e56bbf86894 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -265,6 +265,12 @@ struct virtnet_info {
> > >       /* Work struct for config space updates */
> > >       struct work_struct config_work;
> > >
> > > +     /* Work struct for config rx mode */
> > > +     struct work_struct rx_mode_work;
> > > +
> > > +     /* Is rx mode work enabled? */
> > > +     bool rx_mode_work_enabled;
> > > +
> > >       /* Does the affinity hint is set for virtqueues? */
> > >       bool affinity_hint_set;
> > >
> > > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
> > >       spin_unlock_bh(&vi->refill_lock);
> > >  }
> > >
> > > +static void enable_rx_mode_work(struct virtnet_info *vi)
> > > +{
> > > +     rtnl_lock();
> > > +     vi->rx_mode_work_enabled = true;
> > > +     rtnl_unlock();
> > > +}
> > > +
> > > +static void disable_rx_mode_work(struct virtnet_info *vi)
> > > +{
> > > +     rtnl_lock();
> > > +     vi->rx_mode_work_enabled = false;
> > > +     rtnl_unlock();
> > > +}
> > > +
> > >  static void virtqueue_napi_schedule(struct napi_struct *napi,
> > >                                   struct virtqueue *vq)
> > >  {
> > > @@ -2310,9 +2330,11 @@ static int virtnet_close(struct net_device *dev)
> > >       return 0;
> > >  }
> > >
> > > -static void virtnet_set_rx_mode(struct net_device *dev)
> > > +static void virtnet_rx_mode_work(struct work_struct *work)
> > >  {
> > > -     struct virtnet_info *vi = netdev_priv(dev);
> > > +     struct virtnet_info *vi =
> > > +             container_of(work, struct virtnet_info, rx_mode_work);
> > > +     struct net_device *dev = vi->dev;
> > >       struct scatterlist sg[2];
> > >       struct virtio_net_ctrl_mac *mac_data;
> > >       struct netdev_hw_addr *ha;
> > > @@ -2325,6 +2347,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> > >       if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
> > >               return;
> > >
> > > +     rtnl_lock();
> > > +
> > >       vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
> > >       vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
> > >
> > > @@ -2342,14 +2366,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> > >               dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
> > >                        vi->ctrl->allmulti ? "en" : "dis");
> > >
> > > +     netif_addr_lock_bh(dev);
> > > +
> > >       uc_count = netdev_uc_count(dev);
> > >       mc_count = netdev_mc_count(dev);
> > >       /* MAC filter - use one buffer for both lists */
> > >       buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
> > >                     (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
> > >       mac_data = buf;
> > > -     if (!buf)
> > > +     if (!buf) {
> > > +             netif_addr_unlock_bh(dev);
> > > +             rtnl_unlock();
> > >               return;
> > > +     }
> > >
> > >       sg_init_table(sg, 2);
> > >
> > > @@ -2370,6 +2399,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> > >       netdev_for_each_mc_addr(ha, dev)
> > >               memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
> > >
> > > +     netif_addr_unlock_bh(dev);
> > > +
> > >       sg_set_buf(&sg[1], mac_data,
> > >                  sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
> > >
> > > @@ -2377,9 +2408,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> > >                                 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
> > >               dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
> > >
> > > +     rtnl_unlock();
> > > +
> > >       kfree(buf);
> > >  }
> > >
> > > +static void virtnet_set_rx_mode(struct net_device *dev)
> > > +{
> > > +     struct virtnet_info *vi = netdev_priv(dev);
> > > +
> > > +     if (vi->rx_mode_work_enabled)
> > > +             schedule_work(&vi->rx_mode_work);
> > > +}
> > > +
> > >  static int virtnet_vlan_rx_add_vid(struct net_device *dev,
> > >                                  __be16 proto, u16 vid)
> > >  {
> > > @@ -3150,6 +3191,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
> > >
> > >       /* Make sure no work handler is accessing the device */
> > >       flush_work(&vi->config_work);
> > > +     disable_rx_mode_work(vi);
> > > +     flush_work(&vi->rx_mode_work);
> > >
> > >       netif_tx_lock_bh(vi->dev);
> > >       netif_device_detach(vi->dev);
> >
> > So now configuration is not propagated to device.
> > Won't device later wake up in wrong state?
> >
> >
> > > @@ -3172,6 +3215,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
> > >       virtio_device_ready(vdev);
> > >
> > >       enable_delayed_refill(vi);
> > > +     enable_rx_mode_work(vi);
> > >
> > >       if (netif_running(vi->dev)) {
> > >               err = virtnet_open(vi->dev);
> > > @@ -3969,6 +4013,7 @@ static int virtnet_probe(struct virtio_device *vdev)
> > >       vdev->priv = vi;
> > >
> > >       INIT_WORK(&vi->config_work, virtnet_config_changed_work);
> > > +     INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
> > >       spin_lock_init(&vi->refill_lock);
> > >
> > >       if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
> > > @@ -4077,6 +4122,8 @@ static int virtnet_probe(struct virtio_device *vdev)
> > >       if (vi->has_rss || vi->has_rss_hash_report)
> > >               virtnet_init_default_rss(vi);
> > >
> > > +     enable_rx_mode_work(vi);
> > > +
> > >       /* serialize netdev register + virtio_device_ready() with ndo_open() */
> > >       rtnl_lock();
> > >
> > > @@ -4174,6 +4221,8 @@ static void virtnet_remove(struct virtio_device *vdev)
> > >
> > >       /* Make sure no work handler is accessing the device. */
> > >       flush_work(&vi->config_work);
> > > +     disable_rx_mode_work(vi);
> > > +     flush_work(&vi->rx_mode_work);
> > >
> > >       unregister_netdev(vi->dev);
> > >
> > > --
> > > 2.25.1
> >

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-04-14  7:21       ` Michael S. Tsirkin
@ 2023-04-17  3:40         ` Jason Wang
  2023-05-05  3:46           ` Jason Wang
  2023-05-10  5:32           ` Michael S. Tsirkin
  0 siblings, 2 replies; 25+ messages in thread
From: Jason Wang @ 2023-04-17  3:40 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, kuba, pabeni, david.marchand, davem

On Fri, Apr 14, 2023 at 3:21 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Fri, Apr 14, 2023 at 01:04:15PM +0800, Jason Wang wrote:
> > Forget to cc netdev, adding.
> >
> > On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > >
> > > On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> > > > This patch convert rx mode setting to be done in a workqueue, this is
> > > > a must for allow to sleep when waiting for the cvq command to
> > > > response since current code is executed under addr spin lock.
> > > >
> > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > >
> > > I don't like this frankly. This means that setting RX mode which would
> > > previously be reliable, now becomes unreliable.
> >
> > It is "unreliable" by design:
> >
> >       void                    (*ndo_set_rx_mode)(struct net_device *dev);
> >
> > > - first of all configuration is no longer immediate
> >
> > Is immediate a hard requirement? I can see a workqueue is used at least:
> >
> > mlx5e, ipoib, efx, ...
> >
> > >   and there is no way for driver to find out when
> > >   it actually took effect
> >
> > But we know rx mode is best effort e.g it doesn't support vhost and we
> > survive from this for years.
> >
> > > - second, if device fails command, this is also not
> > >   propagated to driver, again no way for driver to find out
> > >
> > > VDUSE needs to be fixed to do tricks to fix this
> > > without breaking normal drivers.
> >
> > It's not specific to VDUSE. For example, when using virtio-net in the
> > UP environment with any software cvq (like mlx5 via vDPA or cma
> > transport).
> >
> > Thanks
>
> Hmm. Can we differentiate between these use-cases?

It doesn't look easy since we are a driver for the virtio bus. The
underlying transport details are hidden from virtio-net.

Or do you have any ideas on this?

Thanks

>
> > >
> > >
> > > > ---
> > > > Changes since V1:
> > > > - use RTNL to synchronize rx mode worker
> > > > ---
> > > >  drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++---
> > > >  1 file changed, 52 insertions(+), 3 deletions(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index e2560b6f7980..2e56bbf86894 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -265,6 +265,12 @@ struct virtnet_info {
> > > >       /* Work struct for config space updates */
> > > >       struct work_struct config_work;
> > > >
> > > > +     /* Work struct for config rx mode */
> > > > +     struct work_struct rx_mode_work;
> > > > +
> > > > +     /* Is rx mode work enabled? */
> > > > +     bool rx_mode_work_enabled;
> > > > +
> > > >       /* Does the affinity hint is set for virtqueues? */
> > > >       bool affinity_hint_set;
> > > >
> > > > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
> > > >       spin_unlock_bh(&vi->refill_lock);
> > > >  }
> > > >
> > > > +static void enable_rx_mode_work(struct virtnet_info *vi)
> > > > +{
> > > > +     rtnl_lock();
> > > > +     vi->rx_mode_work_enabled = true;
> > > > +     rtnl_unlock();
> > > > +}
> > > > +
> > > > +static void disable_rx_mode_work(struct virtnet_info *vi)
> > > > +{
> > > > +     rtnl_lock();
> > > > +     vi->rx_mode_work_enabled = false;
> > > > +     rtnl_unlock();
> > > > +}
> > > > +
> > > >  static void virtqueue_napi_schedule(struct napi_struct *napi,
> > > >                                   struct virtqueue *vq)
> > > >  {
> > > > @@ -2310,9 +2330,11 @@ static int virtnet_close(struct net_device *dev)
> > > >       return 0;
> > > >  }
> > > >
> > > > -static void virtnet_set_rx_mode(struct net_device *dev)
> > > > +static void virtnet_rx_mode_work(struct work_struct *work)
> > > >  {
> > > > -     struct virtnet_info *vi = netdev_priv(dev);
> > > > +     struct virtnet_info *vi =
> > > > +             container_of(work, struct virtnet_info, rx_mode_work);
> > > > +     struct net_device *dev = vi->dev;
> > > >       struct scatterlist sg[2];
> > > >       struct virtio_net_ctrl_mac *mac_data;
> > > >       struct netdev_hw_addr *ha;
> > > > @@ -2325,6 +2347,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> > > >       if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
> > > >               return;
> > > >
> > > > +     rtnl_lock();
> > > > +
> > > >       vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
> > > >       vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
> > > >
> > > > @@ -2342,14 +2366,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> > > >               dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
> > > >                        vi->ctrl->allmulti ? "en" : "dis");
> > > >
> > > > +     netif_addr_lock_bh(dev);
> > > > +
> > > >       uc_count = netdev_uc_count(dev);
> > > >       mc_count = netdev_mc_count(dev);
> > > >       /* MAC filter - use one buffer for both lists */
> > > >       buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
> > > >                     (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
> > > >       mac_data = buf;
> > > > -     if (!buf)
> > > > +     if (!buf) {
> > > > +             netif_addr_unlock_bh(dev);
> > > > +             rtnl_unlock();
> > > >               return;
> > > > +     }
> > > >
> > > >       sg_init_table(sg, 2);
> > > >
> > > > @@ -2370,6 +2399,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> > > >       netdev_for_each_mc_addr(ha, dev)
> > > >               memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
> > > >
> > > > +     netif_addr_unlock_bh(dev);
> > > > +
> > > >       sg_set_buf(&sg[1], mac_data,
> > > >                  sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
> > > >
> > > > @@ -2377,9 +2408,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> > > >                                 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
> > > >               dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
> > > >
> > > > +     rtnl_unlock();
> > > > +
> > > >       kfree(buf);
> > > >  }
> > > >
> > > > +static void virtnet_set_rx_mode(struct net_device *dev)
> > > > +{
> > > > +     struct virtnet_info *vi = netdev_priv(dev);
> > > > +
> > > > +     if (vi->rx_mode_work_enabled)
> > > > +             schedule_work(&vi->rx_mode_work);
> > > > +}
> > > > +
> > > >  static int virtnet_vlan_rx_add_vid(struct net_device *dev,
> > > >                                  __be16 proto, u16 vid)
> > > >  {
> > > > @@ -3150,6 +3191,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
> > > >
> > > >       /* Make sure no work handler is accessing the device */
> > > >       flush_work(&vi->config_work);
> > > > +     disable_rx_mode_work(vi);
> > > > +     flush_work(&vi->rx_mode_work);
> > > >
> > > >       netif_tx_lock_bh(vi->dev);
> > > >       netif_device_detach(vi->dev);
> > >
> > > So now configuration is not propagated to device.
> > > Won't device later wake up in wrong state?
> > >
> > >
> > > > @@ -3172,6 +3215,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
> > > >       virtio_device_ready(vdev);
> > > >
> > > >       enable_delayed_refill(vi);
> > > > +     enable_rx_mode_work(vi);
> > > >
> > > >       if (netif_running(vi->dev)) {
> > > >               err = virtnet_open(vi->dev);
> > > > @@ -3969,6 +4013,7 @@ static int virtnet_probe(struct virtio_device *vdev)
> > > >       vdev->priv = vi;
> > > >
> > > >       INIT_WORK(&vi->config_work, virtnet_config_changed_work);
> > > > +     INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
> > > >       spin_lock_init(&vi->refill_lock);
> > > >
> > > >       if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
> > > > @@ -4077,6 +4122,8 @@ static int virtnet_probe(struct virtio_device *vdev)
> > > >       if (vi->has_rss || vi->has_rss_hash_report)
> > > >               virtnet_init_default_rss(vi);
> > > >
> > > > +     enable_rx_mode_work(vi);
> > > > +
> > > >       /* serialize netdev register + virtio_device_ready() with ndo_open() */
> > > >       rtnl_lock();
> > > >
> > > > @@ -4174,6 +4221,8 @@ static void virtnet_remove(struct virtio_device *vdev)
> > > >
> > > >       /* Make sure no work handler is accessing the device. */
> > > >       flush_work(&vi->config_work);
> > > > +     disable_rx_mode_work(vi);
> > > > +     flush_work(&vi->rx_mode_work);
> > > >
> > > >       unregister_netdev(vi->dev);
> > > >
> > > > --
> > > > 2.25.1
> > >
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-04-17  3:40         ` Jason Wang
@ 2023-05-05  3:46           ` Jason Wang
  2023-05-10  5:32           ` Michael S. Tsirkin
  1 sibling, 0 replies; 25+ messages in thread
From: Jason Wang @ 2023-05-05  3:46 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, kuba, pabeni, david.marchand, davem


在 2023/4/17 11:40, Jason Wang 写道:
> On Fri, Apr 14, 2023 at 3:21 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>> On Fri, Apr 14, 2023 at 01:04:15PM +0800, Jason Wang wrote:
>>> Forget to cc netdev, adding.
>>>
>>> On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>>>> On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
>>>>> This patch convert rx mode setting to be done in a workqueue, this is
>>>>> a must for allow to sleep when waiting for the cvq command to
>>>>> response since current code is executed under addr spin lock.
>>>>>
>>>>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>>>> I don't like this frankly. This means that setting RX mode which would
>>>> previously be reliable, now becomes unreliable.
>>> It is "unreliable" by design:
>>>
>>>        void                    (*ndo_set_rx_mode)(struct net_device *dev);
>>>
>>>> - first of all configuration is no longer immediate
>>> Is immediate a hard requirement? I can see a workqueue is used at least:
>>>
>>> mlx5e, ipoib, efx, ...
>>>
>>>>    and there is no way for driver to find out when
>>>>    it actually took effect
>>> But we know rx mode is best effort e.g it doesn't support vhost and we
>>> survive from this for years.
>>>
>>>> - second, if device fails command, this is also not
>>>>    propagated to driver, again no way for driver to find out
>>>>
>>>> VDUSE needs to be fixed to do tricks to fix this
>>>> without breaking normal drivers.
>>> It's not specific to VDUSE. For example, when using virtio-net in the
>>> UP environment with any software cvq (like mlx5 via vDPA or cma
>>> transport).
>>>
>>> Thanks
>> Hmm. Can we differentiate between these use-cases?
> It doesn't look easy since we are drivers for virtio bus. Underlayer
> details were hidden from virtio-net.
>
> Or do you have any ideas on this?


Michael, any thought on this?

Thanks


>
> Thanks
>
>>>>
>>>>> ---
>>>>> Changes since V1:
>>>>> - use RTNL to synchronize rx mode worker
>>>>> ---
>>>>>   drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++---
>>>>>   1 file changed, 52 insertions(+), 3 deletions(-)
>>>>>
>>>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>>>> index e2560b6f7980..2e56bbf86894 100644
>>>>> --- a/drivers/net/virtio_net.c
>>>>> +++ b/drivers/net/virtio_net.c
>>>>> @@ -265,6 +265,12 @@ struct virtnet_info {
>>>>>        /* Work struct for config space updates */
>>>>>        struct work_struct config_work;
>>>>>
>>>>> +     /* Work struct for config rx mode */
>>>>> +     struct work_struct rx_mode_work;
>>>>> +
>>>>> +     /* Is rx mode work enabled? */
>>>>> +     bool rx_mode_work_enabled;
>>>>> +
>>>>>        /* Does the affinity hint is set for virtqueues? */
>>>>>        bool affinity_hint_set;
>>>>>
>>>>> @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
>>>>>        spin_unlock_bh(&vi->refill_lock);
>>>>>   }
>>>>>
>>>>> +static void enable_rx_mode_work(struct virtnet_info *vi)
>>>>> +{
>>>>> +     rtnl_lock();
>>>>> +     vi->rx_mode_work_enabled = true;
>>>>> +     rtnl_unlock();
>>>>> +}
>>>>> +
>>>>> +static void disable_rx_mode_work(struct virtnet_info *vi)
>>>>> +{
>>>>> +     rtnl_lock();
>>>>> +     vi->rx_mode_work_enabled = false;
>>>>> +     rtnl_unlock();
>>>>> +}
>>>>> +
>>>>>   static void virtqueue_napi_schedule(struct napi_struct *napi,
>>>>>                                    struct virtqueue *vq)
>>>>>   {
>>>>> @@ -2310,9 +2330,11 @@ static int virtnet_close(struct net_device *dev)
>>>>>        return 0;
>>>>>   }
>>>>>
>>>>> -static void virtnet_set_rx_mode(struct net_device *dev)
>>>>> +static void virtnet_rx_mode_work(struct work_struct *work)
>>>>>   {
>>>>> -     struct virtnet_info *vi = netdev_priv(dev);
>>>>> +     struct virtnet_info *vi =
>>>>> +             container_of(work, struct virtnet_info, rx_mode_work);
>>>>> +     struct net_device *dev = vi->dev;
>>>>>        struct scatterlist sg[2];
>>>>>        struct virtio_net_ctrl_mac *mac_data;
>>>>>        struct netdev_hw_addr *ha;
>>>>> @@ -2325,6 +2347,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
>>>>>        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
>>>>>                return;
>>>>>
>>>>> +     rtnl_lock();
>>>>> +
>>>>>        vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
>>>>>        vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
>>>>>
>>>>> @@ -2342,14 +2366,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
>>>>>                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
>>>>>                         vi->ctrl->allmulti ? "en" : "dis");
>>>>>
>>>>> +     netif_addr_lock_bh(dev);
>>>>> +
>>>>>        uc_count = netdev_uc_count(dev);
>>>>>        mc_count = netdev_mc_count(dev);
>>>>>        /* MAC filter - use one buffer for both lists */
>>>>>        buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
>>>>>                      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
>>>>>        mac_data = buf;
>>>>> -     if (!buf)
>>>>> +     if (!buf) {
>>>>> +             netif_addr_unlock_bh(dev);
>>>>> +             rtnl_unlock();
>>>>>                return;
>>>>> +     }
>>>>>
>>>>>        sg_init_table(sg, 2);
>>>>>
>>>>> @@ -2370,6 +2399,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
>>>>>        netdev_for_each_mc_addr(ha, dev)
>>>>>                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
>>>>>
>>>>> +     netif_addr_unlock_bh(dev);
>>>>> +
>>>>>        sg_set_buf(&sg[1], mac_data,
>>>>>                   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
>>>>>
>>>>> @@ -2377,9 +2408,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
>>>>>                                  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
>>>>>                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
>>>>>
>>>>> +     rtnl_unlock();
>>>>> +
>>>>>        kfree(buf);
>>>>>   }
>>>>>
>>>>> +static void virtnet_set_rx_mode(struct net_device *dev)
>>>>> +{
>>>>> +     struct virtnet_info *vi = netdev_priv(dev);
>>>>> +
>>>>> +     if (vi->rx_mode_work_enabled)
>>>>> +             schedule_work(&vi->rx_mode_work);
>>>>> +}
>>>>> +
>>>>>   static int virtnet_vlan_rx_add_vid(struct net_device *dev,
>>>>>                                   __be16 proto, u16 vid)
>>>>>   {
>>>>> @@ -3150,6 +3191,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
>>>>>
>>>>>        /* Make sure no work handler is accessing the device */
>>>>>        flush_work(&vi->config_work);
>>>>> +     disable_rx_mode_work(vi);
>>>>> +     flush_work(&vi->rx_mode_work);
>>>>>
>>>>>        netif_tx_lock_bh(vi->dev);
>>>>>        netif_device_detach(vi->dev);
>>>> So now configuration is not propagated to device.
>>>> Won't device later wake up in wrong state?
>>>>
>>>>
>>>>> @@ -3172,6 +3215,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
>>>>>        virtio_device_ready(vdev);
>>>>>
>>>>>        enable_delayed_refill(vi);
>>>>> +     enable_rx_mode_work(vi);
>>>>>
>>>>>        if (netif_running(vi->dev)) {
>>>>>                err = virtnet_open(vi->dev);
>>>>> @@ -3969,6 +4013,7 @@ static int virtnet_probe(struct virtio_device *vdev)
>>>>>        vdev->priv = vi;
>>>>>
>>>>>        INIT_WORK(&vi->config_work, virtnet_config_changed_work);
>>>>> +     INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
>>>>>        spin_lock_init(&vi->refill_lock);
>>>>>
>>>>>        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
>>>>> @@ -4077,6 +4122,8 @@ static int virtnet_probe(struct virtio_device *vdev)
>>>>>        if (vi->has_rss || vi->has_rss_hash_report)
>>>>>                virtnet_init_default_rss(vi);
>>>>>
>>>>> +     enable_rx_mode_work(vi);
>>>>> +
>>>>>        /* serialize netdev register + virtio_device_ready() with ndo_open() */
>>>>>        rtnl_lock();
>>>>>
>>>>> @@ -4174,6 +4221,8 @@ static void virtnet_remove(struct virtio_device *vdev)
>>>>>
>>>>>        /* Make sure no work handler is accessing the device. */
>>>>>        flush_work(&vi->config_work);
>>>>> +     disable_rx_mode_work(vi);
>>>>> +     flush_work(&vi->rx_mode_work);
>>>>>
>>>>>        unregister_netdev(vi->dev);
>>>>>
>>>>> --
>>>>> 2.25.1

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-04-17  3:40         ` Jason Wang
  2023-05-05  3:46           ` Jason Wang
@ 2023-05-10  5:32           ` Michael S. Tsirkin
  2023-05-15  1:05             ` Jason Wang
  1 sibling, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2023-05-10  5:32 UTC (permalink / raw)
  To: Jason Wang
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, kuba, pabeni, david.marchand, davem

On Mon, Apr 17, 2023 at 11:40:58AM +0800, Jason Wang wrote:
> On Fri, Apr 14, 2023 at 3:21 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Fri, Apr 14, 2023 at 01:04:15PM +0800, Jason Wang wrote:
> > > Forget to cc netdev, adding.
> > >
> > > On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > >
> > > > On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> > > > > This patch convert rx mode setting to be done in a workqueue, this is
> > > > > a must for allow to sleep when waiting for the cvq command to
> > > > > response since current code is executed under addr spin lock.
> > > > >
> > > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > >
> > > > I don't like this frankly. This means that setting RX mode which would
> > > > previously be reliable, now becomes unreliable.
> > >
> > > It is "unreliable" by design:
> > >
> > >       void                    (*ndo_set_rx_mode)(struct net_device *dev);
> > >
> > > > - first of all configuration is no longer immediate
> > >
> > > Is immediate a hard requirement? I can see a workqueue is used at least:
> > >
> > > mlx5e, ipoib, efx, ...
> > >
> > > >   and there is no way for driver to find out when
> > > >   it actually took effect
> > >
> > > But we know rx mode is best effort e.g it doesn't support vhost and we
> > > survive from this for years.
> > >
> > > > - second, if device fails command, this is also not
> > > >   propagated to driver, again no way for driver to find out
> > > >
> > > > VDUSE needs to be fixed to do tricks to fix this
> > > > without breaking normal drivers.
> > >
> > > It's not specific to VDUSE. For example, when using virtio-net in the
> > > UP environment with any software cvq (like mlx5 via vDPA or cma
> > > transport).
> > >
> > > Thanks
> >
> > Hmm. Can we differentiate between these use-cases?
> 
> It doesn't look easy since we are drivers for virtio bus. Underlayer
> details were hidden from virtio-net.
> 
> Or do you have any ideas on this?
> 
> Thanks

I don't know, pass some kind of flag in struct virtqueue?
	"bool slow; /* This vq can be very slow sometimes. Don't wait for it! */"

?

-- 
MST

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-05-10  5:32           ` Michael S. Tsirkin
@ 2023-05-15  1:05             ` Jason Wang
  2023-05-15  4:45               ` Michael S. Tsirkin
  0 siblings, 1 reply; 25+ messages in thread
From: Jason Wang @ 2023-05-15  1:05 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, kuba, pabeni, david.marchand, davem

On Wed, May 10, 2023 at 1:33 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Mon, Apr 17, 2023 at 11:40:58AM +0800, Jason Wang wrote:
> > On Fri, Apr 14, 2023 at 3:21 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > >
> > > On Fri, Apr 14, 2023 at 01:04:15PM +0800, Jason Wang wrote:
> > > > Forget to cc netdev, adding.
> > > >
> > > > On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > >
> > > > > On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> > > > > > This patch convert rx mode setting to be done in a workqueue, this is
> > > > > > a must for allow to sleep when waiting for the cvq command to
> > > > > > response since current code is executed under addr spin lock.
> > > > > >
> > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > >
> > > > > I don't like this frankly. This means that setting RX mode which would
> > > > > previously be reliable, now becomes unreliable.
> > > >
> > > > It is "unreliable" by design:
> > > >
> > > >       void                    (*ndo_set_rx_mode)(struct net_device *dev);
> > > >
> > > > > - first of all configuration is no longer immediate
> > > >
> > > > Is immediate a hard requirement? I can see a workqueue is used at least:
> > > >
> > > > mlx5e, ipoib, efx, ...
> > > >
> > > > >   and there is no way for driver to find out when
> > > > >   it actually took effect
> > > >
> > > > But we know rx mode is best effort e.g it doesn't support vhost and we
> > > > survive from this for years.
> > > >
> > > > > - second, if device fails command, this is also not
> > > > >   propagated to driver, again no way for driver to find out
> > > > >
> > > > > VDUSE needs to be fixed to do tricks to fix this
> > > > > without breaking normal drivers.
> > > >
> > > > It's not specific to VDUSE. For example, when using virtio-net in the
> > > > UP environment with any software cvq (like mlx5 via vDPA or cma
> > > > transport).
> > > >
> > > > Thanks
> > >
> > > Hmm. Can we differentiate between these use-cases?
> >
> > It doesn't look easy since we are drivers for virtio bus. Underlayer
> > details were hidden from virtio-net.
> >
> > Or do you have any ideas on this?
> >
> > Thanks
>
> I don't know, pass some kind of flag in struct virtqueue?
>         "bool slow; /* This vq can be very slow sometimes. Don't wait for it! */"
>
> ?
>

So if it's slow, sleep, otherwise poll?

I feel setting this flag might be tricky, since the driver doesn't
know whether or not it's really slow. E.g. a smartNIC vendor may allow
virtio-net emulation over PCI.

Thanks

> --
> MST
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-05-15  1:05             ` Jason Wang
@ 2023-05-15  4:45               ` Michael S. Tsirkin
  2023-05-15  5:13                 ` Jason Wang
  0 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2023-05-15  4:45 UTC (permalink / raw)
  To: Jason Wang
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, kuba, pabeni, david.marchand, davem

On Mon, May 15, 2023 at 09:05:54AM +0800, Jason Wang wrote:
> On Wed, May 10, 2023 at 1:33 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Mon, Apr 17, 2023 at 11:40:58AM +0800, Jason Wang wrote:
> > > On Fri, Apr 14, 2023 at 3:21 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > >
> > > > On Fri, Apr 14, 2023 at 01:04:15PM +0800, Jason Wang wrote:
> > > > > Forget to cc netdev, adding.
> > > > >
> > > > > On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > >
> > > > > > On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> > > > > > > This patch convert rx mode setting to be done in a workqueue, this is
> > > > > > > a must for allow to sleep when waiting for the cvq command to
> > > > > > > response since current code is executed under addr spin lock.
> > > > > > >
> > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > > >
> > > > > > I don't like this frankly. This means that setting RX mode which would
> > > > > > previously be reliable, now becomes unreliable.
> > > > >
> > > > > It is "unreliable" by design:
> > > > >
> > > > >       void                    (*ndo_set_rx_mode)(struct net_device *dev);
> > > > >
> > > > > > - first of all configuration is no longer immediate
> > > > >
> > > > > Is immediate a hard requirement? I can see a workqueue is used at least:
> > > > >
> > > > > mlx5e, ipoib, efx, ...
> > > > >
> > > > > >   and there is no way for driver to find out when
> > > > > >   it actually took effect
> > > > >
> > > > > But we know rx mode is best effort e.g it doesn't support vhost and we
> > > > > survive from this for years.
> > > > >
> > > > > > - second, if device fails command, this is also not
> > > > > >   propagated to driver, again no way for driver to find out
> > > > > >
> > > > > > VDUSE needs to be fixed to do tricks to fix this
> > > > > > without breaking normal drivers.
> > > > >
> > > > > It's not specific to VDUSE. For example, when using virtio-net in the
> > > > > UP environment with any software cvq (like mlx5 via vDPA or cma
> > > > > transport).
> > > > >
> > > > > Thanks
> > > >
> > > > Hmm. Can we differentiate between these use-cases?
> > >
> > > It doesn't look easy since we are drivers for virtio bus. Underlayer
> > > details were hidden from virtio-net.
> > >
> > > Or do you have any ideas on this?
> > >
> > > Thanks
> >
> > I don't know, pass some kind of flag in struct virtqueue?
> >         "bool slow; /* This vq can be very slow sometimes. Don't wait for it! */"
> >
> > ?
> >
> 
> So if it's slow, sleep, otherwise poll?
> 
> I feel setting this flag might be tricky, since the driver doesn't
> know whether or not it's really slow. E.g smartNIC vendor may allow
> virtio-net emulation over PCI.
> 
> Thanks

The driver will have the choice, depending on whether
the vq is deterministic or not.


> > --
> > MST
> >

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-05-15  4:45               ` Michael S. Tsirkin
@ 2023-05-15  5:13                 ` Jason Wang
  2023-05-15 10:17                   ` Michael S. Tsirkin
  0 siblings, 1 reply; 25+ messages in thread
From: Jason Wang @ 2023-05-15  5:13 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, kuba, pabeni, david.marchand, davem

On Mon, May 15, 2023 at 12:45 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Mon, May 15, 2023 at 09:05:54AM +0800, Jason Wang wrote:
> > On Wed, May 10, 2023 at 1:33 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > >
> > > On Mon, Apr 17, 2023 at 11:40:58AM +0800, Jason Wang wrote:
> > > > On Fri, Apr 14, 2023 at 3:21 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > >
> > > > > On Fri, Apr 14, 2023 at 01:04:15PM +0800, Jason Wang wrote:
> > > > > > Forget to cc netdev, adding.
> > > > > >
> > > > > > On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > >
> > > > > > > On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> > > > > > > > This patch convert rx mode setting to be done in a workqueue, this is
> > > > > > > > a must for allow to sleep when waiting for the cvq command to
> > > > > > > > response since current code is executed under addr spin lock.
> > > > > > > >
> > > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > > > >
> > > > > > > I don't like this frankly. This means that setting RX mode which would
> > > > > > > previously be reliable, now becomes unreliable.
> > > > > >
> > > > > > It is "unreliable" by design:
> > > > > >
> > > > > >       void                    (*ndo_set_rx_mode)(struct net_device *dev);
> > > > > >
> > > > > > > - first of all configuration is no longer immediate
> > > > > >
> > > > > > Is immediate a hard requirement? I can see a workqueue is used at least:
> > > > > >
> > > > > > mlx5e, ipoib, efx, ...
> > > > > >
> > > > > > >   and there is no way for driver to find out when
> > > > > > >   it actually took effect
> > > > > >
> > > > > > But we know rx mode is best effort e.g it doesn't support vhost and we
> > > > > > survive from this for years.
> > > > > >
> > > > > > > - second, if device fails command, this is also not
> > > > > > >   propagated to driver, again no way for driver to find out
> > > > > > >
> > > > > > > VDUSE needs to be fixed to do tricks to fix this
> > > > > > > without breaking normal drivers.
> > > > > >
> > > > > > It's not specific to VDUSE. For example, when using virtio-net in the
> > > > > > UP environment with any software cvq (like mlx5 via vDPA or cma
> > > > > > transport).
> > > > > >
> > > > > > Thanks
> > > > >
> > > > > Hmm. Can we differentiate between these use-cases?
> > > >
> > > > It doesn't look easy since we are drivers for virtio bus. Underlayer
> > > > details were hidden from virtio-net.
> > > >
> > > > Or do you have any ideas on this?
> > > >
> > > > Thanks
> > >
> > > I don't know, pass some kind of flag in struct virtqueue?
> > >         "bool slow; /* This vq can be very slow sometimes. Don't wait for it! */"
> > >
> > > ?
> > >
> >
> > So if it's slow, sleep, otherwise poll?
> >
> > I feel setting this flag might be tricky, since the driver doesn't
> > know whether or not it's really slow. E.g smartNIC vendor may allow
> > virtio-net emulation over PCI.
> >
> > Thanks
>
> driver will have the choice, depending on whether
> vq is deterministic or not.

Ok, but the problem is that such booleans are only useful for the virtio
ring code. But in this case, virtio-net already knows what to do for the
cvq. So I'm not sure who the user of this flag would be.

Thanks

>
>
> > > --
> > > MST
> > >
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-05-15  5:13                 ` Jason Wang
@ 2023-05-15 10:17                   ` Michael S. Tsirkin
  2023-05-16  2:44                     ` Jason Wang
  0 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2023-05-15 10:17 UTC (permalink / raw)
  To: Jason Wang
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, kuba, pabeni, david.marchand, davem

On Mon, May 15, 2023 at 01:13:33PM +0800, Jason Wang wrote:
> On Mon, May 15, 2023 at 12:45 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Mon, May 15, 2023 at 09:05:54AM +0800, Jason Wang wrote:
> > > On Wed, May 10, 2023 at 1:33 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > >
> > > > On Mon, Apr 17, 2023 at 11:40:58AM +0800, Jason Wang wrote:
> > > > > On Fri, Apr 14, 2023 at 3:21 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > >
> > > > > > On Fri, Apr 14, 2023 at 01:04:15PM +0800, Jason Wang wrote:
> > > > > > > Forget to cc netdev, adding.
> > > > > > >
> > > > > > > On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > > >
> > > > > > > > On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> > > > > > > > > This patch convert rx mode setting to be done in a workqueue, this is
> > > > > > > > > a must for allow to sleep when waiting for the cvq command to
> > > > > > > > > response since current code is executed under addr spin lock.
> > > > > > > > >
> > > > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > > > > >
> > > > > > > > I don't like this frankly. This means that setting RX mode which would
> > > > > > > > previously be reliable, now becomes unreliable.
> > > > > > >
> > > > > > > It is "unreliable" by design:
> > > > > > >
> > > > > > >       void                    (*ndo_set_rx_mode)(struct net_device *dev);
> > > > > > >
> > > > > > > > - first of all configuration is no longer immediate
> > > > > > >
> > > > > > > Is immediate a hard requirement? I can see a workqueue is used at least:
> > > > > > >
> > > > > > > mlx5e, ipoib, efx, ...
> > > > > > >
> > > > > > > >   and there is no way for driver to find out when
> > > > > > > >   it actually took effect
> > > > > > >
> > > > > > > But we know rx mode is best effort e.g it doesn't support vhost and we
> > > > > > > survive from this for years.
> > > > > > >
> > > > > > > > - second, if device fails command, this is also not
> > > > > > > >   propagated to driver, again no way for driver to find out
> > > > > > > >
> > > > > > > > VDUSE needs to be fixed to do tricks to fix this
> > > > > > > > without breaking normal drivers.
> > > > > > >
> > > > > > > It's not specific to VDUSE. For example, when using virtio-net in the
> > > > > > > UP environment with any software cvq (like mlx5 via vDPA or cma
> > > > > > > transport).
> > > > > > >
> > > > > > > Thanks
> > > > > >
> > > > > > Hmm. Can we differentiate between these use-cases?
> > > > >
> > > > > It doesn't look easy since we are drivers for virtio bus. Underlayer
> > > > > details were hidden from virtio-net.
> > > > >
> > > > > Or do you have any ideas on this?
> > > > >
> > > > > Thanks
> > > >
> > > > I don't know, pass some kind of flag in struct virtqueue?
> > > >         "bool slow; /* This vq can be very slow sometimes. Don't wait for it! */"
> > > >
> > > > ?
> > > >
> > >
> > > So if it's slow, sleep, otherwise poll?
> > >
> > > I feel setting this flag might be tricky, since the driver doesn't
> > > know whether or not it's really slow. E.g smartNIC vendor may allow
> > > virtio-net emulation over PCI.
> > >
> > > Thanks
> >
> > driver will have the choice, depending on whether
> > vq is deterministic or not.
> 
> Ok, but the problem is, such booleans are only useful for virtio ring
> codes. But in this case, virtio-net knows what to do for cvq. So I'm
> not sure who the user is.
> 
> Thanks

Circling back, what exactly does the architecture you are trying
to fix look like? Who is going to introduce the unbounded latency?
The hypervisor? If so, shouldn't we maybe want a new feature bit
that documents this? The hypervisor could then detect old guests
that spin and decide what to do, e.g. prioritise the cvq more,
or fail FEATURES_OK.

> >
> >
> > > > --
> > > > MST
> > > >
> >

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-05-15 10:17                   ` Michael S. Tsirkin
@ 2023-05-16  2:44                     ` Jason Wang
  2023-05-16  4:12                       ` Michael S. Tsirkin
  0 siblings, 1 reply; 25+ messages in thread
From: Jason Wang @ 2023-05-16  2:44 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, kuba, pabeni, david.marchand, davem

On Mon, May 15, 2023 at 6:17 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Mon, May 15, 2023 at 01:13:33PM +0800, Jason Wang wrote:
> > On Mon, May 15, 2023 at 12:45 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > >
> > > On Mon, May 15, 2023 at 09:05:54AM +0800, Jason Wang wrote:
> > > > On Wed, May 10, 2023 at 1:33 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > >
> > > > > On Mon, Apr 17, 2023 at 11:40:58AM +0800, Jason Wang wrote:
> > > > > > On Fri, Apr 14, 2023 at 3:21 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > >
> > > > > > > On Fri, Apr 14, 2023 at 01:04:15PM +0800, Jason Wang wrote:
> > > > > > > > Forget to cc netdev, adding.
> > > > > > > >
> > > > > > > > On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > > > >
> > > > > > > > > On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> > > > > > > > > > This patch convert rx mode setting to be done in a workqueue, this is
> > > > > > > > > > a must for allow to sleep when waiting for the cvq command to
> > > > > > > > > > response since current code is executed under addr spin lock.
> > > > > > > > > >
> > > > > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > > > > > >
> > > > > > > > > I don't like this frankly. This means that setting RX mode which would
> > > > > > > > > previously be reliable, now becomes unreliable.
> > > > > > > >
> > > > > > > > It is "unreliable" by design:
> > > > > > > >
> > > > > > > >       void                    (*ndo_set_rx_mode)(struct net_device *dev);
> > > > > > > >
> > > > > > > > > - first of all configuration is no longer immediate
> > > > > > > >
> > > > > > > > Is immediate a hard requirement? I can see a workqueue is used at least:
> > > > > > > >
> > > > > > > > mlx5e, ipoib, efx, ...
> > > > > > > >
> > > > > > > > >   and there is no way for driver to find out when
> > > > > > > > >   it actually took effect
> > > > > > > >
> > > > > > > > But we know rx mode is best effort e.g it doesn't support vhost and we
> > > > > > > > survive from this for years.
> > > > > > > >
> > > > > > > > > - second, if device fails command, this is also not
> > > > > > > > >   propagated to driver, again no way for driver to find out
> > > > > > > > >
> > > > > > > > > VDUSE needs to be fixed to do tricks to fix this
> > > > > > > > > without breaking normal drivers.
> > > > > > > >
> > > > > > > > It's not specific to VDUSE. For example, when using virtio-net in the
> > > > > > > > UP environment with any software cvq (like mlx5 via vDPA or cma
> > > > > > > > transport).
> > > > > > > >
> > > > > > > > Thanks
> > > > > > >
> > > > > > > Hmm. Can we differentiate between these use-cases?
> > > > > >
> > > > > > It doesn't look easy since we are drivers for virtio bus. Underlayer
> > > > > > details were hidden from virtio-net.
> > > > > >
> > > > > > Or do you have any ideas on this?
> > > > > >
> > > > > > Thanks
> > > > >
> > > > > I don't know, pass some kind of flag in struct virtqueue?
> > > > >         "bool slow; /* This vq can be very slow sometimes. Don't wait for it! */"
> > > > >
> > > > > ?
> > > > >
> > > >
> > > > So if it's slow, sleep, otherwise poll?
> > > >
> > > > I feel setting this flag might be tricky, since the driver doesn't
> > > > know whether or not it's really slow. E.g smartNIC vendor may allow
> > > > virtio-net emulation over PCI.
> > > >
> > > > Thanks
> > >
> > > driver will have the choice, depending on whether
> > > vq is deterministic or not.
> >
> > Ok, but the problem is, such booleans are only useful for virtio ring
> > codes. But in this case, virtio-net knows what to do for cvq. So I'm
> > not sure who the user is.
> >
> > Thanks
>
> Circling back, what exactly does the architecture you are trying
> to fix look like? Who is going to introduce unbounded latency?
> The hypervisor?

The hypervisor is one of the possible reasons; we have many more:

Hardware device that provides virtio-pci emulation.
Userspace devices like VDUSE.

> If so do we not maybe want a new feature bit
> that documents this? Hypervisor then can detect old guests
> that spin and decide what to do, e.g. prioritise cvq more,
> or fail FEATURES_OK.

We suffer from this for bare metal as well.

But the question is: what's wrong with the approach used in this
patch? I've explained that set_rx_mode is not reliable, so it should be
fine to use a workqueue. Apart from this, is there anything else that
worries you?

Thanks

>
> > >
> > >
> > > > > --
> > > > > MST
> > > > >
> > >
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-05-16  2:44                     ` Jason Wang
@ 2023-05-16  4:12                       ` Michael S. Tsirkin
  2023-05-16  4:17                         ` Jason Wang
  0 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2023-05-16  4:12 UTC (permalink / raw)
  To: Jason Wang
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, kuba, pabeni, david.marchand, davem

On Tue, May 16, 2023 at 10:44:45AM +0800, Jason Wang wrote:
> On Mon, May 15, 2023 at 6:17 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Mon, May 15, 2023 at 01:13:33PM +0800, Jason Wang wrote:
> > > On Mon, May 15, 2023 at 12:45 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > >
> > > > On Mon, May 15, 2023 at 09:05:54AM +0800, Jason Wang wrote:
> > > > > On Wed, May 10, 2023 at 1:33 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > >
> > > > > > On Mon, Apr 17, 2023 at 11:40:58AM +0800, Jason Wang wrote:
> > > > > > > On Fri, Apr 14, 2023 at 3:21 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > > >
> > > > > > > > On Fri, Apr 14, 2023 at 01:04:15PM +0800, Jason Wang wrote:
> > > > > > > > > Forget to cc netdev, adding.
> > > > > > > > >
> > > > > > > > > On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > > > > >
> > > > > > > > > > On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> > > > > > > > > > > This patch convert rx mode setting to be done in a workqueue, this is
> > > > > > > > > > > a must for allow to sleep when waiting for the cvq command to
> > > > > > > > > > > response since current code is executed under addr spin lock.
> > > > > > > > > > >
> > > > > > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > > > > > > >
> > > > > > > > > > I don't like this frankly. This means that setting RX mode which would
> > > > > > > > > > previously be reliable, now becomes unreliable.
> > > > > > > > >
> > > > > > > > > It is "unreliable" by design:
> > > > > > > > >
> > > > > > > > >       void                    (*ndo_set_rx_mode)(struct net_device *dev);
> > > > > > > > >
> > > > > > > > > > - first of all configuration is no longer immediate
> > > > > > > > >
> > > > > > > > > Is immediate a hard requirement? I can see a workqueue is used at least:
> > > > > > > > >
> > > > > > > > > mlx5e, ipoib, efx, ...
> > > > > > > > >
> > > > > > > > > >   and there is no way for driver to find out when
> > > > > > > > > >   it actually took effect
> > > > > > > > >
> > > > > > > > > But we know rx mode is best effort e.g it doesn't support vhost and we
> > > > > > > > > survive from this for years.
> > > > > > > > >
> > > > > > > > > > - second, if device fails command, this is also not
> > > > > > > > > >   propagated to driver, again no way for driver to find out
> > > > > > > > > >
> > > > > > > > > > VDUSE needs to be fixed to do tricks to fix this
> > > > > > > > > > without breaking normal drivers.
> > > > > > > > >
> > > > > > > > > It's not specific to VDUSE. For example, when using virtio-net in the
> > > > > > > > > UP environment with any software cvq (like mlx5 via vDPA or cma
> > > > > > > > > transport).
> > > > > > > > >
> > > > > > > > > Thanks
> > > > > > > >
> > > > > > > > Hmm. Can we differentiate between these use-cases?
> > > > > > >
> > > > > > > It doesn't look easy since we are drivers for virtio bus. Underlayer
> > > > > > > details were hidden from virtio-net.
> > > > > > >
> > > > > > > Or do you have any ideas on this?
> > > > > > >
> > > > > > > Thanks
> > > > > >
> > > > > > I don't know, pass some kind of flag in struct virtqueue?
> > > > > >         "bool slow; /* This vq can be very slow sometimes. Don't wait for it! */"
> > > > > >
> > > > > > ?
> > > > > >
> > > > >
> > > > > So if it's slow, sleep, otherwise poll?
> > > > >
> > > > > I feel setting this flag might be tricky, since the driver doesn't
> > > > > know whether or not it's really slow. E.g smartNIC vendor may allow
> > > > > virtio-net emulation over PCI.
> > > > >
> > > > > Thanks
> > > >
> > > > driver will have the choice, depending on whether
> > > > vq is deterministic or not.
> > >
> > > Ok, but the problem is, such booleans are only useful for virtio ring
> > > codes. But in this case, virtio-net knows what to do for cvq. So I'm
> > > not sure who the user is.
> > >
> > > Thanks
> >
> > Circling back, what exactly does the architecture you are trying
> > to fix look like? Who is going to introduce unbounded latency?
> > The hypervisor?
> 
> Hypervisor is one of the possible reason, we have many more:
> 
> Hardware device that provides virtio-pci emulation.
> Userspace devices like VDUSE.

So let's start by addressing VDUSE maybe?

> > If so do we not maybe want a new feature bit
> > that documents this? Hypervisor then can detect old guests
> > that spin and decide what to do, e.g. prioritise cvq more,
> > or fail FEATURES_OK.
> 
> We suffer from this for bare metal as well.
> 
> But a question is what's wrong with the approach that is used in this
> patch? I've answered that set_rx_mode is not reliable, so it should be
> fine to use workqueue. Except for this, any other thing that worries
> you?
> 
> Thanks

It's not reliable for other drivers but has been reliable for virtio.
I worry that some software has relied on this.
You are making good points though ... could we get some
maintainer's feedback on this?

> >
> > > >
> > > >
> > > > > > --
> > > > > > MST
> > > > > >
> > > >
> >

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-05-16  4:12                       ` Michael S. Tsirkin
@ 2023-05-16  4:17                         ` Jason Wang
  2023-05-16  5:56                           ` Michael S. Tsirkin
  0 siblings, 1 reply; 25+ messages in thread
From: Jason Wang @ 2023-05-16  4:17 UTC (permalink / raw)
  To: Michael S. Tsirkin, kuba
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, pabeni, david.marchand, davem

On Tue, May 16, 2023 at 12:13 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Tue, May 16, 2023 at 10:44:45AM +0800, Jason Wang wrote:
> > On Mon, May 15, 2023 at 6:17 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > >
> > > On Mon, May 15, 2023 at 01:13:33PM +0800, Jason Wang wrote:
> > > > On Mon, May 15, 2023 at 12:45 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > >
> > > > > On Mon, May 15, 2023 at 09:05:54AM +0800, Jason Wang wrote:
> > > > > > On Wed, May 10, 2023 at 1:33 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > >
> > > > > > > On Mon, Apr 17, 2023 at 11:40:58AM +0800, Jason Wang wrote:
> > > > > > > > On Fri, Apr 14, 2023 at 3:21 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > > > >
> > > > > > > > > On Fri, Apr 14, 2023 at 01:04:15PM +0800, Jason Wang wrote:
> > > > > > > > > > Forget to cc netdev, adding.
> > > > > > > > > >
> > > > > > > > > > On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > > > > > >
> > > > > > > > > > > On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> > > > > > > > > > > > This patch convert rx mode setting to be done in a workqueue, this is
> > > > > > > > > > > > a must for allow to sleep when waiting for the cvq command to
> > > > > > > > > > > > response since current code is executed under addr spin lock.
> > > > > > > > > > > >
> > > > > > > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > > > > > > > >
> > > > > > > > > > > I don't like this frankly. This means that setting RX mode which would
> > > > > > > > > > > previously be reliable, now becomes unreliable.
> > > > > > > > > >
> > > > > > > > > > It is "unreliable" by design:
> > > > > > > > > >
> > > > > > > > > >       void                    (*ndo_set_rx_mode)(struct net_device *dev);
> > > > > > > > > >
> > > > > > > > > > > - first of all configuration is no longer immediate
> > > > > > > > > >
> > > > > > > > > > Is immediate a hard requirement? I can see a workqueue is used at least:
> > > > > > > > > >
> > > > > > > > > > mlx5e, ipoib, efx, ...
> > > > > > > > > >
> > > > > > > > > > >   and there is no way for driver to find out when
> > > > > > > > > > >   it actually took effect
> > > > > > > > > >
> > > > > > > > > > But we know rx mode is best effort e.g it doesn't support vhost and we
> > > > > > > > > > survive from this for years.
> > > > > > > > > >
> > > > > > > > > > > - second, if device fails command, this is also not
> > > > > > > > > > >   propagated to driver, again no way for driver to find out
> > > > > > > > > > >
> > > > > > > > > > > VDUSE needs to be fixed to do tricks to fix this
> > > > > > > > > > > without breaking normal drivers.
> > > > > > > > > >
> > > > > > > > > > It's not specific to VDUSE. For example, when using virtio-net in the
> > > > > > > > > > UP environment with any software cvq (like mlx5 via vDPA or cma
> > > > > > > > > > transport).
> > > > > > > > > >
> > > > > > > > > > Thanks
> > > > > > > > >
> > > > > > > > > Hmm. Can we differentiate between these use-cases?
> > > > > > > >
> > > > > > > > It doesn't look easy since we are drivers for virtio bus. Underlayer
> > > > > > > > details were hidden from virtio-net.
> > > > > > > >
> > > > > > > > Or do you have any ideas on this?
> > > > > > > >
> > > > > > > > Thanks
> > > > > > >
> > > > > > > I don't know, pass some kind of flag in struct virtqueue?
> > > > > > >         "bool slow; /* This vq can be very slow sometimes. Don't wait for it! */"
> > > > > > >
> > > > > > > ?
> > > > > > >
> > > > > >
> > > > > > So if it's slow, sleep, otherwise poll?
> > > > > >
> > > > > > I feel setting this flag might be tricky, since the driver doesn't
> > > > > > know whether or not it's really slow. E.g smartNIC vendor may allow
> > > > > > virtio-net emulation over PCI.
> > > > > >
> > > > > > Thanks
> > > > >
> > > > > driver will have the choice, depending on whether
> > > > > vq is deterministic or not.
> > > >
> > > > Ok, but the problem is, such booleans are only useful for virtio ring
> > > > codes. But in this case, virtio-net knows what to do for cvq. So I'm
> > > > not sure who the user is.
> > > >
> > > > Thanks
> > >
> > > Circling back, what exactly does the architecture you are trying
> > > to fix look like? Who is going to introduce unbounded latency?
> > > The hypervisor?
> >
> > Hypervisor is one of the possible reason, we have many more:
> >
> > Hardware device that provides virtio-pci emulation.
> > Userspace devices like VDUSE.
>
> So let's start by addressing VDUSE maybe?

It's reported by at least one hardware vendor as well. I remember it
was Alvaro who reported this first in the past.

>
> > > If so do we not maybe want a new feature bit
> > > that documents this? Hypervisor then can detect old guests
> > > that spin and decide what to do, e.g. prioritise cvq more,
> > > or fail FEATURES_OK.
> >
> > We suffer from this for bare metal as well.
> >
> > But a question is what's wrong with the approach that is used in this
> > patch? I've answered that set_rx_mode is not reliable, so it should be
> > fine to use workqueue. Except for this, any other thing that worries
> > you?
> >
> > Thanks
>
> It's not reliable for other drivers but has been reliable for virtio.
> I worry some software relied on this.

It's probably fine since some devices, like vhost, don't support this
at all and we have managed to survive for several years.

> You are making good points though ... could we get some
> maintainer's feedback on this?

That would be helpful. Jakub, any input on this?

Thanks

>
> > >
> > > > >
> > > > >
> > > > > > > --
> > > > > > > MST
> > > > > > >
> > > > >
> > >
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue
  2023-05-16  4:17                         ` Jason Wang
@ 2023-05-16  5:56                           ` Michael S. Tsirkin
  0 siblings, 0 replies; 25+ messages in thread
From: Michael S. Tsirkin @ 2023-05-16  5:56 UTC (permalink / raw)
  To: Jason Wang
  Cc: xuanzhuo, netdev, linux-kernel, virtualization, eperezma,
	edumazet, maxime.coquelin, kuba, pabeni, david.marchand, davem

On Tue, May 16, 2023 at 12:17:50PM +0800, Jason Wang wrote:
> On Tue, May 16, 2023 at 12:13 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Tue, May 16, 2023 at 10:44:45AM +0800, Jason Wang wrote:
> > > On Mon, May 15, 2023 at 6:17 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > >
> > > > On Mon, May 15, 2023 at 01:13:33PM +0800, Jason Wang wrote:
> > > > > On Mon, May 15, 2023 at 12:45 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > >
> > > > > > On Mon, May 15, 2023 at 09:05:54AM +0800, Jason Wang wrote:
> > > > > > > On Wed, May 10, 2023 at 1:33 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > > >
> > > > > > > > On Mon, Apr 17, 2023 at 11:40:58AM +0800, Jason Wang wrote:
> > > > > > > > > On Fri, Apr 14, 2023 at 3:21 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > > > > >
> > > > > > > > > > On Fri, Apr 14, 2023 at 01:04:15PM +0800, Jason Wang wrote:
> > > > > > > > > > > Forget to cc netdev, adding.
> > > > > > > > > > >
> > > > > > > > > > > On Fri, Apr 14, 2023 at 12:25 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > > > > > > >
> > > > > > > > > > > > On Thu, Apr 13, 2023 at 02:40:26PM +0800, Jason Wang wrote:
> > > > > > > > > > > > > This patch convert rx mode setting to be done in a workqueue, this is
> > > > > > > > > > > > > a must for allow to sleep when waiting for the cvq command to
> > > > > > > > > > > > > response since current code is executed under addr spin lock.
> > > > > > > > > > > > >
> > > > > > > > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > > > > > > > > >
> > > > > > > > > > > > I don't like this frankly. This means that setting RX mode which would
> > > > > > > > > > > > previously be reliable, now becomes unreliable.
> > > > > > > > > > >
> > > > > > > > > > > It is "unreliable" by design:
> > > > > > > > > > >
> > > > > > > > > > >       void                    (*ndo_set_rx_mode)(struct net_device *dev);
> > > > > > > > > > >
> > > > > > > > > > > > - first of all configuration is no longer immediate
> > > > > > > > > > >
> > > > > > > > > > > Is immediate a hard requirement? I can see a workqueue is used at least:
> > > > > > > > > > >
> > > > > > > > > > > mlx5e, ipoib, efx, ...
> > > > > > > > > > >
> > > > > > > > > > > >   and there is no way for driver to find out when
> > > > > > > > > > > >   it actually took effect
> > > > > > > > > > >
> > > > > > > > > > > But we know rx mode is best effort e.g it doesn't support vhost and we
> > > > > > > > > > > survive from this for years.
> > > > > > > > > > >
> > > > > > > > > > > > - second, if device fails command, this is also not
> > > > > > > > > > > >   propagated to driver, again no way for driver to find out
> > > > > > > > > > > >
> > > > > > > > > > > > VDUSE needs to be fixed to do tricks to fix this
> > > > > > > > > > > > without breaking normal drivers.
> > > > > > > > > > >
> > > > > > > > > > > It's not specific to VDUSE. For example, when using virtio-net in the
> > > > > > > > > > > UP environment with any software cvq (like mlx5 via vDPA or cma
> > > > > > > > > > > transport).
> > > > > > > > > > >
> > > > > > > > > > > Thanks
> > > > > > > > > >
> > > > > > > > > > Hmm. Can we differentiate between these use-cases?
> > > > > > > > >
> > > > > > > > > It doesn't look easy since we are drivers for virtio bus. Underlayer
> > > > > > > > > details were hidden from virtio-net.
> > > > > > > > >
> > > > > > > > > Or do you have any ideas on this?
> > > > > > > > >
> > > > > > > > > Thanks
> > > > > > > >
> > > > > > > > I don't know, pass some kind of flag in struct virtqueue?
> > > > > > > >         "bool slow; /* This vq can be very slow sometimes. Don't wait for it! */"
> > > > > > > >
> > > > > > > > ?
> > > > > > > >
> > > > > > >
> > > > > > > So if it's slow, sleep, otherwise poll?
> > > > > > >
> > > > > > > I feel setting this flag might be tricky, since the driver doesn't
> > > > > > > know whether or not it's really slow. E.g smartNIC vendor may allow
> > > > > > > virtio-net emulation over PCI.
> > > > > > >
> > > > > > > Thanks
> > > > > >
> > > > > > driver will have the choice, depending on whether
> > > > > > vq is deterministic or not.
> > > > >
> > > > > Ok, but the problem is, such booleans are only useful for virtio ring
> > > > > codes. But in this case, virtio-net knows what to do for cvq. So I'm
> > > > > not sure who the user is.
> > > > >
> > > > > Thanks
> > > >
> > > > Circling back, what exactly does the architecture you are trying
> > > > to fix look like? Who is going to introduce unbounded latency?
> > > > The hypervisor?
> > >
> > > Hypervisor is one of the possible reason, we have many more:
> > >
> > > Hardware device that provides virtio-pci emulation.
> > > Userspace devices like VDUSE.
> >
> > So let's start by addressing VDUSE maybe?
> 
> It's reported by at least one hardware vendor as well. I remember it
> was Alvaro who reported this first in the past.
> 
> >
> > > > If so do we not maybe want a new feature bit
> > > > that documents this? Hypervisor then can detect old guests
> > > > that spin and decide what to do, e.g. prioritise cvq more,
> > > > or fail FEATURES_OK.
> > >
> > > We suffer from this for bare metal as well.
> > >
> > > But a question is what's wrong with the approach that is used in this
> > > patch? I've answered that set_rx_mode is not reliable, so it should be
> > > fine to use workqueue. Except for this, any other thing that worries
> > > you?
> > >
> > > Thanks
> >
> > It's not reliable for other drivers but has been reliable for virtio.
> > I worry some software relied on this.
> 
> It's probably fine since some device like vhost doesn't support this
> at all and we manage to survive for several years.

vhost is often connected to a clever learning backend, such as a
bridge, which will DTRT without the guest configuring anything at
all — this could be why it works.



> > You are making good points though ... could we get some
> > maintainer's feedback on this?
> 
> That would be helpful. Jakub, any input on this?
> 
> Thanks
> 
> >
> > > >
> > > > > >
> > > > > >
> > > > > > > > --
> > > > > > > > MST
> > > > > > > >
> > > > > >
> > > >
> >

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 2/2] virtio-net: sleep instead of busy waiting for cvq command
  2023-04-13  6:40 ` [PATCH net-next V2 2/2] virtio-net: sleep instead of busy waiting for cvq command Jason Wang
  2023-04-13  7:27   ` Xuan Zhuo
@ 2023-05-16 20:54   ` Michael S. Tsirkin
  2023-05-17  0:50     ` Jason Wang
  1 sibling, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2023-05-16 20:54 UTC (permalink / raw)
  To: Jason Wang
  Cc: xuanzhuo, linux-kernel, virtualization, eperezma, edumazet,
	maxime.coquelin, kuba, pabeni, david.marchand, davem

On Thu, Apr 13, 2023 at 02:40:27PM +0800, Jason Wang wrote:
> We used to busy waiting on the cvq command this tends to be
> problematic since there no way for to schedule another process which
> may serve for the control virtqueue. This might be the case when the
> control virtqueue is emulated by software. This patch switches to use
> completion to allow the CPU to sleep instead of busy waiting for the
> cvq command.
> 
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
> Changes since V1:
> - use completion for simplicity
> - don't try to harden the CVQ command which requires more thought
> Changes since RFC:
> - break the device when timeout
> - get buffer manually since the virtio core check more_used() instead
> ---
>  drivers/net/virtio_net.c | 21 ++++++++++++++-------
>  1 file changed, 14 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 2e56bbf86894..d3eb8fd6c9dc 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -19,6 +19,7 @@
>  #include <linux/average.h>
>  #include <linux/filter.h>
>  #include <linux/kernel.h>
> +#include <linux/completion.h>
>  #include <net/route.h>
>  #include <net/xdp.h>
>  #include <net/net_failover.h>
> @@ -295,6 +296,8 @@ struct virtnet_info {
>  
>  	/* failover when STANDBY feature enabled */
>  	struct failover *failover;
> +
> +	struct completion completion;
>  };
>  
>  struct padded_vnet_hdr {
> @@ -1709,6 +1712,13 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
>  	return !oom;
>  }
>  
> +static void virtnet_cvq_done(struct virtqueue *cvq)
> +{
> +	struct virtnet_info *vi = cvq->vdev->priv;
> +
> +	complete(&vi->completion);
> +}
> +
>  static void skb_recv_done(struct virtqueue *rvq)
>  {
>  	struct virtnet_info *vi = rvq->vdev->priv;
> @@ -2169,12 +2179,8 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
>  	if (unlikely(!virtqueue_kick(vi->cvq)))
>  		return vi->ctrl->status == VIRTIO_NET_OK;
>  
> -	/* Spin for a response, the kick causes an ioport write, trapping
> -	 * into the hypervisor, so the request should be handled immediately.
> -	 */
> -	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
> -	       !virtqueue_is_broken(vi->cvq))
> -		cpu_relax();
> +	wait_for_completion(&vi->completion);
> +	virtqueue_get_buf(vi->cvq, &tmp);
>  
>  	return vi->ctrl->status == VIRTIO_NET_OK;

This seems to break surprise removal and other
situations where vq gets broken since callbacks
aren't usually invoked then.


>  }
> @@ -3672,7 +3678,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
>  
>  	/* Parameters for control virtqueue, if any */
>  	if (vi->has_cvq) {
> -		callbacks[total_vqs - 1] = NULL;
> +		callbacks[total_vqs - 1] = virtnet_cvq_done;
>  		names[total_vqs - 1] = "control";
>  	}
>

There is a cost to this, in that we are burning an extra MSI vector
for the slow-path cvq. If the device has 3 vectors, suddenly we can't
allocate vectors for rx and tx — a big problem.

So I'm afraid we need to pass a new flag that will share
the config changed interrupt and cvq.


  
> @@ -4122,6 +4128,7 @@ static int virtnet_probe(struct virtio_device *vdev)
>  	if (vi->has_rss || vi->has_rss_hash_report)
>  		virtnet_init_default_rss(vi);
>  
> +	init_completion(&vi->completion);
>  	enable_rx_mode_work(vi);
>  
>  	/* serialize netdev register + virtio_device_ready() with ndo_open() */
> -- 
> 2.25.1

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH net-next V2 2/2] virtio-net: sleep instead of busy waiting for cvq command
  2023-05-16 20:54   ` Michael S. Tsirkin
@ 2023-05-17  0:50     ` Jason Wang
  0 siblings, 0 replies; 25+ messages in thread
From: Jason Wang @ 2023-05-17  0:50 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: xuanzhuo, linux-kernel, virtualization, eperezma, edumazet,
	maxime.coquelin, kuba, pabeni, david.marchand, davem

On Wed, May 17, 2023 at 4:54 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Thu, Apr 13, 2023 at 02:40:27PM +0800, Jason Wang wrote:
> > We used to busy waiting on the cvq command this tends to be
> > problematic since there no way for to schedule another process which
> > may serve for the control virtqueue. This might be the case when the
> > control virtqueue is emulated by software. This patch switches to use
> > completion to allow the CPU to sleep instead of busy waiting for the
> > cvq command.
> >
> > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > ---
> > Changes since V1:
> > - use completion for simplicity
> > - don't try to harden the CVQ command which requires more thought
> > Changes since RFC:
> > - break the device when timeout
> > - get buffer manually since the virtio core check more_used() instead
> > ---
> >  drivers/net/virtio_net.c | 21 ++++++++++++++-------
> >  1 file changed, 14 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 2e56bbf86894..d3eb8fd6c9dc 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -19,6 +19,7 @@
> >  #include <linux/average.h>
> >  #include <linux/filter.h>
> >  #include <linux/kernel.h>
> > +#include <linux/completion.h>
> >  #include <net/route.h>
> >  #include <net/xdp.h>
> >  #include <net/net_failover.h>
> > @@ -295,6 +296,8 @@ struct virtnet_info {
> >
> >       /* failover when STANDBY feature enabled */
> >       struct failover *failover;
> > +
> > +     struct completion completion;
> >  };
> >
> >  struct padded_vnet_hdr {
> > @@ -1709,6 +1712,13 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
> >       return !oom;
> >  }
> >
> > +static void virtnet_cvq_done(struct virtqueue *cvq)
> > +{
> > +     struct virtnet_info *vi = cvq->vdev->priv;
> > +
> > +     complete(&vi->completion);
> > +}
> > +
> >  static void skb_recv_done(struct virtqueue *rvq)
> >  {
> >       struct virtnet_info *vi = rvq->vdev->priv;
> > @@ -2169,12 +2179,8 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
> >       if (unlikely(!virtqueue_kick(vi->cvq)))
> >               return vi->ctrl->status == VIRTIO_NET_OK;
> >
> > -     /* Spin for a response, the kick causes an ioport write, trapping
> > -      * into the hypervisor, so the request should be handled immediately.
> > -      */
> > -     while (!virtqueue_get_buf(vi->cvq, &tmp) &&
> > -            !virtqueue_is_broken(vi->cvq))
> > -             cpu_relax();
> > +     wait_for_completion(&vi->completion);
> > +     virtqueue_get_buf(vi->cvq, &tmp);
> >
> >       return vi->ctrl->status == VIRTIO_NET_OK;
>
> This seems to break surprise removal and other
> situations where vq gets broken since callbacks
> aren't usually invoked then.

Yes, so I think I can go back to the original idea by simply adding
cond_resched() here.

>
>
> >  }
> > @@ -3672,7 +3678,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
> >
> >       /* Parameters for control virtqueue, if any */
> >       if (vi->has_cvq) {
> > -             callbacks[total_vqs - 1] = NULL;
> > +             callbacks[total_vqs - 1] = virtnet_cvq_done;
> >               names[total_vqs - 1] = "control";
> >       }
> >
>
> There is a cost to this, in that we are burning an extra MSI vector
> for the slow path cvq. if device has 3 vectors, suddenly we can't
> allocate vectors for rx and tx, big problem.
>
> So I'm afraid we need to pass a new flag that will share
> the config changed interrupt and cvq.

See above: it looks to me like a simple cond_resched() is sufficient,
so we don't need a new vector.

Thanks

>
>
>
> > @@ -4122,6 +4128,7 @@ static int virtnet_probe(struct virtio_device *vdev)
> >       if (vi->has_rss || vi->has_rss_hash_report)
> >               virtnet_init_default_rss(vi);
> >
> > +     init_completion(&vi->completion);
> >       enable_rx_mode_work(vi);
> >
> >       /* serialize netdev register + virtio_device_ready() with ndo_open() */
> > --
> > 2.25.1
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 25+ messages in thread

end of thread, other threads:[~2023-05-17  0:50 UTC | newest]

Thread overview: 25+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-04-13  6:40 [PATCH net-next V2 0/2] virtio-net: don't busy poll for cvq command Jason Wang
2023-04-13  6:40 ` [PATCH net-next V2 1/2] virtio-net: convert rx mode setting to use workqueue Jason Wang
2023-04-13 16:25   ` Michael S. Tsirkin
2023-04-14  5:04     ` Jason Wang
2023-04-14  7:21       ` Michael S. Tsirkin
2023-04-17  3:40         ` Jason Wang
2023-05-05  3:46           ` Jason Wang
2023-05-10  5:32           ` Michael S. Tsirkin
2023-05-15  1:05             ` Jason Wang
2023-05-15  4:45               ` Michael S. Tsirkin
2023-05-15  5:13                 ` Jason Wang
2023-05-15 10:17                   ` Michael S. Tsirkin
2023-05-16  2:44                     ` Jason Wang
2023-05-16  4:12                       ` Michael S. Tsirkin
2023-05-16  4:17                         ` Jason Wang
2023-05-16  5:56                           ` Michael S. Tsirkin
2023-04-13  6:40 ` [PATCH net-next V2 2/2] virtio-net: sleep instead of busy waiting for cvq command Jason Wang
2023-04-13  7:27   ` Xuan Zhuo
2023-04-14  5:09     ` Jason Wang
2023-04-14  5:10       ` Jason Wang
2023-05-16 20:54   ` Michael S. Tsirkin
2023-05-17  0:50     ` Jason Wang
2023-04-13 13:02 ` [PATCH net-next V2 0/2] virtio-net: don't busy poll " Maxime Coquelin
2023-04-13 13:05   ` Maxime Coquelin
     [not found] ` <20230413070408.630fa731@kernel.org>
2023-04-14  4:53   ` Jason Wang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).