* [PATCH net-next v7 0/4] net: vhost: improve performance when enable busyloop
@ 2018-08-01  3:00 xiangxia.m.yue
  2018-08-01  3:00 ` [PATCH net-next v7 1/4] net: vhost: lock the vqs one by one xiangxia.m.yue
                   ` (6 more replies)
  0 siblings, 7 replies; 37+ messages in thread
From: xiangxia.m.yue @ 2018-08-01  3:00 UTC (permalink / raw)
  To: jasowang; +Cc: netdev, virtualization, mst

From: Tonghao Zhang <xiangxia.m.yue@gmail.com>

This series improves guest receive performance. On the
handle_tx side, we now poll the sock receive queue as well,
the same way handle_rx already polls the tx virtqueue.

For more performance report, see patch 4.
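
For reference, the busy-loop timeout that gates all of this is set
per virtqueue from userspace (qemu exposes it as the tap "poll-us"
option, see patch 4) via the VHOST_SET_VRING_BUSYLOOP_TIMEOUT ioctl.
A minimal sketch with example values:

	struct vhost_vring_state s = {
		.index = 0,	/* queue index, e.g. the rx queue */
		.num = 100,	/* busy-loop timeout in microseconds */
	};
	/* fd is an open /dev/vhost-net descriptor */
	ioctl(fd, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, &s);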

v6->v7:
fix issues and rebase the code:
1. on tx, busy polling now disables/enables the rx vq via
vhost_net_disable_vq()/vhost_net_enable_vq().
[This is suggested by Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>]

2. introduce a common helper, vhost_net_busy_poll_try_queue().

v5->v6:
rebase the code.

Tonghao Zhang (4):
  net: vhost: lock the vqs one by one
  net: vhost: replace magic number of lock annotation
  net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  net: vhost: add rx busy polling in tx path

 drivers/vhost/net.c   | 168 +++++++++++++++++++++++++++++++-------------------
 drivers/vhost/vhost.c |  24 +++-----
 2 files changed, 113 insertions(+), 79 deletions(-)

-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [PATCH net-next v7 1/4] net: vhost: lock the vqs one by one
  2018-08-01  3:00 [PATCH net-next v7 0/4] net: vhost: improve performance when enable busyloop xiangxia.m.yue
@ 2018-08-01  3:00 ` xiangxia.m.yue
  2018-08-01  3:00 ` [PATCH net-next v7 2/4] net: vhost: replace magic number of lock annotation xiangxia.m.yue
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 37+ messages in thread
From: xiangxia.m.yue @ 2018-08-01  3:00 UTC (permalink / raw)
  To: jasowang; +Cc: netdev, virtualization, mst

From: Tonghao Zhang <xiangxia.m.yue@gmail.com>

This patch changes the way we lock the vqs, from locking
them all at once to locking them one by one. This will be
used by a later patch to avoid deadlock.
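
A sketch of the ordering problem this prepares for, assuming the
tx-side busy polling added later in this series (an illustration,
not part of the posted patch):

	/*
	 * Patch 3/4 makes the tx path take the rx vq mutex while
	 * already holding the tx vq mutex:
	 *
	 *	handle_tx():	vqs[VHOST_NET_VQ_TX]->mutex
	 *			  -> vqs[VHOST_NET_VQ_RX]->mutex
	 *
	 * while vhost_dev_lock_vqs() took every vq mutex in index
	 * order under dev->mutex:
	 *
	 *	iotlb path:	vqs[0]->mutex -> vqs[1]->mutex -> ...
	 *
	 * Together these form a classic ABBA deadlock; locking the
	 * vqs one by one removes the second chain.
	 */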

Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 drivers/vhost/vhost.c | 24 +++++++-----------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index a502f1a..a1c06e7 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -294,8 +294,11 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
 {
 	int i;
 
-	for (i = 0; i < d->nvqs; ++i)
+	for (i = 0; i < d->nvqs; ++i) {
+		mutex_lock(&d->vqs[i]->mutex);
 		__vhost_vq_meta_reset(d->vqs[i]);
+		mutex_unlock(&d->vqs[i]->mutex);
+	}
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -890,20 +893,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 #define vhost_get_used(vq, x, ptr) \
 	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
 
-static void vhost_dev_lock_vqs(struct vhost_dev *d)
-{
-	int i = 0;
-	for (i = 0; i < d->nvqs; ++i)
-		mutex_lock_nested(&d->vqs[i]->mutex, i);
-}
-
-static void vhost_dev_unlock_vqs(struct vhost_dev *d)
-{
-	int i = 0;
-	for (i = 0; i < d->nvqs; ++i)
-		mutex_unlock(&d->vqs[i]->mutex);
-}
-
 static int vhost_new_umem_range(struct vhost_umem *umem,
 				u64 start, u64 size, u64 end,
 				u64 userspace_addr, int perm)
@@ -953,7 +942,10 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
 		if (msg->iova <= vq_msg->iova &&
 		    msg->iova + msg->size - 1 > vq_msg->iova &&
 		    vq_msg->type == VHOST_IOTLB_MISS) {
+			mutex_lock(&node->vq->mutex);
 			vhost_poll_queue(&node->vq->poll);
+			mutex_unlock(&node->vq->mutex);
+
 			list_del(&node->node);
 			kfree(node);
 		}
@@ -985,7 +977,6 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 	int ret = 0;
 
 	mutex_lock(&dev->mutex);
-	vhost_dev_lock_vqs(dev);
 	switch (msg->type) {
 	case VHOST_IOTLB_UPDATE:
 		if (!dev->iotlb) {
@@ -1019,7 +1010,6 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 		break;
 	}
 
-	vhost_dev_unlock_vqs(dev);
 	mutex_unlock(&dev->mutex);
 
 	return ret;
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH net-next v7 2/4] net: vhost: replace magic number of lock annotation
  2018-08-01  3:00 [PATCH net-next v7 0/4] net: vhost: improve performance when enable busyloop xiangxia.m.yue
  2018-08-01  3:00 ` [PATCH net-next v7 1/4] net: vhost: lock the vqs one by one xiangxia.m.yue
  2018-08-01  3:00 ` [PATCH net-next v7 2/4] net: vhost: replace magic number of lock annotation xiangxia.m.yue
@ 2018-08-01  3:00 ` xiangxia.m.yue
  2018-08-01  3:00 ` [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll() xiangxia.m.yue
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 37+ messages in thread
From: xiangxia.m.yue @ 2018-08-01  3:00 UTC (permalink / raw)
  To: jasowang; +Cc: mst, makita.toshiaki, virtualization, netdev, Tonghao Zhang

From: Tonghao Zhang <xiangxia.m.yue@gmail.com>

Use VHOST_NET_VQ_XXX as the lock subclass for mutex_lock_nested()
instead of magic numbers.
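
For reference, these subclasses are the existing virtqueue indices
defined in drivers/vhost/net.c:

	enum {
		VHOST_NET_VQ_RX = 0,
		VHOST_NET_VQ_TX = 1,
		VHOST_NET_VQ_MAX = 2,
	};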

Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Acked-by: Jason Wang <jasowang@redhat.com>
---
 drivers/vhost/net.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 367d802..32c1b52 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -712,7 +712,7 @@ static void handle_tx(struct vhost_net *net)
 	struct vhost_virtqueue *vq = &nvq->vq;
 	struct socket *sock;
 
-	mutex_lock(&vq->mutex);
+	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
 	sock = vq->private_data;
 	if (!sock)
 		goto out;
@@ -777,7 +777,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
 		/* Flush batched heads first */
 		vhost_net_signal_used(rnvq);
 		/* Both tx vq and rx socket were polled here */
-		mutex_lock_nested(&tvq->mutex, 1);
+		mutex_lock_nested(&tvq->mutex, VHOST_NET_VQ_TX);
 		vhost_disable_notify(&net->dev, tvq);
 
 		preempt_disable();
@@ -919,7 +919,7 @@ static void handle_rx(struct vhost_net *net)
 	__virtio16 num_buffers;
 	int recv_pkts = 0;
 
-	mutex_lock_nested(&vq->mutex, 0);
+	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
 	sock = vq->private_data;
 	if (!sock)
 		goto out;
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-01  3:00 [PATCH net-next v7 0/4] net: vhost: improve performance when enable busyloop xiangxia.m.yue
                   ` (2 preceding siblings ...)
  2018-08-01  3:00 ` xiangxia.m.yue
@ 2018-08-01  3:00 ` xiangxia.m.yue
  2018-08-01  6:01   ` Jason Wang
  2018-08-01  3:00 ` xiangxia.m.yue
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 37+ messages in thread
From: xiangxia.m.yue @ 2018-08-01  3:00 UTC (permalink / raw)
  To: jasowang; +Cc: mst, makita.toshiaki, virtualization, netdev, Tonghao Zhang

From: Tonghao Zhang <xiangxia.m.yue@gmail.com>

Factor out the generic busy polling logic; it will be used in
the tx path by the next patch. With this patch, qemu can also
set a different busyloop_timeout for the rx queue.

In handle_tx, busy polling now disables/enables the rx vq via
vhost_net_disable_vq()/vhost_net_enable_vq(), because we poll
the sock directly. This can improve performance.
[This is suggested by Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>]

And when the sock receives an skb, we queue the poll if necessary.
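
A sketch of the resulting call flow (the tx side is wired up by
patch 4/4; function names as in the diff below):

	handle_rx()
	  -> vhost_net_rx_peek_head_len()
	       -> vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true)
	            /* locks tvq, polls the tx avail ring and the rx sock */

	handle_tx()
	  -> vhost_net_tx_get_vq_desc()
	       -> vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false)
	            /* locks rvq, polls the rx sock and the tx avail ring */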

Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
---
 drivers/vhost/net.c | 131 ++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 91 insertions(+), 40 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 32c1b52..5b45463 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -440,6 +440,95 @@ static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
 	nvq->done_idx = 0;
 }
 
+static int sk_has_rx_data(struct sock *sk)
+{
+	struct socket *sock = sk->sk_socket;
+
+	if (sock->ops->peek_len)
+		return sock->ops->peek_len(sock);
+
+	return !skb_queue_empty(&sk->sk_receive_queue);
+}
+
+static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
+					  struct vhost_virtqueue *vq)
+{
+	if (!vhost_vq_avail_empty(&net->dev, vq)) {
+		vhost_poll_queue(&vq->poll);
+	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+		vhost_disable_notify(&net->dev, vq);
+		vhost_poll_queue(&vq->poll);
+	}
+}
+
+static void vhost_net_busy_poll_check(struct vhost_net *net,
+				      struct vhost_virtqueue *rvq,
+				      struct vhost_virtqueue *tvq,
+				      bool rx)
+{
+	struct socket *sock = rvq->private_data;
+
+	if (rx)
+		vhost_net_busy_poll_try_queue(net, tvq);
+	else if (sock && sk_has_rx_data(sock->sk))
+		vhost_net_busy_poll_try_queue(net, rvq);
+	else {
+		/* On tx here, sock has no rx data, so we
+		 * will wait for sock wakeup for rx, and
+		 * vhost_enable_notify() is not needed. */
+	}
+}
+
+static void vhost_net_busy_poll(struct vhost_net *net,
+				struct vhost_virtqueue *rvq,
+				struct vhost_virtqueue *tvq,
+				bool *busyloop_intr,
+				bool rx)
+{
+	unsigned long busyloop_timeout;
+	unsigned long endtime;
+	struct socket *sock;
+	struct vhost_virtqueue *vq = rx ? tvq : rvq;
+
+	mutex_lock_nested(&vq->mutex, rx ? VHOST_NET_VQ_TX : VHOST_NET_VQ_RX);
+	vhost_disable_notify(&net->dev, vq);
+	sock = rvq->private_data;
+
+	busyloop_timeout = rx ? rvq->busyloop_timeout :
+				tvq->busyloop_timeout;
+
+
+	/* Busypoll the sock, so don't need rx wakeups during it. */
+	if (!rx)
+		vhost_net_disable_vq(net, vq);
+
+	preempt_disable();
+	endtime = busy_clock() + busyloop_timeout;
+
+	while (vhost_can_busy_poll(endtime)) {
+		if (vhost_has_work(&net->dev)) {
+			*busyloop_intr = true;
+			break;
+		}
+
+		if ((sock && sk_has_rx_data(sock->sk) &&
+		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
+		    !vhost_vq_avail_empty(&net->dev, tvq))
+			break;
+
+		cpu_relax();
+	}
+
+	preempt_enable();
+
+	if (!rx)
+		vhost_net_enable_vq(net, vq);
+
+	vhost_net_busy_poll_check(net, rvq, tvq, rx);
+
+	mutex_unlock(&vq->mutex);
+}
+
 static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
 				    struct vhost_net_virtqueue *nvq,
 				    unsigned int *out_num, unsigned int *in_num,
@@ -753,16 +842,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
 	return len;
 }
 
-static int sk_has_rx_data(struct sock *sk)
-{
-	struct socket *sock = sk->sk_socket;
-
-	if (sock->ops->peek_len)
-		return sock->ops->peek_len(sock);
-
-	return !skb_queue_empty(&sk->sk_receive_queue);
-}
-
 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
 				      bool *busyloop_intr)
 {
@@ -770,41 +849,13 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
 	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
 	struct vhost_virtqueue *rvq = &rnvq->vq;
 	struct vhost_virtqueue *tvq = &tnvq->vq;
-	unsigned long uninitialized_var(endtime);
 	int len = peek_head_len(rnvq, sk);
 
-	if (!len && tvq->busyloop_timeout) {
+	if (!len && rvq->busyloop_timeout) {
 		/* Flush batched heads first */
 		vhost_net_signal_used(rnvq);
 		/* Both tx vq and rx socket were polled here */
-		mutex_lock_nested(&tvq->mutex, VHOST_NET_VQ_TX);
-		vhost_disable_notify(&net->dev, tvq);
-
-		preempt_disable();
-		endtime = busy_clock() + tvq->busyloop_timeout;
-
-		while (vhost_can_busy_poll(endtime)) {
-			if (vhost_has_work(&net->dev)) {
-				*busyloop_intr = true;
-				break;
-			}
-			if ((sk_has_rx_data(sk) &&
-			     !vhost_vq_avail_empty(&net->dev, rvq)) ||
-			    !vhost_vq_avail_empty(&net->dev, tvq))
-				break;
-			cpu_relax();
-		}
-
-		preempt_enable();
-
-		if (!vhost_vq_avail_empty(&net->dev, tvq)) {
-			vhost_poll_queue(&tvq->poll);
-		} else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
-			vhost_disable_notify(&net->dev, tvq);
-			vhost_poll_queue(&tvq->poll);
-		}
-
-		mutex_unlock(&tvq->mutex);
+		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);
 
 		len = peek_head_len(rnvq, sk);
 	}
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH net-next v7 4/4] net: vhost: add rx busy polling in tx path
  2018-08-01  3:00 [PATCH net-next v7 0/4] net: vhost: improve performance when enable busyloop xiangxia.m.yue
                   ` (5 preceding siblings ...)
  2018-08-01  3:00 ` [PATCH net-next v7 4/4] net: vhost: add rx busy polling in tx path xiangxia.m.yue
@ 2018-08-01  3:00 ` xiangxia.m.yue
  6 siblings, 0 replies; 37+ messages in thread
From: xiangxia.m.yue @ 2018-08-01  3:00 UTC (permalink / raw)
  To: jasowang; +Cc: mst, makita.toshiaki, virtualization, netdev, Tonghao Zhang

From: Tonghao Zhang <xiangxia.m.yue@gmail.com>

This patch improves guest receive performance.
On the handle_tx side, we poll the sock receive queue at the
same time; handle_rx already does the same in the other direction.

We set poll-us=100us and use netperf to measure throughput and
mean latency. While the tests run, the vhost-net kthread of that
VM stays at 100% CPU. The command is shown below.

Rx performance is greatly improved by this patch. There is no
notable tx performance change with this series, though. The
patch is useful for bidirectional traffic.

netperf -H IP -t TCP_STREAM -l 20 -- -O "THROUGHPUT, THROUGHPUT_UNITS, MEAN_LATENCY"

Topology:
[Host] -> linux bridge -> tap vhost-net -> [Guest]

TCP_STREAM:
* Without the patch:  19842.95 Mbps, 6.50 us mean latency
* With the patch:     38570.00 Mbps, 3.43 us mean latency
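
That works out to roughly (derived from the numbers above):

	38570.00 / 19842.95 ~= 1.94x throughput
	6.50 / 3.43         ~= 1.90x lower mean latency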

Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
---
 drivers/vhost/net.c | 33 +++++++++++++--------------------
 1 file changed, 13 insertions(+), 20 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 5b45463..b56db65 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -530,31 +530,24 @@ static void vhost_net_busy_poll(struct vhost_net *net,
 }
 
 static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
-				    struct vhost_net_virtqueue *nvq,
+				    struct vhost_net_virtqueue *tnvq,
 				    unsigned int *out_num, unsigned int *in_num,
 				    bool *busyloop_intr)
 {
-	struct vhost_virtqueue *vq = &nvq->vq;
-	unsigned long uninitialized_var(endtime);
-	int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
+	struct vhost_virtqueue *rvq = &rnvq->vq;
+	struct vhost_virtqueue *tvq = &tnvq->vq;
+
+	int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
 				  out_num, in_num, NULL, NULL);
 
-	if (r == vq->num && vq->busyloop_timeout) {
-		if (!vhost_sock_zcopy(vq->private_data))
-			vhost_net_signal_used(nvq);
-		preempt_disable();
-		endtime = busy_clock() + vq->busyloop_timeout;
-		while (vhost_can_busy_poll(endtime)) {
-			if (vhost_has_work(vq->dev)) {
-				*busyloop_intr = true;
-				break;
-			}
-			if (!vhost_vq_avail_empty(vq->dev, vq))
-				break;
-			cpu_relax();
-		}
-		preempt_enable();
-		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+	if (r == tvq->num && tvq->busyloop_timeout) {
+		if (!vhost_sock_zcopy(tvq->private_data))
+			vhost_net_signal_used(tnvq);
+
+		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
+
+		r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
 				      out_num, in_num, NULL, NULL);
 	}
 
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-01  3:00 ` [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll() xiangxia.m.yue
@ 2018-08-01  6:01   ` Jason Wang
  2018-08-01  9:52     ` Tonghao Zhang
  0 siblings, 1 reply; 37+ messages in thread
From: Jason Wang @ 2018-08-01  6:01 UTC (permalink / raw)
  To: xiangxia.m.yue; +Cc: netdev, virtualization, mst



On 2018/08/01 11:00, xiangxia.m.yue@gmail.com wrote:
> From: Tonghao Zhang <xiangxia.m.yue@gmail.com>
>
> Factor out the generic busy polling logic; it will be used in
> the tx path by the next patch. With this patch, qemu can also
> set a different busyloop_timeout for the rx queue.
>
> In handle_tx, busy polling now disables/enables the rx vq via
> vhost_net_disable_vq()/vhost_net_enable_vq(), because we poll
> the sock directly. This can improve performance.
> [This is suggested by Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>]
>
> And when the sock receives an skb, we queue the poll if necessary.
>
> Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
> ---
>   drivers/vhost/net.c | 131 ++++++++++++++++++++++++++++++++++++----------------
>   1 file changed, 91 insertions(+), 40 deletions(-)
>
> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
> index 32c1b52..5b45463 100644
> --- a/drivers/vhost/net.c
> +++ b/drivers/vhost/net.c
> @@ -440,6 +440,95 @@ static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
>   	nvq->done_idx = 0;
>   }
>   
> +static int sk_has_rx_data(struct sock *sk)
> +{
> +	struct socket *sock = sk->sk_socket;
> +
> +	if (sock->ops->peek_len)
> +		return sock->ops->peek_len(sock);
> +
> +	return !skb_queue_empty(&sk->sk_receive_queue);
> +}
> +
> +static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
> +					  struct vhost_virtqueue *vq)
> +{
> +	if (!vhost_vq_avail_empty(&net->dev, vq)) {
> +		vhost_poll_queue(&vq->poll);
> +	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
> +		vhost_disable_notify(&net->dev, vq);
> +		vhost_poll_queue(&vq->poll);
> +	}
> +}
> +
> +static void vhost_net_busy_poll_check(struct vhost_net *net,
> +				      struct vhost_virtqueue *rvq,
> +				      struct vhost_virtqueue *tvq,
> +				      bool rx)
> +{
> +	struct socket *sock = rvq->private_data;
> +
> +	if (rx)
> +		vhost_net_busy_poll_try_queue(net, tvq);
> +	else if (sock && sk_has_rx_data(sock->sk))
> +		vhost_net_busy_poll_try_queue(net, rvq);
> +	else {
> +		/* On tx here, sock has no rx data, so we
> +		 * will wait for sock wakeup for rx, and
> +		 * vhost_enable_notify() is not needed. */

A possible case is we do have rx data but guest does not refill the rx 
queue. In this case we may lose notifications from guest.

> +	}
> +}
> +
> +static void vhost_net_busy_poll(struct vhost_net *net,
> +				struct vhost_virtqueue *rvq,
> +				struct vhost_virtqueue *tvq,
> +				bool *busyloop_intr,
> +				bool rx)
> +{
> +	unsigned long busyloop_timeout;
> +	unsigned long endtime;
> +	struct socket *sock;
> +	struct vhost_virtqueue *vq = rx ? tvq : rvq;
> +
> +	mutex_lock_nested(&vq->mutex, rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX);
> +	vhost_disable_notify(&net->dev, vq);
> +	sock = rvq->private_data;
> +
> +	busyloop_timeout = rx ? rvq->busyloop_timeout:
> +				tvq->busyloop_timeout;
> +
> +
> +	/* Busypoll the sock, so don't need rx wakeups during it. */
> +	if (!rx)
> +		vhost_net_disable_vq(net, vq);

Actually, this piece of code is not a pure factoring out. I would
suggest adding it in another patch, or on top of this series.

> +
> +	preempt_disable();
> +	endtime = busy_clock() + busyloop_timeout;
> +
> +	while (vhost_can_busy_poll(endtime)) {
> +		if (vhost_has_work(&net->dev)) {
> +			*busyloop_intr = true;
> +			break;
> +		}
> +
> +		if ((sock && sk_has_rx_data(sock->sk) &&
> +		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
> +		    !vhost_vq_avail_empty(&net->dev, tvq))
> +			break;

Some checks are duplicated in vhost_net_busy_poll_check(). We need
to consider unifying them.

> +
> +		cpu_relax();
> +	}
> +
> +	preempt_enable();
> +
> +	if (!rx)
> +		vhost_net_enable_vq(net, vq);

No need to enable rx virtqueue, if we are sure handle_rx() will be 
called soon.

> +
> +	vhost_net_busy_poll_check(net, rvq, tvq, rx);

It looks to me that just open coding all the checks here would be
better and easier to review.

Thanks

> +
> +	mutex_unlock(&vq->mutex);
> +}
> +
>   static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
>   				    struct vhost_net_virtqueue *nvq,
>   				    unsigned int *out_num, unsigned int *in_num,
> @@ -753,16 +842,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
>   	return len;
>   }
>   
> -static int sk_has_rx_data(struct sock *sk)
> -{
> -	struct socket *sock = sk->sk_socket;
> -
> -	if (sock->ops->peek_len)
> -		return sock->ops->peek_len(sock);
> -
> -	return !skb_queue_empty(&sk->sk_receive_queue);
> -}
> -
>   static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
>   				      bool *busyloop_intr)
>   {
> @@ -770,41 +849,13 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
>   	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
>   	struct vhost_virtqueue *rvq = &rnvq->vq;
>   	struct vhost_virtqueue *tvq = &tnvq->vq;
> -	unsigned long uninitialized_var(endtime);
>   	int len = peek_head_len(rnvq, sk);
>   
> -	if (!len && tvq->busyloop_timeout) {
> +	if (!len && rvq->busyloop_timeout) {
>   		/* Flush batched heads first */
>   		vhost_net_signal_used(rnvq);
>   		/* Both tx vq and rx socket were polled here */
> -		mutex_lock_nested(&tvq->mutex, VHOST_NET_VQ_TX);
> -		vhost_disable_notify(&net->dev, tvq);
> -
> -		preempt_disable();
> -		endtime = busy_clock() + tvq->busyloop_timeout;
> -
> -		while (vhost_can_busy_poll(endtime)) {
> -			if (vhost_has_work(&net->dev)) {
> -				*busyloop_intr = true;
> -				break;
> -			}
> -			if ((sk_has_rx_data(sk) &&
> -			     !vhost_vq_avail_empty(&net->dev, rvq)) ||
> -			    !vhost_vq_avail_empty(&net->dev, tvq))
> -				break;
> -			cpu_relax();
> -		}
> -
> -		preempt_enable();
> -
> -		if (!vhost_vq_avail_empty(&net->dev, tvq)) {
> -			vhost_poll_queue(&tvq->poll);
> -		} else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
> -			vhost_disable_notify(&net->dev, tvq);
> -			vhost_poll_queue(&tvq->poll);
> -		}
> -
> -		mutex_unlock(&tvq->mutex);
> +		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);
>   
>   		len = peek_head_len(rnvq, sk);
>   	}

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-01  6:01   ` Jason Wang
@ 2018-08-01  9:52     ` Tonghao Zhang
  2018-08-02  8:18       ` Jason Wang
  2018-08-02  8:18       ` Jason Wang
  0 siblings, 2 replies; 37+ messages in thread
From: Tonghao Zhang @ 2018-08-01  9:52 UTC (permalink / raw)
  To: jasowang; +Cc: Linux Kernel Network Developers, virtualization, mst

On Wed, Aug 1, 2018 at 2:01 PM Jason Wang <jasowang@redhat.com> wrote:
>
>
>
> On 2018/08/01 11:00, xiangxia.m.yue@gmail.com wrote:
> > From: Tonghao Zhang <xiangxia.m.yue@gmail.com>
> >
> > Factor out the generic busy polling logic; it will be used in
> > the tx path by the next patch. With this patch, qemu can also
> > set a different busyloop_timeout for the rx queue.
> >
> > In handle_tx, busy polling now disables/enables the rx vq via
> > vhost_net_disable_vq()/vhost_net_enable_vq(), because we poll
> > the sock directly. This can improve performance.
> > [This is suggested by Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>]
> >
> > And when the sock receives an skb, we queue the poll if necessary.
> >
> > Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
> > ---
> >   drivers/vhost/net.c | 131 ++++++++++++++++++++++++++++++++++++----------------
> >   1 file changed, 91 insertions(+), 40 deletions(-)
> >
> > diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
> > index 32c1b52..5b45463 100644
> > --- a/drivers/vhost/net.c
> > +++ b/drivers/vhost/net.c
> > @@ -440,6 +440,95 @@ static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
> >       nvq->done_idx = 0;
> >   }
> >
> > +static int sk_has_rx_data(struct sock *sk)
> > +{
> > +     struct socket *sock = sk->sk_socket;
> > +
> > +     if (sock->ops->peek_len)
> > +             return sock->ops->peek_len(sock);
> > +
> > +     return !skb_queue_empty(&sk->sk_receive_queue);
> > +}
> > +
> > +static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
> > +                                       struct vhost_virtqueue *vq)
> > +{
> > +     if (!vhost_vq_avail_empty(&net->dev, vq)) {
> > +             vhost_poll_queue(&vq->poll);
> > +     } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
> > +             vhost_disable_notify(&net->dev, vq);
> > +             vhost_poll_queue(&vq->poll);
> > +     }
> > +}
> > +
> > +static void vhost_net_busy_poll_check(struct vhost_net *net,
> > +                                   struct vhost_virtqueue *rvq,
> > +                                   struct vhost_virtqueue *tvq,
> > +                                   bool rx)
> > +{
> > +     struct socket *sock = rvq->private_data;
> > +
> > +     if (rx)
> > +             vhost_net_busy_poll_try_queue(net, tvq);
> > +     else if (sock && sk_has_rx_data(sock->sk))
> > +             vhost_net_busy_poll_try_queue(net, rvq);
> > +     else {
> > +             /* On tx here, sock has no rx data, so we
> > +              * will wait for sock wakeup for rx, and
> > +              * vhost_enable_notify() is not needed. */
>
> A possible case is we do have rx data but guest does not refill the rx
> queue. In this case we may lose notifications from guest.
Yes, should consider this case. thanks.
> > +     }
> > +}
> > +
> > +static void vhost_net_busy_poll(struct vhost_net *net,
> > +                             struct vhost_virtqueue *rvq,
> > +                             struct vhost_virtqueue *tvq,
> > +                             bool *busyloop_intr,
> > +                             bool rx)
> > +{
> > +     unsigned long busyloop_timeout;
> > +     unsigned long endtime;
> > +     struct socket *sock;
> > +     struct vhost_virtqueue *vq = rx ? tvq : rvq;
> > +
> > +     mutex_lock_nested(&vq->mutex, rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX);
> > +     vhost_disable_notify(&net->dev, vq);
> > +     sock = rvq->private_data;
> > +
> > +     busyloop_timeout = rx ? rvq->busyloop_timeout:
> > +                             tvq->busyloop_timeout;
> > +
> > +
> > +     /* Busypoll the sock, so don't need rx wakeups during it. */
> > +     if (!rx)
> > +             vhost_net_disable_vq(net, vq);
>
> Actually, this piece of code is not a pure factoring out. I would
> suggest adding it in another patch, or on top of this series.
I will add this in another patch.
> > +
> > +     preempt_disable();
> > +     endtime = busy_clock() + busyloop_timeout;
> > +
> > +     while (vhost_can_busy_poll(endtime)) {
> > +             if (vhost_has_work(&net->dev)) {
> > +                     *busyloop_intr = true;
> > +                     break;
> > +             }
> > +
> > +             if ((sock && sk_has_rx_data(sock->sk) &&
> > +                  !vhost_vq_avail_empty(&net->dev, rvq)) ||
> > +                 !vhost_vq_avail_empty(&net->dev, tvq))
> > +                     break;
>
> Some checks are duplicated in vhost_net_busy_poll_check(). We need
> to consider unifying them.
OK
> > +
> > +             cpu_relax();
> > +     }
> > +
> > +     preempt_enable();
> > +
> > +     if (!rx)
> > +             vhost_net_enable_vq(net, vq);
>
> No need to enable rx virtqueue, if we are sure handle_rx() will be
> called soon.
If we disable the rx virtqueue in handle_tx and the guest doesn't
send packets anymore (handle_tx is not called), we will never be
woken up for sock rx, so the network is broken.
> > +
> > +     vhost_net_busy_poll_check(net, rvq, tvq, rx);
>
> It looks to me that just open coding all the checks here would be
> better and easier to review.
will be changed.
> Thanks
>
> > +
> > +     mutex_unlock(&vq->mutex);
> > +}
> > +
> >   static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
> >                                   struct vhost_net_virtqueue *nvq,
> >                                   unsigned int *out_num, unsigned int *in_num,
> > @@ -753,16 +842,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
> >       return len;
> >   }
> >
> > -static int sk_has_rx_data(struct sock *sk)
> > -{
> > -     struct socket *sock = sk->sk_socket;
> > -
> > -     if (sock->ops->peek_len)
> > -             return sock->ops->peek_len(sock);
> > -
> > -     return !skb_queue_empty(&sk->sk_receive_queue);
> > -}
> > -
> >   static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
> >                                     bool *busyloop_intr)
> >   {
> > @@ -770,41 +849,13 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
> >       struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
> >       struct vhost_virtqueue *rvq = &rnvq->vq;
> >       struct vhost_virtqueue *tvq = &tnvq->vq;
> > -     unsigned long uninitialized_var(endtime);
> >       int len = peek_head_len(rnvq, sk);
> >
> > -     if (!len && tvq->busyloop_timeout) {
> > +     if (!len && rvq->busyloop_timeout) {
> >               /* Flush batched heads first */
> >               vhost_net_signal_used(rnvq);
> >               /* Both tx vq and rx socket were polled here */
> > -             mutex_lock_nested(&tvq->mutex, VHOST_NET_VQ_TX);
> > -             vhost_disable_notify(&net->dev, tvq);
> > -
> > -             preempt_disable();
> > -             endtime = busy_clock() + tvq->busyloop_timeout;
> > -
> > -             while (vhost_can_busy_poll(endtime)) {
> > -                     if (vhost_has_work(&net->dev)) {
> > -                             *busyloop_intr = true;
> > -                             break;
> > -                     }
> > -                     if ((sk_has_rx_data(sk) &&
> > -                          !vhost_vq_avail_empty(&net->dev, rvq)) ||
> > -                         !vhost_vq_avail_empty(&net->dev, tvq))
> > -                             break;
> > -                     cpu_relax();
> > -             }
> > -
> > -             preempt_enable();
> > -
> > -             if (!vhost_vq_avail_empty(&net->dev, tvq)) {
> > -                     vhost_poll_queue(&tvq->poll);
> > -             } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
> > -                     vhost_disable_notify(&net->dev, tvq);
> > -                     vhost_poll_queue(&tvq->poll);
> > -             }
> > -
> > -             mutex_unlock(&tvq->mutex);
> > +             vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);
> >
> >               len = peek_head_len(rnvq, sk);
> >       }
>

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-01  9:52     ` Tonghao Zhang
@ 2018-08-02  8:18       ` Jason Wang
  2018-08-02  8:41         ` Toshiaki Makita
  2018-08-02  8:18       ` Jason Wang
  1 sibling, 1 reply; 37+ messages in thread
From: Jason Wang @ 2018-08-02  8:18 UTC (permalink / raw)
  To: Tonghao Zhang
  Cc: mst, makita.toshiaki, virtualization, Linux Kernel Network Developers



On 2018/08/01 17:52, Tonghao Zhang wrote:
>>> +
>>> +             cpu_relax();
>>> +     }
>>> +
>>> +     preempt_enable();
>>> +
>>> +     if (!rx)
>>> +             vhost_net_enable_vq(net, vq);
>> No need to enable rx virtqueue, if we are sure handle_rx() will be
>> called soon.
> If we disable the rx virtqueue in handle_tx and the guest doesn't
> send packets anymore (handle_tx is not called), we will never be
> woken up for sock rx, so the network is broken.

Not sure I understand here. I mean if we schedule work for handle_rx(),
there's no need to enable it since handle_rx() will do this for us.

Thanks

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-02  8:18       ` Jason Wang
@ 2018-08-02  8:41         ` Toshiaki Makita
  2018-08-02  9:23           ` Jason Wang
  0 siblings, 1 reply; 37+ messages in thread
From: Toshiaki Makita @ 2018-08-02  8:41 UTC (permalink / raw)
  To: Jason Wang, Tonghao Zhang
  Cc: Linux Kernel Network Developers, virtualization, mst

On 2018/08/02 17:18, Jason Wang wrote:
> On 2018/08/01 17:52, Tonghao Zhang wrote:
>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
>>> +                                   struct vhost_virtqueue *rvq,
>>> +                                   struct vhost_virtqueue *tvq,
>>> +                                   bool rx)
>>> +{
>>> +     struct socket *sock = rvq->private_data;
>>> +
>>> +     if (rx)
>>> +             vhost_net_busy_poll_try_queue(net, tvq);
>>> +     else if (sock && sk_has_rx_data(sock->sk))
>>> +             vhost_net_busy_poll_try_queue(net, rvq);
>>> +     else {
>>> +             /* On tx here, sock has no rx data, so we
>>> +              * will wait for sock wakeup for rx, and
>>> +              * vhost_enable_notify() is not needed. */
>>
>> A possible case is we do have rx data but guest does not refill the rx
>> queue. In this case we may lose notifications from guest.
> Yes, should consider this case. thanks.

I'm a bit confused. Isn't this covered by the previous
"else if (sock && sk_has_rx_data(...))" block?

>>>> +
>>>> +             cpu_relax();
>>>> +     }
>>>> +
>>>> +     preempt_enable();
>>>> +
>>>> +     if (!rx)
>>>> +             vhost_net_enable_vq(net, vq);
>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
>>> called soon.
>> If we disable the rx virtqueue in handle_tx and the guest doesn't
>> send packets anymore (handle_tx is not called), we will never be
>> woken up for sock rx, so the network is broken.
> 
> Not sure I understand here. I mean if we schedule work for handle_rx(),
> there's no need to enable it since handle_rx() will do this for us.

Looks like in the last "else" block in vhost_net_busy_poll_check() we
need to enable vq since in that case we have no rx data and handle_rx()
is not scheduled.
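
A minimal sketch of that adjustment (illustrative only, assuming
vhost_net_enable_vq() may be called from this context):

	static void vhost_net_busy_poll_check(struct vhost_net *net,
					      struct vhost_virtqueue *rvq,
					      struct vhost_virtqueue *tvq,
					      bool rx)
	{
		struct socket *sock = rvq->private_data;

		if (rx) {
			vhost_net_busy_poll_try_queue(net, tvq);
		} else if (sock && sk_has_rx_data(sock->sk)) {
			vhost_net_busy_poll_try_queue(net, rvq);
		} else {
			/* No rx data and handle_rx() is not scheduled,
			 * so re-enable rx wakeups before we leave. */
			vhost_net_enable_vq(net, rvq);
		}
	}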

-- 
Toshiaki Makita

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-02  8:41         ` Toshiaki Makita
@ 2018-08-02  9:23           ` Jason Wang
  2018-08-02  9:57             ` Toshiaki Makita
                               ` (4 more replies)
  0 siblings, 5 replies; 37+ messages in thread
From: Jason Wang @ 2018-08-02  9:23 UTC (permalink / raw)
  To: Toshiaki Makita, Tonghao Zhang
  Cc: Linux Kernel Network Developers, virtualization, mst



On 2018/08/02 16:41, Toshiaki Makita wrote:
> On 2018/08/02 17:18, Jason Wang wrote:
>> On 2018/08/01 17:52, Tonghao Zhang wrote:
>>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
>>>> +                                   struct vhost_virtqueue *rvq,
>>>> +                                   struct vhost_virtqueue *tvq,
>>>> +                                   bool rx)
>>>> +{
>>>> +     struct socket *sock = rvq->private_data;
>>>> +
>>>> +     if (rx)
>>>> +             vhost_net_busy_poll_try_queue(net, tvq);
>>>> +     else if (sock && sk_has_rx_data(sock->sk))
>>>> +             vhost_net_busy_poll_try_queue(net, rvq);
>>>> +     else {
>>>> +             /* On tx here, sock has no rx data, so we
>>>> +              * will wait for sock wakeup for rx, and
>>>> +              * vhost_enable_notify() is not needed. */
>>> A possible case is we do have rx data but guest does not refill the rx
>>> queue. In this case we may lose notifications from guest.
>> Yes, should consider this case. thanks.
> I'm a bit confused. Isn't this covered by the previous
> "else if (sock && sk_has_rx_data(...))" block?

The problem is that it does nothing if vhost_vq_avail_empty() is true
and vhost_enable_notify() is false.

>
>>>>> +
>>>>> +             cpu_relax();
>>>>> +     }
>>>>> +
>>>>> +     preempt_enable();
>>>>> +
>>>>> +     if (!rx)
>>>>> +             vhost_net_enable_vq(net, vq);
>>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
>>>> called soon.
>>> If we disable the rx virtqueue in handle_tx and the guest doesn't
>>> send packets anymore (handle_tx is not called), we will never be
>>> woken up for sock rx, so the network is broken.
>> Not sure I understand here. I mean if we schedule work for handle_rx(),
>> there's no need to enable it since handle_rx() will do this for us.
> Looks like in the last "else" block in vhost_net_busy_poll_check() we
> need to enable vq since in that case we have no rx data and handle_rx()
> is not scheduled.
>

Yes.

Thanks

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-02  9:23           ` Jason Wang
@ 2018-08-02  9:57             ` Toshiaki Makita
  2018-08-03  2:38               ` Jason Wang
  2018-08-03  2:38               ` Jason Wang
  2018-08-03  2:51             ` Tonghao Zhang
                               ` (3 subsequent siblings)
  4 siblings, 2 replies; 37+ messages in thread
From: Toshiaki Makita @ 2018-08-02  9:57 UTC (permalink / raw)
  To: Jason Wang, Tonghao Zhang
  Cc: Linux Kernel Network Developers, virtualization, mst

On 2018/08/02 18:23, Jason Wang wrote:
> On 2018/08/02 16:41, Toshiaki Makita wrote:
>> On 2018/08/02 17:18, Jason Wang wrote:
>>> On 2018/08/01 17:52, Tonghao Zhang wrote:
>>>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
>>>>> +                                   struct vhost_virtqueue *rvq,
>>>>> +                                   struct vhost_virtqueue *tvq,
>>>>> +                                   bool rx)
>>>>> +{
>>>>> +     struct socket *sock = rvq->private_data;
>>>>> +
>>>>> +     if (rx)
>>>>> +             vhost_net_busy_poll_try_queue(net, tvq);
>>>>> +     else if (sock && sk_has_rx_data(sock->sk))
>>>>> +             vhost_net_busy_poll_try_queue(net, rvq);
>>>>> +     else {
>>>>> +             /* On tx here, sock has no rx data, so we
>>>>> +              * will wait for sock wakeup for rx, and
>>>>> +              * vhost_enable_notify() is not needed. */
>>>> A possible case is we do have rx data but guest does not refill the rx
>>>> queue. In this case we may lose notifications from guest.
>>> Yes, should consider this case. thanks.
>> I'm a bit confused. Isn't this covered by the previous
>> "else if (sock && sk_has_rx_data(...))" block?
> 
>> The problem is that it does nothing if vhost_vq_avail_empty() is true
>> and vhost_enable_notify() is false.

If vhost_enable_notify() is false, the guest will eventually kick the vq, no?
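
For context, this is the pattern vhost_net_busy_poll_try_queue()
relies on: vhost_enable_notify() re-arms guest notification and
returns true only if new avail buffers appeared while notification
was disabled.

	if (unlikely(vhost_enable_notify(&net->dev, vq))) {
		/* Buffers raced in before notification was armed;
		 * turn it back off and process them ourselves. */
		vhost_disable_notify(&net->dev, vq);
		vhost_poll_queue(&vq->poll);
	}
	/* else: notification is armed and the guest's next
	 * avail-ring update will kick the vq. */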

-- 
Toshiaki Makita

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-02  9:57             ` Toshiaki Makita
  2018-08-03  2:38               ` Jason Wang
@ 2018-08-03  2:38               ` Jason Wang
  1 sibling, 0 replies; 37+ messages in thread
From: Jason Wang @ 2018-08-03  2:38 UTC (permalink / raw)
  To: Toshiaki Makita, Tonghao Zhang
  Cc: mst, virtualization, Linux Kernel Network Developers



On 2018/08/02 17:57, Toshiaki Makita wrote:
> On 2018/08/02 18:23, Jason Wang wrote:
>> On 2018/08/02 16:41, Toshiaki Makita wrote:
>>> On 2018/08/02 17:18, Jason Wang wrote:
>>>> On 2018/08/01 17:52, Tonghao Zhang wrote:
>>>>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
>>>>>> +                                   struct vhost_virtqueue *rvq,
>>>>>> +                                   struct vhost_virtqueue *tvq,
>>>>>> +                                   bool rx)
>>>>>> +{
>>>>>> +     struct socket *sock = rvq->private_data;
>>>>>> +
>>>>>> +     if (rx)
>>>>>> +             vhost_net_busy_poll_try_queue(net, tvq);
>>>>>> +     else if (sock && sk_has_rx_data(sock->sk))
>>>>>> +             vhost_net_busy_poll_try_queue(net, rvq);
>>>>>> +     else {
>>>>>> +             /* On tx here, sock has no rx data, so we
>>>>>> +              * will wait for sock wakeup for rx, and
>>>>>> +              * vhost_enable_notify() is not needed. */
>>>>> A possible case is we do have rx data but guest does not refill the rx
>>>>> queue. In this case we may lose notifications from guest.
>>>> Yes, should consider this case. thanks.
>>> I'm a bit confused. Isn't this covered by the previous
>>> "else if (sock && sk_has_rx_data(...))" block?
>> The problem is that it does nothing if vhost_vq_avail_empty() is true
>> and vhost_enable_notify() is false.
> If vhost_enable_notify() is false, the guest will eventually kick the vq, no?
>

Yes, you are right.

Thanks

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-02  9:23           ` Jason Wang
  2018-08-02  9:57             ` Toshiaki Makita
  2018-08-03  2:51             ` Tonghao Zhang
@ 2018-08-03  2:51             ` Tonghao Zhang
  2018-08-03  3:07               ` Jason Wang
  2018-08-03  3:07               ` Jason Wang
  2018-08-03  3:07             ` Jason Wang
  2018-08-03  3:07             ` Jason Wang
  4 siblings, 2 replies; 37+ messages in thread
From: Tonghao Zhang @ 2018-08-03  2:51 UTC (permalink / raw)
  To: jasowang
  Cc: makita.toshiaki, mst, virtualization, Linux Kernel Network Developers

On Thu, Aug 2, 2018 at 5:23 PM Jason Wang <jasowang@redhat.com> wrote:
>
>
>
> On 2018/08/02 16:41, Toshiaki Makita wrote:
> > On 2018/08/02 17:18, Jason Wang wrote:
> >> On 2018/08/01 17:52, Tonghao Zhang wrote:
> >>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
> >>>> +                                   struct vhost_virtqueue *rvq,
> >>>> +                                   struct vhost_virtqueue *tvq,
> >>>> +                                   bool rx)
> >>>> +{
> >>>> +     struct socket *sock = rvq->private_data;
> >>>> +
> >>>> +     if (rx)
> >>>> +             vhost_net_busy_poll_try_queue(net, tvq);
> >>>> +     else if (sock && sk_has_rx_data(sock->sk))
> >>>> +             vhost_net_busy_poll_try_queue(net, rvq);
> >>>> +     else {
> >>>> +             /* On tx here, sock has no rx data, so we
> >>>> +              * will wait for sock wakeup for rx, and
> >>>> +              * vhost_enable_notify() is not needed. */
> >>> A possible case is we do have rx data but guest does not refill the rx
> >>> queue. In this case we may lose notifications from guest.
> >> Yes, should consider this case. thanks.
> > I'm a bit confused. Isn't this covered by the previous
> > "else if (sock && sk_has_rx_data(...))" block?
>
> The problem is it does nothing if vhost_vq_avail_empty() is true and
> > vhost_enable_notify() is false.
>
> >
> >>>>> +
> >>>>> +             cpu_relax();
> >>>>> +     }
> >>>>> +
> >>>>> +     preempt_enable();
> >>>>> +
> >>>>> +     if (!rx)
> >>>>> +             vhost_net_enable_vq(net, vq);
> >>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
> >>>> called soon.
> >>> If we disable rx virtqueue in handle_tx and don't send packets from
> >>> guest anymore(handle_tx is not called), so we can wake up for sock rx.
> >>> so the network is broken.
> >> Not sure I understand here. I mean is we schedule work for handle_rx(),
> >> there's no need to enable it since handle_rx() will do this for us.
> > Looks like in the last "else" block in vhost_net_busy_poll_check() we
> > need to enable vq since in that case we have no rx data and handle_rx()
> > is not scheduled.
> >
>
> Yes.
So we will use vhost_has_work() to check whether or not handle_rx()
is scheduled? If we use vhost_has_work(), the work in the dev
work_list may be either rx work or tx work, right?
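
For reference, vhost_has_work() is per-device: it only checks whether
the dev work list is non-empty, so it cannot tell rx work from tx
work. Paraphrasing drivers/vhost/vhost.c:

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	/* any queued work counts here, rx and tx alike */
	return !list_empty(&dev->work_list);
}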

> Thanks

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-02  9:23           ` Jason Wang
                               ` (2 preceding siblings ...)
  2018-08-03  2:51             ` Tonghao Zhang
@ 2018-08-03  3:07             ` Jason Wang
  2018-08-03  3:32               ` Toshiaki Makita
  2018-08-03  3:07             ` Jason Wang
  4 siblings, 1 reply; 37+ messages in thread
From: Jason Wang @ 2018-08-03  3:07 UTC (permalink / raw)
  To: Toshiaki Makita, Tonghao Zhang
  Cc: mst, virtualization, Linux Kernel Network Developers



On 2018-08-02 17:23, Jason Wang wrote:
>>>>>>
>>>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
>>>>> called soon.
>>>> If we disable rx virtqueue in handle_tx and don't send packets from
>>>> guest anymore(handle_tx is not called), so we can wake up for sock rx.
>>>> so the network is broken.
>>> Not sure I understand here. I mean is we schedule work for handle_rx(),
>>> there's no need to enable it since handle_rx() will do this for us.
>> Looks like in the last "else" block in vhost_net_busy_poll_check() we
>> need to enable vq since in that case we have no rx data and handle_rx()
>> is not scheduled.
>>
>
> Yes.
>
> Thanks 

Rethinking about this, it looks like it is not needed. We enable rx
wakeups in this case, so if there's pending data, handle_rx() will be
scheduled after vhost_net_enable_vq().

Thanks

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-03  2:51             ` Tonghao Zhang
  2018-08-03  3:07               ` Jason Wang
@ 2018-08-03  3:07               ` Jason Wang
  2018-08-03  3:24                 ` Tonghao Zhang
  2018-08-03  3:24                 ` Tonghao Zhang
  1 sibling, 2 replies; 37+ messages in thread
From: Jason Wang @ 2018-08-03  3:07 UTC (permalink / raw)
  To: Tonghao Zhang
  Cc: makita.toshiaki, mst, virtualization, Linux Kernel Network Developers



On 2018-08-03 10:51, Tonghao Zhang wrote:
> On Thu, Aug 2, 2018 at 5:23 PM Jason Wang <jasowang@redhat.com> wrote:
>>
>>
>> On 2018-08-02 16:41, Toshiaki Makita wrote:
>>> On 2018/08/02 17:18, Jason Wang wrote:
>>>> On 2018-08-01 17:52, Tonghao Zhang wrote:
>>>>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
>>>>>> +                                   struct vhost_virtqueue *rvq,
>>>>>> +                                   struct vhost_virtqueue *tvq,
>>>>>> +                                   bool rx)
>>>>>> +{
>>>>>> +     struct socket *sock = rvq->private_data;
>>>>>> +
>>>>>> +     if (rx)
>>>>>> +             vhost_net_busy_poll_try_queue(net, tvq);
>>>>>> +     else if (sock && sk_has_rx_data(sock->sk))
>>>>>> +             vhost_net_busy_poll_try_queue(net, rvq);
>>>>>> +     else {
>>>>>> +             /* On tx here, sock has no rx data, so we
>>>>>> +              * will wait for sock wakeup for rx, and
>>>>>> +              * vhost_enable_notify() is not needed. */
>>>>> A possible case is we do have rx data but guest does not refill the rx
>>>>> queue. In this case we may lose notifications from guest.
>>>> Yes, should consider this case. thanks.
>>> I'm a bit confused. Isn't this covered by the previous
>>> "else if (sock && sk_has_rx_data(...))" block?
>> The problem is it does nothing if vhost_vq_avail_empty() is true and
>> vhost_enable_notify() is false.
>>
>>>>>>> +
>>>>>>> +             cpu_relax();
>>>>>>> +     }
>>>>>>> +
>>>>>>> +     preempt_enable();
>>>>>>> +
>>>>>>> +     if (!rx)
>>>>>>> +             vhost_net_enable_vq(net, vq);
>>>>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
>>>>>> called soon.
>>>>> If we disable rx virtqueue in handle_tx and don't send packets from
>>>>> guest anymore(handle_tx is not called), so we can wake up for sock rx.
>>>>> so the network is broken.
>>>> Not sure I understand here. I mean is we schedule work for handle_rx(),
>>>> there's no need to enable it since handle_rx() will do this for us.
>>> Looks like in the last "else" block in vhost_net_busy_poll_check() we
>>> need to enable vq since in that case we have no rx data and handle_rx()
>>> is not scheduled.
>>>
>> Yes.
> So we will use vhost_has_work() to check whether or not handle_rx()
> is scheduled? If we use vhost_has_work(), the work in the dev
> work_list may be either rx work or tx work, right?

Yes. We can add a boolean to record whether or not we've called 
vhost_poll_queue() for rvq. And avoid calling vhost_net_enable_vq() if 
it was true.
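
Something like this untested sketch, just to illustrate the idea (the
rx_queued local is made up here, and the busyloop itself is omitted):

static void vhost_net_busy_poll(struct vhost_net *net,
				struct vhost_virtqueue *rvq,
				struct vhost_virtqueue *tvq,
				bool poll_rx)
{
	struct socket *sock = rvq->private_data;
	bool rx_queued = false;	/* vhost_poll_queue() called on rvq? */

	if (poll_rx) {
		vhost_net_busy_poll_try_queue(net, tvq);
	} else if (sock && sk_has_rx_data(sock->sk)) {
		vhost_poll_queue(&rvq->poll);	/* handle_rx() runs soon */
		rx_queued = true;
	}

	/* handle_rx() re-enables the vq itself, so rx wakeups are only
	 * needed when we did not schedule it above. */
	if (!poll_rx && !rx_queued)
		vhost_net_enable_vq(net, rvq);
}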

So here's the needed changes:

1) Split the wakeup disabling to another patch
2) Squash the vhost_net_busy_poll_try_queue() and 
vhost_net_busy_poll_check() into vhost_net_busy_poll() and reduce 
duplicated checks.
3) If possible, rename the boolean rx in vhost_net_busy_poll() to 
poll_rx, this makes code more readable.

Thanks

>> Thanks

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-03  3:07               ` Jason Wang
@ 2018-08-03  3:24                 ` Tonghao Zhang
  2018-08-03  3:40                   ` Toshiaki Makita
                                     ` (2 more replies)
  2018-08-03  3:24                 ` Tonghao Zhang
  1 sibling, 3 replies; 37+ messages in thread
From: Tonghao Zhang @ 2018-08-03  3:24 UTC (permalink / raw)
  To: jasowang
  Cc: makita.toshiaki, mst, virtualization, Linux Kernel Network Developers

On Fri, Aug 3, 2018 at 11:07 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
>
> On 2018-08-03 10:51, Tonghao Zhang wrote:
> > On Thu, Aug 2, 2018 at 5:23 PM Jason Wang <jasowang@redhat.com> wrote:
> >>
> >>
> >> On 2018-08-02 16:41, Toshiaki Makita wrote:
> >>> On 2018/08/02 17:18, Jason Wang wrote:
> >>>> On 2018-08-01 17:52, Tonghao Zhang wrote:
> >>>>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
> >>>>>> +                                   struct vhost_virtqueue *rvq,
> >>>>>> +                                   struct vhost_virtqueue *tvq,
> >>>>>> +                                   bool rx)
> >>>>>> +{
> >>>>>> +     struct socket *sock = rvq->private_data;
> >>>>>> +
> >>>>>> +     if (rx)
> >>>>>> +             vhost_net_busy_poll_try_queue(net, tvq);
> >>>>>> +     else if (sock && sk_has_rx_data(sock->sk))
> >>>>>> +             vhost_net_busy_poll_try_queue(net, rvq);
> >>>>>> +     else {
> >>>>>> +             /* On tx here, sock has no rx data, so we
> >>>>>> +              * will wait for sock wakeup for rx, and
> >>>>>> +              * vhost_enable_notify() is not needed. */
> >>>>> A possible case is we do have rx data but guest does not refill the rx
> >>>>> queue. In this case we may lose notifications from guest.
> >>>> Yes, should consider this case. thanks.
> >>> I'm a bit confused. Isn't this covered by the previous
> >>> "else if (sock && sk_has_rx_data(...))" block?
> >> The problem is it does nothing if vhost_vq_avail_empty() is true and
> >> vhost_enable_notify() is false.
> >>
> >>>>>>> +
> >>>>>>> +             cpu_relax();
> >>>>>>> +     }
> >>>>>>> +
> >>>>>>> +     preempt_enable();
> >>>>>>> +
> >>>>>>> +     if (!rx)
> >>>>>>> +             vhost_net_enable_vq(net, vq);
> >>>>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
> >>>>>> called soon.
> >>>>> If we disable rx virtqueue in handle_tx and don't send packets from
> >>>>> guest anymore(handle_tx is not called), so we can wake up for sock rx.
> >>>>> so the network is broken.
> >>>> Not sure I understand here. I mean is we schedule work for handle_rx(),
> >>>> there's no need to enable it since handle_rx() will do this for us.
> >>> Looks like in the last "else" block in vhost_net_busy_poll_check() we
> >>> need to enable vq since in that case we have no rx data and handle_rx()
> >>> is not scheduled.
> >>>
> >> Yes.
> > So we will use vhost_has_work() to check whether or not handle_rx()
> > is scheduled? If we use vhost_has_work(), the work in the dev
> > work_list may be either rx work or tx work, right?
>
> Yes. We can add a boolean to record whether or not we've called
> vhost_poll_queue() for rvq. And avoid calling vhost_net_enable_vq() if
> it was true.
So commit be294a51a ("vhost_net: Avoid rx queue wake-ups during
busypoll") may not consider the case where the work in the dev work
list is tx work.

> So here's the needed changes:
>
> 1) Split the wakeup disabling to another patch
> 2) Squash the vhost_net_busy_poll_try_queue() and
> vhost_net_busy_poll_check() into vhost_net_busy_poll() and reduce
> duplicated checks.
> 3) If possible, rename the boolean rx in vhost_net_busy_poll() to
> poll_rx, this makes code more readable.
OK
> Thanks
>
> >> Thanks
>

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-03  3:07             ` Jason Wang
@ 2018-08-03  3:32               ` Toshiaki Makita
  2018-08-03  3:44                 ` Jason Wang
  2018-08-03  3:44                 ` Jason Wang
  0 siblings, 2 replies; 37+ messages in thread
From: Toshiaki Makita @ 2018-08-03  3:32 UTC (permalink / raw)
  To: Jason Wang, Tonghao Zhang
  Cc: Linux Kernel Network Developers, virtualization, mst

On 2018/08/03 12:07, Jason Wang wrote:
> On 2018-08-02 17:23, Jason Wang wrote:
>>>>>>>
>>>>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
>>>>>> called soon.
>>>>> If we disable rx virtqueue in handle_tx and don't send packets from
>>>>> guest anymore(handle_tx is not called), so we can wake up for sock rx.
>>>>> so the network is broken.
>>>> Not sure I understand here. I mean is we schedule work for handle_rx(),
>>>> there's no need to enable it since handle_rx() will do this for us.
>>> Looks like in the last "else" block in vhost_net_busy_poll_check() we
>>> need to enable vq since in that case we have no rx data and handle_rx()
>>> is not scheduled.
>>>
> Rethinking about this, it looks like it is not needed. We enable rx
> wakeups in this case, so if there's pending data, handle_rx() will be
> scheduled after vhost_net_enable_vq().

You are right, but what I wanted to say is that vhost_net_enable_vq()
is still needed (I was talking about what would happen if
vhost_net_enable_vq() were removed). Also, I think we should move
vhost_net_enable_vq() from vhost_net_busy_poll() into this last "else"
block, because that is the case where rx wakeups are required.
Anyway, this part will be refactored, so let's see what the code looks
like in the next version.
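
To be concrete, the shape I have in mind is roughly (a sketch, not the
actual code):

	if (poll_rx)
		vhost_net_busy_poll_try_queue(net, tvq);
	else if (sock && sk_has_rx_data(sock->sk))
		vhost_net_busy_poll_try_queue(net, rvq);
	else
		/* No rx data and handle_rx() is not scheduled, so we
		 * must rely on sock wakeups: re-enable them here. */
		vhost_net_enable_vq(net, rvq);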

-- 
Toshiaki Makita

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-03  3:24                 ` Tonghao Zhang
@ 2018-08-03  3:40                   ` Toshiaki Makita
  2018-08-03  4:14                     ` Tonghao Zhang
  2018-08-03  4:14                     ` Tonghao Zhang
  2018-08-03  3:43                   ` Jason Wang
  2018-08-03  3:43                   ` Jason Wang
  2 siblings, 2 replies; 37+ messages in thread
From: Toshiaki Makita @ 2018-08-03  3:40 UTC (permalink / raw)
  To: Tonghao Zhang; +Cc: Linux Kernel Network Developers, virtualization, mst

On 2018/08/03 12:24, Tonghao Zhang wrote:
> On Fri, Aug 3, 2018 at 11:07 AM Jason Wang <jasowang@redhat.com> wrote:
>> On 2018-08-03 10:51, Tonghao Zhang wrote:
>>> On Thu, Aug 2, 2018 at 5:23 PM Jason Wang <jasowang@redhat.com> wrote:
>>>> On 2018-08-02 16:41, Toshiaki Makita wrote:
>>>>> On 2018/08/02 17:18, Jason Wang wrote:
>>>>>> On 2018-08-01 17:52, Tonghao Zhang wrote:
>>>>>>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
>>>>>>>> +                                   struct vhost_virtqueue *rvq,
>>>>>>>> +                                   struct vhost_virtqueue *tvq,
>>>>>>>> +                                   bool rx)
>>>>>>>> +{
>>>>>>>> +     struct socket *sock = rvq->private_data;
>>>>>>>> +
>>>>>>>> +     if (rx)
>>>>>>>> +             vhost_net_busy_poll_try_queue(net, tvq);
>>>>>>>> +     else if (sock && sk_has_rx_data(sock->sk))
>>>>>>>> +             vhost_net_busy_poll_try_queue(net, rvq);
>>>>>>>> +     else {
>>>>>>>> +             /* On tx here, sock has no rx data, so we
>>>>>>>> +              * will wait for sock wakeup for rx, and
>>>>>>>> +              * vhost_enable_notify() is not needed. */
>>>>>>> A possible case is we do have rx data but guest does not refill the rx
>>>>>>> queue. In this case we may lose notifications from guest.
>>>>>> Yes, should consider this case. thanks.
>>>>> I'm a bit confused. Isn't this covered by the previous
>>>>> "else if (sock && sk_has_rx_data(...))" block?
>>>> The problem is it does nothing if vhost_vq_avail_empty() is true and
> >>>> vhost_enable_notify() is false.
>>>>
>>>>>>>>> +
>>>>>>>>> +             cpu_relax();
>>>>>>>>> +     }
>>>>>>>>> +
>>>>>>>>> +     preempt_enable();
>>>>>>>>> +
>>>>>>>>> +     if (!rx)
>>>>>>>>> +             vhost_net_enable_vq(net, vq);
>>>>>>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
>>>>>>>> called soon.
>>>>>>> If we disable rx virtqueue in handle_tx and don't send packets from
>>>>>>> guest anymore(handle_tx is not called), so we can wake up for sock rx.
>>>>>>> so the network is broken.
>>>>>> Not sure I understand here. I mean is we schedule work for handle_rx(),
>>>>>> there's no need to enable it since handle_rx() will do this for us.
>>>>> Looks like in the last "else" block in vhost_net_busy_poll_check() we
>>>>> need to enable vq since in that case we have no rx data and handle_rx()
>>>>> is not scheduled.
>>>>>
>>>> Yes.
>>> So we will use vhost_has_work() to check whether or not handle_rx()
>>> is scheduled? If we use vhost_has_work(), the work in the dev
>>> work_list may be either rx work or tx work, right?
>>
>> Yes. We can add a boolean to record whether or not we've called
>> vhost_poll_queue() for rvq. And avoid calling vhost_net_enable_vq() if
>> it was true.
> > So commit be294a51a ("vhost_net: Avoid rx queue wake-ups during
> > busypoll") may not consider the case where the work in the dev work
> > list is tx work.

Not sure what you are concerned about, but what I can say is that we
need to poll rx work if vhost_has_work() detects tx work in
vhost_net_rx_peek_head_len(), since rx busypoll exits prematurely in
that case.
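
For reference, the rx busypoll loop bails out as soon as any vhost
work is queued on the device, because vhost_can_busy_poll() also
checks vhost_has_work(). Paraphrasing vhost_net_rx_peek_head_len()
(not the exact source):

	preempt_disable();
	endtime = busy_clock() + vq->busyloop_timeout;
	while (vhost_can_busy_poll(&net->dev, endtime) &&
	       !sk_has_rx_data(sk) &&
	       vhost_vq_avail_empty(&net->dev, vq))
		cpu_relax();	/* exits early once any work is queued */
	preempt_enable();

When the queued work happens to be tx work, the loop ends without
having seen rx data, so the rx side still has to be polled explicitly.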

>> So here's the needed changes:
>>
>> 1) Split the wakeup disabling to another patch
>> 2) Squash the vhost_net_busy_poll_try_queue() and
>> vhost_net_busy_poll_check() into vhost_net_busy_poll() and reduce
>> duplicated checks.
>> 3) If possible, rename the boolean rx in vhost_net_busy_poll() to
>> poll_rx, this makes code more readable.
> OK
>> Thanks
>>
>>>> Thanks
>>
> 
> 

-- 
Toshiaki Makita

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-03  3:24                 ` Tonghao Zhang
  2018-08-03  3:40                   ` Toshiaki Makita
  2018-08-03  3:43                   ` Jason Wang
@ 2018-08-03  3:43                   ` Jason Wang
  2018-08-03  4:04                     ` Tonghao Zhang
  2 siblings, 1 reply; 37+ messages in thread
From: Jason Wang @ 2018-08-03  3:43 UTC (permalink / raw)
  To: Tonghao Zhang
  Cc: makita.toshiaki, mst, virtualization, Linux Kernel Network Developers



On 2018-08-03 11:24, Tonghao Zhang wrote:
> On Fri, Aug 3, 2018 at 11:07 AM Jason Wang <jasowang@redhat.com> wrote:
>>
>>
>> On 2018-08-03 10:51, Tonghao Zhang wrote:
>>> On Thu, Aug 2, 2018 at 5:23 PM Jason Wang <jasowang@redhat.com> wrote:
>>>>
>>>> On 2018-08-02 16:41, Toshiaki Makita wrote:
>>>>> On 2018/08/02 17:18, Jason Wang wrote:
>>>>>> On 2018-08-01 17:52, Tonghao Zhang wrote:
>>>>>>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
>>>>>>>> +                                   struct vhost_virtqueue *rvq,
>>>>>>>> +                                   struct vhost_virtqueue *tvq,
>>>>>>>> +                                   bool rx)
>>>>>>>> +{
>>>>>>>> +     struct socket *sock = rvq->private_data;
>>>>>>>> +
>>>>>>>> +     if (rx)
>>>>>>>> +             vhost_net_busy_poll_try_queue(net, tvq);
>>>>>>>> +     else if (sock && sk_has_rx_data(sock->sk))
>>>>>>>> +             vhost_net_busy_poll_try_queue(net, rvq);
>>>>>>>> +     else {
>>>>>>>> +             /* On tx here, sock has no rx data, so we
>>>>>>>> +              * will wait for sock wakeup for rx, and
>>>>>>>> +              * vhost_enable_notify() is not needed. */
>>>>>>> A possible case is we do have rx data but guest does not refill the rx
>>>>>>> queue. In this case we may lose notifications from guest.
>>>>>> Yes, should consider this case. thanks.
>>>>> I'm a bit confused. Isn't this covered by the previous
>>>>> "else if (sock && sk_has_rx_data(...))" block?
>>>> The problem is it does nothing if vhost_vq_avail_empty() is true and
>>>> vhost_enable_notify() is false.
>>>>
>>>>>>>>> +
>>>>>>>>> +             cpu_relax();
>>>>>>>>> +     }
>>>>>>>>> +
>>>>>>>>> +     preempt_enable();
>>>>>>>>> +
>>>>>>>>> +     if (!rx)
>>>>>>>>> +             vhost_net_enable_vq(net, vq);
>>>>>>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
>>>>>>>> called soon.
>>>>>>> If we disable rx virtqueue in handle_tx and don't send packets from
>>>>>>> guest anymore(handle_tx is not called), so we can wake up for sock rx.
>>>>>>> so the network is broken.
>>>>>> Not sure I understand here. I mean is we schedule work for handle_rx(),
>>>>>> there's no need to enable it since handle_rx() will do this for us.
>>>>> Looks like in the last "else" block in vhost_net_busy_poll_check() we
>>>>> need to enable vq since in that case we have no rx data and handle_rx()
>>>>> is not scheduled.
>>>>>
>>>> Yes.
>>> So we will use vhost_has_work() to check whether or not handle_rx()
>>> is scheduled? If we use vhost_has_work(), the work in the dev
>>> work_list may be either rx work or tx work, right?
>> Yes. We can add a boolean to record whether or not we've called
>> vhost_poll_queue() for rvq. And avoid calling vhost_net_enable_vq() if
>> it was true.
> So commit be294a51a ("vhost_net: Avoid rx queue wake-ups during
> busypoll") may not consider the case where the work in the dev work
> list is tx work.

So there are two kinds of work: tx kick and tx wakeup.

For tx kick, we check vhost_vq_avail_empty() and avoid unnecessary kicks
by not enabling the kick if we find something pending on the txq. For tx
wakeup, yes, the commit does not consider it, and that's why we want to
disable tx wakeups during busy polling.
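
i.e. on the rx side, something along these lines (illustrative only,
not the final patch):

	vhost_net_disable_vq(net, tvq);	/* suppress tx wakeup work */

	preempt_disable();
	endtime = busy_clock() + tvq->busyloop_timeout;
	while (vhost_can_busy_poll(&net->dev, endtime) &&
	       !sk_has_rx_data(sock->sk))
		cpu_relax();
	preempt_enable();

	vhost_net_enable_vq(net, tvq);	/* restore tx wakeups */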

Thanks

>
>> So here's the needed changes:
>>
>> 1) Split the wakeup disabling to another patch
>> 2) Squash the vhost_net_busy_poll_try_queue() and
>> vhost_net_busy_poll_check() into vhost_net_busy_poll() and reduce
>> duplicated checks.
>> 3) If possible, rename the boolean rx in vhost_net_busy_poll() to
>> poll_rx, this makes code more readable.
> OK
>> Thanks
>>
>>>> Thanks

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-03  3:32               ` Toshiaki Makita
  2018-08-03  3:44                 ` Jason Wang
@ 2018-08-03  3:44                 ` Jason Wang
  1 sibling, 0 replies; 37+ messages in thread
From: Jason Wang @ 2018-08-03  3:44 UTC (permalink / raw)
  To: Toshiaki Makita, Tonghao Zhang
  Cc: mst, virtualization, Linux Kernel Network Developers



On 2018-08-03 11:32, Toshiaki Makita wrote:
> On 2018/08/03 12:07, Jason Wang wrote:
>> On 2018-08-02 17:23, Jason Wang wrote:
>>>>>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
>>>>>>> called soon.
>>>>>> If we disable rx virtqueue in handle_tx and don't send packets from
>>>>>> guest anymore(handle_tx is not called), so we can wake up for sock rx.
>>>>>> so the network is broken.
>>>>> Not sure I understand here. I mean is we schedule work for handle_rx(),
>>>>> there's no need to enable it since handle_rx() will do this for us.
>>>> Looks like in the last "else" block in vhost_net_busy_poll_check() we
>>>> need to enable vq since in that case we have no rx data and handle_rx()
>>>> is not scheduled.
>>>>
>> Rethinking about this, it looks like it is not needed. We enable rx
>> wakeups in this case, so if there's pending data, handle_rx() will be
>> scheduled after vhost_net_enable_vq().
> You are right, but what I wanted to say is that vhost_net_enable_vq()
> is still needed (I was talking about what would happen if
> vhost_net_enable_vq() were removed). Also, I think we should move
> vhost_net_enable_vq() from vhost_net_busy_poll() into this last "else"
> block, because that is the case where rx wakeups are required.
> Anyway, this part will be refactored, so let's see what the code looks
> like in the next version.
>

I get your point.

Thanks

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-03  3:43                   ` Jason Wang
@ 2018-08-03  4:04                     ` Tonghao Zhang
  2018-08-03  5:07                       ` Jason Wang
  0 siblings, 1 reply; 37+ messages in thread
From: Tonghao Zhang @ 2018-08-03  4:04 UTC (permalink / raw)
  To: jasowang; +Cc: Linux Kernel Network Developers, mst, virtualization

On Fri, Aug 3, 2018 at 11:43 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
>
> On 2018-08-03 11:24, Tonghao Zhang wrote:
> > On Fri, Aug 3, 2018 at 11:07 AM Jason Wang <jasowang@redhat.com> wrote:
> >>
> >>
> >> On 2018-08-03 10:51, Tonghao Zhang wrote:
> >>> On Thu, Aug 2, 2018 at 5:23 PM Jason Wang <jasowang@redhat.com> wrote:
> >>>>
> >>>> On 2018-08-02 16:41, Toshiaki Makita wrote:
> >>>>> On 2018/08/02 17:18, Jason Wang wrote:
> >>>>>> On 2018-08-01 17:52, Tonghao Zhang wrote:
> >>>>>>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
> >>>>>>>> +                                   struct vhost_virtqueue *rvq,
> >>>>>>>> +                                   struct vhost_virtqueue *tvq,
> >>>>>>>> +                                   bool rx)
> >>>>>>>> +{
> >>>>>>>> +     struct socket *sock = rvq->private_data;
> >>>>>>>> +
> >>>>>>>> +     if (rx)
> >>>>>>>> +             vhost_net_busy_poll_try_queue(net, tvq);
> >>>>>>>> +     else if (sock && sk_has_rx_data(sock->sk))
> >>>>>>>> +             vhost_net_busy_poll_try_queue(net, rvq);
> >>>>>>>> +     else {
> >>>>>>>> +             /* On tx here, sock has no rx data, so we
> >>>>>>>> +              * will wait for sock wakeup for rx, and
> >>>>>>>> +              * vhost_enable_notify() is not needed. */
> >>>>>>> A possible case is we do have rx data but guest does not refill the rx
> >>>>>>> queue. In this case we may lose notifications from guest.
> >>>>>> Yes, should consider this case. thanks.
> >>>>> I'm a bit confused. Isn't this covered by the previous
> >>>>> "else if (sock && sk_has_rx_data(...))" block?
> >>>> The problem is it does nothing if vhost_vq_avail_empty() is true and
> >>>> vhost_enable_notify() is false.
> >>>>
> >>>>>>>>> +
> >>>>>>>>> +             cpu_relax();
> >>>>>>>>> +     }
> >>>>>>>>> +
> >>>>>>>>> +     preempt_enable();
> >>>>>>>>> +
> >>>>>>>>> +     if (!rx)
> >>>>>>>>> +             vhost_net_enable_vq(net, vq);
> >>>>>>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
> >>>>>>>> called soon.
> >>>>>>> If we disable rx virtqueue in handle_tx and don't send packets from
> >>>>>>> guest anymore(handle_tx is not called), so we can wake up for sock rx.
> >>>>>>> so the network is broken.
> >>>>>> Not sure I understand here. I mean is we schedule work for handle_rx(),
> >>>>>> there's no need to enable it since handle_rx() will do this for us.
> >>>>> Looks like in the last "else" block in vhost_net_busy_poll_check() we
> >>>>> need to enable vq since in that case we have no rx data and handle_rx()
> >>>>> is not scheduled.
> >>>>>
> >>>> Yes.
> >>> So we will use vhost_has_work() to check whether or not handle_rx()
> >>> is scheduled? If we use vhost_has_work(), the work in the dev
> >>> work_list may be either rx work or tx work, right?
> >> Yes. We can add a boolean to record whether or not we've called
> >> vhost_poll_queue() for rvq. And avoid calling vhost_net_enable_vq() if
> >> it was true.
> > So commit be294a51a ("vhost_net: Avoid rx queue wake-ups during
> > busypoll") may not consider the case where the work in the dev work
> > list is tx work.
>
> So there are two kinds of work: tx kick and tx wakeup.
>
> For tx kick, we check vhost_vq_avail_empty() and avoid unnecessary kicks
> by not enabling the kick if we find something pending on the txq. For tx
> wakeup, yes, the commit does not consider it, and that's why we want to
> disable tx wakeups during busy polling.
And in handle_rx(), outside of busy polling, a tx wakeup can happen at
any time and the tx work will be added to the dev work list. In that
case, is it necessary to queue the rx poll? Commit be294a51a could
instead check whether the rx work is in the dev work list.
> Thanks
>
> >
> >> So here's the needed changes:
> >>
> >> 1) Split the wakeup disabling to another patch
> >> 2) Squash the vhost_net_busy_poll_try_queue() and
> >> vhost_net_busy_poll_check() into vhost_net_busy_poll() and reduce
> >> duplicated checks.
> >> 3) If possible, rename the boolean rx in vhost_net_busy_poll() to
> >> poll_rx, this makes code more readable.
> > OK
> >> Thanks
> >>
> >>>> Thanks
>

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-03  3:40                   ` Toshiaki Makita
@ 2018-08-03  4:14                     ` Tonghao Zhang
  2018-08-03  4:25                       ` Toshiaki Makita
  2018-08-03  4:14                     ` Tonghao Zhang
  1 sibling, 1 reply; 37+ messages in thread
From: Tonghao Zhang @ 2018-08-03  4:14 UTC (permalink / raw)
  To: makita.toshiaki
  Cc: jasowang, mst, virtualization, Linux Kernel Network Developers

On Fri, Aug 3, 2018 at 11:40 AM Toshiaki Makita
<makita.toshiaki@lab.ntt.co.jp> wrote:
>
> On 2018/08/03 12:24, Tonghao Zhang wrote:
> > On Fri, Aug 3, 2018 at 11:07 AM Jason Wang <jasowang@redhat.com> wrote:
> >> On 2018-08-03 10:51, Tonghao Zhang wrote:
> >>> On Thu, Aug 2, 2018 at 5:23 PM Jason Wang <jasowang@redhat.com> wrote:
> >>>> On 2018-08-02 16:41, Toshiaki Makita wrote:
> >>>>> On 2018/08/02 17:18, Jason Wang wrote:
> >>>>>> On 2018-08-01 17:52, Tonghao Zhang wrote:
> >>>>>>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
> >>>>>>>> +                                   struct vhost_virtqueue *rvq,
> >>>>>>>> +                                   struct vhost_virtqueue *tvq,
> >>>>>>>> +                                   bool rx)
> >>>>>>>> +{
> >>>>>>>> +     struct socket *sock = rvq->private_data;
> >>>>>>>> +
> >>>>>>>> +     if (rx)
> >>>>>>>> +             vhost_net_busy_poll_try_queue(net, tvq);
> >>>>>>>> +     else if (sock && sk_has_rx_data(sock->sk))
> >>>>>>>> +             vhost_net_busy_poll_try_queue(net, rvq);
> >>>>>>>> +     else {
> >>>>>>>> +             /* On tx here, sock has no rx data, so we
> >>>>>>>> +              * will wait for sock wakeup for rx, and
> >>>>>>>> +              * vhost_enable_notify() is not needed. */
> >>>>>>> A possible case is we do have rx data but guest does not refill the rx
> >>>>>>> queue. In this case we may lose notifications from guest.
> >>>>>> Yes, should consider this case. thanks.
> >>>>> I'm a bit confused. Isn't this covered by the previous
> >>>>> "else if (sock && sk_has_rx_data(...))" block?
> >>>> The problem is it does nothing if vhost_vq_avail_empty() is true and
> >>>> vhost_enable_notify() is false.
> >>>>
> >>>>>>>>> +
> >>>>>>>>> +             cpu_relax();
> >>>>>>>>> +     }
> >>>>>>>>> +
> >>>>>>>>> +     preempt_enable();
> >>>>>>>>> +
> >>>>>>>>> +     if (!rx)
> >>>>>>>>> +             vhost_net_enable_vq(net, vq);
> >>>>>>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
> >>>>>>>> called soon.
> >>>>>>> If we disable rx virtqueue in handle_tx and don't send packets from
> >>>>>>> guest anymore(handle_tx is not called), so we can wake up for sock rx.
> >>>>>>> so the network is broken.
> >>>>>> Not sure I understand here. I mean is we schedule work for handle_rx(),
> >>>>>> there's no need to enable it since handle_rx() will do this for us.
> >>>>> Looks like in the last "else" block in vhost_net_busy_poll_check() we
> >>>>> need to enable vq since in that case we have no rx data and handle_rx()
> >>>>> is not scheduled.
> >>>>>
> >>>> Yes.
> >>> So we will use vhost_has_work() to check whether or not handle_rx()
> >>> is scheduled? If we use vhost_has_work(), the work in the dev
> >>> work_list may be either rx work or tx work, right?
> >>
> >> Yes. We can add a boolean to record whether or not we've called
> >> vhost_poll_queue() for rvq. And avoid calling vhost_net_enable_vq() if
> >> it was true.
> > So commit be294a51a ("vhost_net: Avoid rx queue wake-ups during
> > busypoll") may not consider the case where the work in the dev work
> > list is tx work.
>
> Not sure what you are concerned about, but what I can say is that we
> need to poll rx work if vhost_has_work() detects tx work in
> vhost_net_rx_peek_head_len(), since rx busypoll exits prematurely in
> that case.
In handle_rx(), when we busy poll, vhost_has_work() may return true
because tx work, not rx work, is in the dev work list. And that is the
common case, because tx work may be added to the dev work list at any
time (not only during busy poll) when the guest kicks vhost-net. So it
is not necessary to add it, right?

> >> So here's the needed changes:
> >>
> >> 1) Split the wakeup disabling to another patch
> >> 2) Squash the vhost_net_busy_poll_try_queue() and
> >> vhost_net_busy_poll_check() into vhost_net_busy_poll() and reduce
> >> duplicated checks.
> >> 3) If possible, rename the boolean rx in vhost_net_busy_poll() to
> >> poll_rx, this makes code more readable.
> > OK
> >> Thanks
> >>
> >>>> Thanks
> >>
> >
> >
>
> --
> Toshiaki Makita
>

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-03  4:14                     ` Tonghao Zhang
@ 2018-08-03  4:25                       ` Toshiaki Makita
  0 siblings, 0 replies; 37+ messages in thread
From: Toshiaki Makita @ 2018-08-03  4:25 UTC (permalink / raw)
  To: Tonghao Zhang; +Cc: Linux Kernel Network Developers, virtualization, mst

On 2018/08/03 13:14, Tonghao Zhang wrote:
> On Fri, Aug 3, 2018 at 11:40 AM Toshiaki Makita
> <makita.toshiaki@lab.ntt.co.jp> wrote:
>>
>> On 2018/08/03 12:24, Tonghao Zhang wrote:
>>> On Fri, Aug 3, 2018 at 11:07 AM Jason Wang <jasowang@redhat.com> wrote:
>>>> On 2018年08月03日 10:51, Tonghao Zhang wrote:
>>>>> On Thu, Aug 2, 2018 at 5:23 PM Jason Wang <jasowang@redhat.com> wrote:
>>>>>> On 2018年08月02日 16:41, Toshiaki Makita wrote:
>>>>>>> On 2018/08/02 17:18, Jason Wang wrote:
>>>>>>>> On 2018年08月01日 17:52, Tonghao Zhang wrote:
>>>>>>>>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
>>>>>>>>>> +                                   struct vhost_virtqueue *rvq,
>>>>>>>>>> +                                   struct vhost_virtqueue *tvq,
>>>>>>>>>> +                                   bool rx)
>>>>>>>>>> +{
>>>>>>>>>> +     struct socket *sock = rvq->private_data;
>>>>>>>>>> +
>>>>>>>>>> +     if (rx)
>>>>>>>>>> +             vhost_net_busy_poll_try_queue(net, tvq);
>>>>>>>>>> +     else if (sock && sk_has_rx_data(sock->sk))
>>>>>>>>>> +             vhost_net_busy_poll_try_queue(net, rvq);
>>>>>>>>>> +     else {
>>>>>>>>>> +             /* On tx here, sock has no rx data, so we
>>>>>>>>>> +              * will wait for sock wakeup for rx, and
>>>>>>>>>> +              * vhost_enable_notify() is not needed. */
>>>>>>>>> A possible case is that we do have rx data but the guest does
>>>>>>>>> not refill the rx queue. In this case we may lose notifications
>>>>>>>>> from the guest.
>>>>>>>> Yes, we should consider this case. Thanks.
>>>>>>> I'm a bit confused. Isn't this covered by the previous
>>>>>>> "else if (sock && sk_has_rx_data(...))" block?
>>>>>> The problem is that it does nothing if vhost_vq_avail_empty() is
>>>>>> true and vhost_enable_notify() is false.
>>>>>>
>>>>>>>>>>> +
>>>>>>>>>>> +             cpu_relax();
>>>>>>>>>>> +     }
>>>>>>>>>>> +
>>>>>>>>>>> +     preempt_enable();
>>>>>>>>>>> +
>>>>>>>>>>> +     if (!rx)
>>>>>>>>>>> +             vhost_net_enable_vq(net, vq);
>>>>>>>>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
>>>>>>>>>> called soon.
>>>>>>>>> If we disable the rx virtqueue in handle_tx and the guest
>>>>>>>>> doesn't send packets anymore (handle_tx is not called), we
>>>>>>>>> cannot be woken up for sock rx, so the network is broken.
>>>>>>>> Not sure I understand here. I mean if we schedule work for
>>>>>>>> handle_rx(), there's no need to enable it since handle_rx()
>>>>>>>> will do this for us.
>>>>>>> Looks like in the last "else" block in vhost_net_busy_poll_check() we
>>>>>>> need to enable vq since in that case we have no rx data and handle_rx()
>>>>>>> is not scheduled.
>>>>>>>
>>>>>> Yes.
>>>>> So we will use vhost_has_work() to check whether or not
>>>>> handle_rx is scheduled?
>>>>> If we use vhost_has_work(), the work in the dev work_list may be
>>>>> rx work or tx work, right?
>>>>
>>>> Yes. We can add a boolean to record whether or not we've called
>>>> vhost_poll_queue() for rvq. And avoid calling vhost_net_enable_vq() if
>>>> it was true.
>>> So the commit be294a51a ("vhost_net: Avoid rx queue wake-ups during
>>> busypoll") may not consider the case where the work in the dev work
>>> list is tx work.
>>
>> Not sure what you are concerned about, but what I can say is that we
>> need to poll rx work if vhost_has_work() detects tx work in
>> vhost_net_rx_peek_head_len(), since rx busypoll exits prematurely in
>> that case.
> In handle_rx, when we busy poll, vhost_has_work() returns true because
> the tx work (not rx work) is in the dev work list. And this is the
> most common case, because tx work may be added to the dev work list at
> any time (not only during busy poll) when the guest kicks vhost-net.
> So it is not necessary to add it, right?

I'm lost.
What is the part you think is not needed?

1. When there is tx work, we exit rx busypoll.
2. When we exit rx busypoll due to tx work, we poll rx work (so that we
   can continue rx busypoll later), as sketched below.
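
Roughly, in code (a sketch of what be294a51a does, context trimmed;
busyloop_intr is the flag that commit added):

	/* in the rx busypoll loop of vhost_net_rx_peek_head_len() */
	if (vhost_has_work(&net->dev)) {
		*busyloop_intr = true;	/* (1) pending work ends the busypoll */
		break;
	}

	/* in handle_rx(), once the peek loop is done */
	if (unlikely(busyloop_intr))
		vhost_poll_queue(&vq->poll);	/* (2) repoll rx so busypoll can resume */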

-- 
Toshiaki Makita


^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-03  4:04                     ` Tonghao Zhang
@ 2018-08-03  5:07                       ` Jason Wang
  2018-08-03  5:25                         ` Toshiaki Makita
  0 siblings, 1 reply; 37+ messages in thread
From: Jason Wang @ 2018-08-03  5:07 UTC (permalink / raw)
  To: Tonghao Zhang; +Cc: Linux Kernel Network Developers, mst, virtualization



On 2018年08月03日 12:04, Tonghao Zhang wrote:
> On Fri, Aug 3, 2018 at 11:43 AM Jason Wang<jasowang@redhat.com>  wrote:
>>
>> On 2018年08月03日 11:24, Tonghao Zhang wrote:
>>> On Fri, Aug 3, 2018 at 11:07 AM Jason Wang<jasowang@redhat.com>  wrote:
>>>> On 2018年08月03日 10:51, Tonghao Zhang wrote:
>>>>> On Thu, Aug 2, 2018 at 5:23 PM Jason Wang<jasowang@redhat.com>  wrote:
>>>>>> On 2018年08月02日 16:41, Toshiaki Makita wrote:
>>>>>>> On 2018/08/02 17:18, Jason Wang wrote:
>>>>>>>> On 2018年08月01日 17:52, Tonghao Zhang wrote:
>>>>>>>>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
>>>>>>>>>> +                                   struct vhost_virtqueue *rvq,
>>>>>>>>>> +                                   struct vhost_virtqueue *tvq,
>>>>>>>>>> +                                   bool rx)
>>>>>>>>>> +{
>>>>>>>>>> +     struct socket *sock = rvq->private_data;
>>>>>>>>>> +
>>>>>>>>>> +     if (rx)
>>>>>>>>>> +             vhost_net_busy_poll_try_queue(net, tvq);
>>>>>>>>>> +     else if (sock && sk_has_rx_data(sock->sk))
>>>>>>>>>> +             vhost_net_busy_poll_try_queue(net, rvq);
>>>>>>>>>> +     else {
>>>>>>>>>> +             /* On tx here, sock has no rx data, so we
>>>>>>>>>> +              * will wait for sock wakeup for rx, and
>>>>>>>>>> +              * vhost_enable_notify() is not needed. */
>>>>>>>>> A possible case is that we do have rx data but the guest does
>>>>>>>>> not refill the rx queue. In this case we may lose notifications
>>>>>>>>> from the guest.
>>>>>>>> Yes, we should consider this case. Thanks.
>>>>>>> I'm a bit confused. Isn't this covered by the previous
>>>>>>> "else if (sock && sk_has_rx_data(...))" block?
>>>>>> The problem is that it does nothing if vhost_vq_avail_empty() is
>>>>>> true and vhost_enable_notify() is false.
>>>>>>
>>>>>>>>>>> +
>>>>>>>>>>> +             cpu_relax();
>>>>>>>>>>> +     }
>>>>>>>>>>> +
>>>>>>>>>>> +     preempt_enable();
>>>>>>>>>>> +
>>>>>>>>>>> +     if (!rx)
>>>>>>>>>>> +             vhost_net_enable_vq(net, vq);
>>>>>>>>>> No need to enable rx virtqueue, if we are sure handle_rx() will be
>>>>>>>>>> called soon.
>>>>>>>>> If we disable the rx virtqueue in handle_tx and the guest
>>>>>>>>> doesn't send packets anymore (handle_tx is not called), we
>>>>>>>>> cannot be woken up for sock rx, so the network is broken.
>>>>>>>> Not sure I understand here. I mean if we schedule work for
>>>>>>>> handle_rx(), there's no need to enable it since handle_rx()
>>>>>>>> will do this for us.
>>>>>>> Looks like in the last "else" block in vhost_net_busy_poll_check() we
>>>>>>> need to enable vq since in that case we have no rx data and handle_rx()
>>>>>>> is not scheduled.
>>>>>>>
>>>>>> Yes.
>>>>> So we will use vhost_has_work() to check whether or not
>>>>> handle_rx is scheduled?
>>>>> If we use vhost_has_work(), the work in the dev work_list may be
>>>>> rx work or tx work, right?
>>>> Yes. We can add a boolean to record whether or not we've called
>>>> vhost_poll_queue() for rvq. And avoid calling vhost_net_enable_vq() if
>>>> it was true.
>>> So the commit be294a51a ("vhost_net: Avoid rx queue wake-ups during
>>> busypoll") may not consider the case where the work in the dev work
>>> list is tx work.
>> So there are two kinds of work: tx kick or tx wakeup.
>>
>> For tx kick, we check vhost_vq_avail_empty() and avoid unnecessary
>> kicks by not enabling the kick if we find something pending on the
>> txq. For tx wakeup, yes, the commit does not consider it, and that's
>> why we want to disable tx wakeups during busy polling.
> And in handle_rx, outside of busy polling, tx can wake up at any time
> and the tx work will be added to the dev work list. In that case, is
> it necessary to add the rx poll to the queue? The commit be294a51a
> could check whether the rx work is in the dev work list.

I think the point is that we don't poll rx during tx at that time. So
if rx polling is interrupted, we should reschedule handle_rx(). After
we poll rx in handle_tx(), we can try to optimize this on top.
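
A minimal sketch of that, assuming the helper keeps its posted name:
the return value records whether we queued work, so the caller can
skip vhost_net_enable_vq() when it is true.

static bool vhost_net_busy_poll_try_queue(struct vhost_net *net,
					  struct vhost_virtqueue *vq)
{
	bool queued = false;

	if (!vhost_vq_avail_empty(&net->dev, vq)) {
		/* The guest already made buffers available: process them. */
		vhost_poll_queue(&vq->poll);
		queued = true;
	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
		/* The ring became non-empty while notification was off:
		 * re-disable and queue so the guest kick is not lost.
		 */
		vhost_disable_notify(&net->dev, vq);
		vhost_poll_queue(&vq->poll);
		queued = true;
	}

	return queued;
}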

Thanks


^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll()
  2018-08-03  5:07                       ` Jason Wang
@ 2018-08-03  5:25                         ` Toshiaki Makita
  0 siblings, 0 replies; 37+ messages in thread
From: Toshiaki Makita @ 2018-08-03  5:25 UTC (permalink / raw)
  To: Jason Wang, Tonghao Zhang
  Cc: Linux Kernel Network Developers, virtualization, mst

On 2018/08/03 14:07, Jason Wang wrote:
> On 2018年08月03日 12:04, Tonghao Zhang wrote:
>> On Fri, Aug 3, 2018 at 11:43 AM Jason Wang<jasowang@redhat.com>  wrote:
>>>
>>> On 2018年08月03日 11:24, Tonghao Zhang wrote:
>>>> On Fri, Aug 3, 2018 at 11:07 AM Jason Wang<jasowang@redhat.com>  wrote:
>>>>> On 2018年08月03日 10:51, Tonghao Zhang wrote:
>>>>>> On Thu, Aug 2, 2018 at 5:23 PM Jason Wang<jasowang@redhat.com> 
>>>>>> wrote:
>>>>>>> On 2018年08月02日 16:41, Toshiaki Makita wrote:
>>>>>>>> On 2018/08/02 17:18, Jason Wang wrote:
>>>>>>>>> On 2018年08月01日 17:52, Tonghao Zhang wrote:
>>>>>>>>>>> +static void vhost_net_busy_poll_check(struct vhost_net *net,
>>>>>>>>>>> +                                   struct vhost_virtqueue *rvq,
>>>>>>>>>>> +                                   struct vhost_virtqueue *tvq,
>>>>>>>>>>> +                                   bool rx)
>>>>>>>>>>> +{
>>>>>>>>>>> +     struct socket *sock = rvq->private_data;
>>>>>>>>>>> +
>>>>>>>>>>> +     if (rx)
>>>>>>>>>>> +             vhost_net_busy_poll_try_queue(net, tvq);
>>>>>>>>>>> +     else if (sock && sk_has_rx_data(sock->sk))
>>>>>>>>>>> +             vhost_net_busy_poll_try_queue(net, rvq);
>>>>>>>>>>> +     else {
>>>>>>>>>>> +             /* On tx here, sock has no rx data, so we
>>>>>>>>>>> +              * will wait for sock wakeup for rx, and
>>>>>>>>>>> +              * vhost_enable_notify() is not needed. */
>>>>>>>>>> A possible case is that we do have rx data but the guest does
>>>>>>>>>> not refill the rx queue. In this case we may lose
>>>>>>>>>> notifications from the guest.
>>>>>>>>> Yes, we should consider this case. Thanks.
>>>>>>>> I'm a bit confused. Isn't this covered by the previous
>>>>>>>> "else if (sock && sk_has_rx_data(...))" block?
>>>>>>> The problem is that it does nothing if vhost_vq_avail_empty() is
>>>>>>> true and vhost_enable_notify() is false.
>>>>>>>
>>>>>>>>>>>> +
>>>>>>>>>>>> +             cpu_relax();
>>>>>>>>>>>> +     }
>>>>>>>>>>>> +
>>>>>>>>>>>> +     preempt_enable();
>>>>>>>>>>>> +
>>>>>>>>>>>> +     if (!rx)
>>>>>>>>>>>> +             vhost_net_enable_vq(net, vq);
>>>>>>>>>>> No need to enable rx virtqueue, if we are sure handle_rx()
>>>>>>>>>>> will be
>>>>>>>>>>> called soon.
>>>>>>>>>> If we disable the rx virtqueue in handle_tx and the guest
>>>>>>>>>> doesn't send packets anymore (handle_tx is not called), we
>>>>>>>>>> cannot be woken up for sock rx, so the network is broken.
>>>>>>>>> Not sure I understand here. I mean if we schedule work for
>>>>>>>>> handle_rx(), there's no need to enable it since handle_rx()
>>>>>>>>> will do this for us.
>>>>>>>> Looks like in the last "else" block in
>>>>>>>> vhost_net_busy_poll_check() we
>>>>>>>> need to enable vq since in that case we have no rx data and
>>>>>>>> handle_rx()
>>>>>>>> is not scheduled.
>>>>>>>>
>>>>>>> Yes.
>>>>>> So we will use vhost_has_work() to check whether or not
>>>>>> handle_rx is scheduled?
>>>>>> If we use vhost_has_work(), the work in the dev work_list may be
>>>>>> rx work or tx work, right?
>>>>> Yes. We can add a boolean to record whether or not we've called
>>>>> vhost_poll_queue() for rvq. And avoid calling vhost_net_enable_vq() if
>>>>> it was true.
>>>> So the commit be294a51a ("vhost_net: Avoid rx queue wake-ups
>>>> during busypoll") may not consider the case where the work in the
>>>> dev work list is tx work.
>>> So there are two kinds of work: tx kick or tx wakeup.
>>>
>>> For tx kick, we check vhost_vq_avail_empty() and avoid unnecessary
>>> kicks by not enabling the kick if we find something pending on the
>>> txq. For tx wakeup, yes, the commit does not consider it, and that's
>>> why we want to disable tx wakeups during busy polling.
>> And in handle_rx, outside of busy polling, tx can wake up at any time
>> and the tx work will be added to the dev work list. In that case, is
>> it necessary to add the rx poll to the queue? The commit be294a51a
>> could check whether the rx work is in the dev work list.
> 
> I think the point is that we don't poll rx during tx at that time. So
> if rx polling is interrupted, we should reschedule handle_rx(). After
> we poll rx in handle_tx(), we can try to optimize this on top.

That's true. We may be able to skip poll_queue in handle_rx/tx after
rx/tx busypolling is unified by this patch set.
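
For reference, a sketch of where this discussion seems to be heading
(names follow the posted patch plus the poll_rx renaming suggested
earlier; not final code):

static void vhost_net_busy_poll_check(struct vhost_net *net,
				      struct vhost_virtqueue *rvq,
				      struct vhost_virtqueue *tvq,
				      bool poll_rx)
{
	struct socket *sock = rvq->private_data;

	if (poll_rx)
		vhost_net_busy_poll_try_queue(net, tvq);
	else if (sock && sk_has_rx_data(sock->sk))
		vhost_net_busy_poll_try_queue(net, rvq);
	else
		/* On tx with no rx data: handle_rx() is not scheduled,
		 * so re-enable rx wakeups instead of waiting for a
		 * guest kick that may never come.
		 */
		vhost_net_enable_vq(net, rvq);
}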

-- 
Toshiaki Makita


^ permalink raw reply	[flat|nested] 37+ messages in thread

end of thread, other threads:[~2018-08-03  6:08 UTC | newest]

Thread overview: 37+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-08-01  3:00 [PATCH net-next v7 0/4] net: vhost: improve performance when enable busyloop xiangxia.m.yue
2018-08-01  3:00 ` [PATCH net-next v7 1/4] net: vhost: lock the vqs one by one xiangxia.m.yue
2018-08-01  3:00 ` [PATCH net-next v7 2/4] net: vhost: replace magic number of lock annotation xiangxia.m.yue
2018-08-01  3:00 ` [PATCH net-next v7 3/4] net: vhost: factor out busy polling logic to vhost_net_busy_poll() xiangxia.m.yue
2018-08-01  6:01   ` Jason Wang
2018-08-01  9:52     ` Tonghao Zhang
2018-08-02  8:18       ` Jason Wang
2018-08-02  8:41         ` Toshiaki Makita
2018-08-02  9:23           ` Jason Wang
2018-08-02  9:57             ` Toshiaki Makita
2018-08-03  2:38               ` Jason Wang
2018-08-03  2:51             ` Tonghao Zhang
2018-08-03  3:07               ` Jason Wang
2018-08-03  3:24                 ` Tonghao Zhang
2018-08-03  3:40                   ` Toshiaki Makita
2018-08-03  4:14                     ` Tonghao Zhang
2018-08-03  4:25                       ` Toshiaki Makita
2018-08-03  3:43                   ` Jason Wang
2018-08-03  4:04                     ` Tonghao Zhang
2018-08-03  5:07                       ` Jason Wang
2018-08-03  5:25                         ` Toshiaki Makita
2018-08-03  3:07             ` Jason Wang
2018-08-03  3:32               ` Toshiaki Makita
2018-08-03  3:44                 ` Jason Wang
2018-08-01  3:00 ` [PATCH net-next v7 4/4] net: vhost: add rx busy polling in tx path xiangxia.m.yue
