* [PATCH net-next v2 1/5] virtio-net: napi helper functions
2017-04-18 20:21 [PATCH net-next v2 0/5] virtio-net tx napi Willem de Bruijn
@ 2017-04-18 20:21 ` Willem de Bruijn
2017-04-18 20:21 ` Willem de Bruijn
` (8 subsequent siblings)
9 siblings, 0 replies; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-18 20:21 UTC (permalink / raw)
To: netdev; +Cc: Willem de Bruijn, virtualization, davem, mst
From: Willem de Bruijn <willemb@google.com>
Prepare virtio-net for tx napi by converting existing napi code to
use helper functions. This also deduplicates some logic.
Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/net/virtio_net.c | 65 ++++++++++++++++++++++++++----------------------
1 file changed, 35 insertions(+), 30 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 666ada6130ab..b9c1df29892c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -239,6 +239,26 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p;
}
+static void virtqueue_napi_schedule(struct napi_struct *napi,
+ struct virtqueue *vq)
+{
+ if (napi_schedule_prep(napi)) {
+ virtqueue_disable_cb(vq);
+ __napi_schedule(napi);
+ }
+}
+
+static void virtqueue_napi_complete(struct napi_struct *napi,
+ struct virtqueue *vq, int processed)
+{
+ int opaque;
+
+ opaque = virtqueue_enable_cb_prepare(vq);
+ if (napi_complete_done(napi, processed) &&
+ unlikely(virtqueue_poll(vq, opaque)))
+ virtqueue_napi_schedule(napi, vq);
+}
+
static void skb_xmit_done(struct virtqueue *vq)
{
struct virtnet_info *vi = vq->vdev->priv;
@@ -936,27 +956,20 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv;
struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
- /* Schedule NAPI, Suppress further interrupts if successful. */
- if (napi_schedule_prep(&rq->napi)) {
- virtqueue_disable_cb(rvq);
- __napi_schedule(&rq->napi);
- }
+ virtqueue_napi_schedule(&rq->napi, rvq);
}
-static void virtnet_napi_enable(struct receive_queue *rq)
+static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
- napi_enable(&rq->napi);
+ napi_enable(napi);
/* If all buffers were filled by other side before we napi_enabled, we
- * won't get another interrupt, so process any outstanding packets
- * now. virtnet_poll wants re-enable the queue, so we disable here.
- * We synchronize against interrupts via NAPI_STATE_SCHED */
- if (napi_schedule_prep(&rq->napi)) {
- virtqueue_disable_cb(rq->vq);
- local_bh_disable();
- __napi_schedule(&rq->napi);
- local_bh_enable();
- }
+ * won't get another interrupt, so process any outstanding packets now.
+ * Call local_bh_enable after to trigger softIRQ processing.
+ */
+ local_bh_disable();
+ virtqueue_napi_schedule(napi, vq);
+ local_bh_enable();
}
static void refill_work(struct work_struct *work)
@@ -971,7 +984,7 @@ static void refill_work(struct work_struct *work)
napi_disable(&rq->napi);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq);
+ virtnet_napi_enable(rq->vq, &rq->napi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
@@ -1011,21 +1024,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
- unsigned int r, received;
+ unsigned int received;
received = virtnet_receive(rq, budget);
/* Out of packets? */
- if (received < budget) {
- r = virtqueue_enable_cb_prepare(rq->vq);
- if (napi_complete_done(napi, received)) {
- if (unlikely(virtqueue_poll(rq->vq, r)) &&
- napi_schedule_prep(napi)) {
- virtqueue_disable_cb(rq->vq);
- __napi_schedule(napi);
- }
- }
- }
+ if (received < budget)
+ virtqueue_napi_complete(napi, rq->vq, received);
return received;
}
@@ -1040,7 +1045,7 @@ static int virtnet_open(struct net_device *dev)
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
- virtnet_napi_enable(&vi->rq[i]);
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
}
return 0;
@@ -1747,7 +1752,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
schedule_delayed_work(&vi->refill, 0);
for (i = 0; i < vi->max_queue_pairs; i++)
- virtnet_napi_enable(&vi->rq[i]);
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
}
netif_device_attach(vi->dev);
--
2.12.2.816.g2cccc81164-goog
* [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-18 20:21 [PATCH net-next v2 0/5] virtio-net tx napi Willem de Bruijn
2017-04-18 20:21 ` [PATCH net-next v2 1/5] virtio-net: napi helper functions Willem de Bruijn
2017-04-18 20:21 ` Willem de Bruijn
@ 2017-04-18 20:21 ` Willem de Bruijn
2017-04-18 20:21 ` Willem de Bruijn
` (6 subsequent siblings)
9 siblings, 0 replies; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-18 20:21 UTC (permalink / raw)
To: netdev; +Cc: Willem de Bruijn, virtualization, davem, mst
From: Willem de Bruijn <willemb@google.com>
Convert virtio-net to a standard napi tx completion path. This enables
better TCP pacing using TCP small queues and increases single stream
throughput.
The virtio-net driver currently cleans tx descriptors on transmission
of new packets in ndo_start_xmit. Latency depends on new traffic, so
is unbounded. To avoid deadlock when a socket reaches its snd limit,
packets are orphaned on tranmission. This breaks socket backpressure,
including TSQ.
Napi increases the number of interrupts generated compared to the
current model, which keeps interrupts disabled as long as the ring
has enough free descriptors. Keep tx napi optional and disabled for
now. Follow-on patches will reduce the interrupt cost.
Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/net/virtio_net.c | 77 +++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 67 insertions(+), 10 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b9c1df29892c..c173e85dc7b8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -33,9 +33,10 @@
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
-static bool csum = true, gso = true;
+static bool csum = true, gso = true, napi_tx;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
+module_param(napi_tx, bool, 0644);
/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
@@ -86,6 +87,8 @@ struct send_queue {
/* Name of the send queue: output.$index */
char name[40];
+
+ struct napi_struct napi;
};
/* Internal representation of a receive virtqueue */
@@ -262,12 +265,16 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
static void skb_xmit_done(struct virtqueue *vq)
{
struct virtnet_info *vi = vq->vdev->priv;
+ struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
/* Suppress further interrupts. */
virtqueue_disable_cb(vq);
- /* We were probably waiting for more output buffers. */
- netif_wake_subqueue(vi->dev, vq2txq(vq));
+ if (napi->weight)
+ virtqueue_napi_schedule(napi, vq);
+ else
+ /* We were probably waiting for more output buffers. */
+ netif_wake_subqueue(vi->dev, vq2txq(vq));
}
static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
@@ -972,6 +979,21 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
local_bh_enable();
}
+static void virtnet_napi_tx_enable(struct virtnet_info *vi,
+ struct virtqueue *vq,
+ struct napi_struct *napi)
+{
+ if (!napi->weight)
+ return;
+
+ if (!vi->affinity_hint_set) {
+ napi->weight = 0;
+ return;
+ }
+
+ return virtnet_napi_enable(vq, napi);
+}
+
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi =
@@ -1046,6 +1068,7 @@ static int virtnet_open(struct net_device *dev)
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
}
return 0;
@@ -1081,6 +1104,25 @@ static void free_old_xmit_skbs(struct send_queue *sq)
u64_stats_update_end(&stats->tx_syncp);
}
+static int virtnet_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct send_queue *sq = container_of(napi, struct send_queue, napi);
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
+
+ if (__netif_tx_trylock(txq)) {
+ free_old_xmit_skbs(sq);
+ __netif_tx_unlock(txq);
+ }
+
+ virtqueue_napi_complete(napi, sq->vq, 0);
+
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_tx_wake_queue(txq);
+
+ return 0;
+}
+
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -1130,9 +1172,11 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
int err;
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
bool kick = !skb->xmit_more;
+ bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
- free_old_xmit_skbs(sq);
+ if (!use_napi)
+ free_old_xmit_skbs(sq);
/* timestamp packet in software */
skb_tx_timestamp(skb);
@@ -1152,8 +1196,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Don't wait up for transmitted skbs to be freed. */
- skb_orphan(skb);
- nf_reset(skb);
+ if (!use_napi) {
+ skb_orphan(skb);
+ nf_reset(skb);
+ }
/* If running out of space, stop queue to avoid getting packets that we
* are then unable to transmit.
@@ -1167,7 +1213,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
netif_stop_subqueue(dev, qnum);
- if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+ if (!use_napi &&
+ unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
free_old_xmit_skbs(sq);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
@@ -1371,8 +1418,10 @@ static int virtnet_close(struct net_device *dev)
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
+ napi_disable(&vi->sq[i].napi);
+ }
return 0;
}
@@ -1727,8 +1776,10 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
cancel_delayed_work_sync(&vi->refill);
if (netif_running(vi->dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
+ napi_disable(&vi->sq[i].napi);
+ }
}
}
@@ -1751,8 +1802,11 @@ static int virtnet_restore_up(struct virtio_device *vdev)
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+ &vi->sq[i].napi);
+ }
}
netif_device_attach(vi->dev);
@@ -1957,6 +2011,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
napi_hash_del(&vi->rq[i].napi);
netif_napi_del(&vi->rq[i].napi);
+ netif_napi_del(&vi->sq[i].napi);
}
/* We called napi_hash_del() before netif_napi_del(),
@@ -2142,6 +2197,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
vi->rq[i].pages = NULL;
netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
napi_weight);
+ netif_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
+ napi_tx ? napi_weight : 0);
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
--
2.12.2.816.g2cccc81164-goog
* Re: [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-18 20:21 ` Willem de Bruijn
@ 2017-04-20 6:12 ` Jason Wang
2017-04-20 6:12 ` Jason Wang
2017-04-20 6:27 ` Jason Wang
2 siblings, 0 replies; 35+ messages in thread
From: Jason Wang @ 2017-04-20 6:12 UTC (permalink / raw)
To: Willem de Bruijn, netdev; +Cc: Willem de Bruijn, virtualization, davem, mst
On 2017-04-19 04:21, Willem de Bruijn wrote:
> From: Willem de Bruijn <willemb@google.com>
>
> Convert virtio-net to a standard napi tx completion path. This enables
> better TCP pacing using TCP small queues and increases single stream
> throughput.
>
> The virtio-net driver currently cleans tx descriptors on transmission
> of new packets in ndo_start_xmit. Latency depends on new traffic, so
> is unbounded. To avoid deadlock when a socket reaches its snd limit,
> packets are orphaned on tranmission. This breaks socket backpressure,
> including TSQ.
>
> Napi increases the number of interrupts generated compared to the
> current model, which keeps interrupts disabled as long as the ring
> has enough free descriptors. Keep tx napi optional and disabled for
> now. Follow-on patches will reduce the interrupt cost.
>
> Signed-off-by: Willem de Bruijn <willemb@google.com>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
> drivers/net/virtio_net.c | 77 +++++++++++++++++++++++++++++++++++++++++-------
> 1 file changed, 67 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index b9c1df29892c..c173e85dc7b8 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -33,9 +33,10 @@
> static int napi_weight = NAPI_POLL_WEIGHT;
> module_param(napi_weight, int, 0444);
>
> -static bool csum = true, gso = true;
> +static bool csum = true, gso = true, napi_tx;
> module_param(csum, bool, 0444);
> module_param(gso, bool, 0444);
> +module_param(napi_tx, bool, 0644);
>
> /* FIXME: MTU in config. */
> #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
> @@ -86,6 +87,8 @@ struct send_queue {
>
> /* Name of the send queue: output.$index */
> char name[40];
> +
> + struct napi_struct napi;
> };
>
> /* Internal representation of a receive virtqueue */
> @@ -262,12 +265,16 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
> static void skb_xmit_done(struct virtqueue *vq)
> {
> struct virtnet_info *vi = vq->vdev->priv;
> + struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
>
> /* Suppress further interrupts. */
> virtqueue_disable_cb(vq);
>
> - /* We were probably waiting for more output buffers. */
> - netif_wake_subqueue(vi->dev, vq2txq(vq));
> + if (napi->weight)
> + virtqueue_napi_schedule(napi, vq);
> + else
> + /* We were probably waiting for more output buffers. */
> + netif_wake_subqueue(vi->dev, vq2txq(vq));
> }
>
> static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
> @@ -972,6 +979,21 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
> local_bh_enable();
> }
>
> +static void virtnet_napi_tx_enable(struct virtnet_info *vi,
> + struct virtqueue *vq,
> + struct napi_struct *napi)
> +{
> + if (!napi->weight)
> + return;
> +
> + if (!vi->affinity_hint_set) {
> + napi->weight = 0;
> + return;
> + }
> +
> + return virtnet_napi_enable(vq, napi);
> +}
> +
> static void refill_work(struct work_struct *work)
> {
> struct virtnet_info *vi =
> @@ -1046,6 +1068,7 @@ static int virtnet_open(struct net_device *dev)
> if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
> schedule_delayed_work(&vi->refill, 0);
> virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
> + virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
> }
>
> return 0;
> @@ -1081,6 +1104,25 @@ static void free_old_xmit_skbs(struct send_queue *sq)
> u64_stats_update_end(&stats->tx_syncp);
> }
>
> +static int virtnet_poll_tx(struct napi_struct *napi, int budget)
> +{
> + struct send_queue *sq = container_of(napi, struct send_queue, napi);
> + struct virtnet_info *vi = sq->vq->vdev->priv;
> + struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
> +
> + if (__netif_tx_trylock(txq)) {
> + free_old_xmit_skbs(sq);
> + __netif_tx_unlock(txq);
> + }
> +
> + virtqueue_napi_complete(napi, sq->vq, 0);
> +
> + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> + netif_tx_wake_queue(txq);
> +
> + return 0;
> +}
> +
> static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
> {
> struct virtio_net_hdr_mrg_rxbuf *hdr;
> @@ -1130,9 +1172,11 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> int err;
> struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
> bool kick = !skb->xmit_more;
> + bool use_napi = sq->napi.weight;
>
> /* Free up any pending old buffers before queueing new ones. */
> - free_old_xmit_skbs(sq);
> + if (!use_napi)
> + free_old_xmit_skbs(sq);
I'm not sure this is best or even correct. Since we only clean xmit
packets speculatively in virtnet_poll_tx(), we need to call
free_old_xmit_skbs() here unconditionally. This also helps reduce the
chance of napi rescheduling in virtnet_poll_tx().
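Something like this in start_xmit(), i.e. just drop the use_napi check
from the hunk above (untested):

	/* Free up any pending old buffers before queueing new ones.
	 * Do this even in napi mode: virtnet_poll_tx() only trylocks
	 * the txq and may have skipped the cleaning.
	 */
	free_old_xmit_skbs(sq);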
Thanks
* Re: [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-20 6:12 ` Jason Wang
@ 2017-04-20 16:02 ` Willem de Bruijn
2017-04-21 18:10 ` Willem de Bruijn
2017-04-21 18:10 ` Willem de Bruijn
2017-04-20 16:02 ` Willem de Bruijn
1 sibling, 2 replies; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-20 16:02 UTC (permalink / raw)
To: Jason Wang
Cc: Network Development, Michael S. Tsirkin, virtualization,
David Miller, Willem de Bruijn
>> static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
>> {
>> struct virtio_net_hdr_mrg_rxbuf *hdr;
>> @@ -1130,9 +1172,11 @@ static netdev_tx_t start_xmit(struct sk_buff *skb,
>> struct net_device *dev)
>> int err;
>> struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
>> bool kick = !skb->xmit_more;
>> + bool use_napi = sq->napi.weight;
>> /* Free up any pending old buffers before queueing new ones. */
>> - free_old_xmit_skbs(sq);
>> + if (!use_napi)
>> + free_old_xmit_skbs(sq);
>
>
> I'm not sure this is best or even correct. Consider we clean xmit packets
> speculatively in virtnet_poll_tx(), we need call free_old_xmit_skbs()
> unconditionally. This can also help to reduce the possible of napi
> rescheduling in virtnet_poll_tx().
Because of the use of trylock there. Absolutely, thanks! Perhaps I should
only use trylock in the opportunistic clean path from the rx softirq and
full locking in the tx softirq.
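For the tx softirq that would be roughly (sketch only, reusing the helpers
from patch 1/5):

static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));

	/* Take the lock unconditionally: cleaning must not be skipped here,
	 * or a stopped queue might never be woken.
	 */
	__netif_tx_lock(txq, raw_smp_processor_id());
	free_old_xmit_skbs(sq);
	__netif_tx_unlock(txq);

	virtqueue_napi_complete(napi, sq->vq, 0);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	return 0;
}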
I previously observed that cleaning in start_xmit would, counterintuitively,
reduce efficiency: it negated the gains from cleaning transmit completions
in the receive softirq. Going through my data, I no longer observe this
regression on the latest patchset.
Let me test again, with and without the new
virtqueue_enable_cb_delayed patch. Perhaps that made a
difference.
* Re: [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-20 16:02 ` Willem de Bruijn
@ 2017-04-21 18:10 ` Willem de Bruijn
2017-04-21 18:10 ` Willem de Bruijn
1 sibling, 0 replies; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-21 18:10 UTC (permalink / raw)
To: Jason Wang
Cc: Network Development, Michael S. Tsirkin, virtualization,
David Miller, Willem de Bruijn
On Thu, Apr 20, 2017 at 12:02 PM, Willem de Bruijn
<willemdebruijn.kernel@gmail.com> wrote:
>>> static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
>>> {
>>> struct virtio_net_hdr_mrg_rxbuf *hdr;
>>> @@ -1130,9 +1172,11 @@ static netdev_tx_t start_xmit(struct sk_buff *skb,
>>> struct net_device *dev)
>>> int err;
>>> struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
>>> bool kick = !skb->xmit_more;
>>> + bool use_napi = sq->napi.weight;
>>> /* Free up any pending old buffers before queueing new ones. */
>>> - free_old_xmit_skbs(sq);
>>> + if (!use_napi)
>>> + free_old_xmit_skbs(sq);
>>
>>
>> I'm not sure this is best or even correct. Consider we clean xmit packets
>> speculatively in virtnet_poll_tx(), we need call free_old_xmit_skbs()
>> unconditionally. This can also help to reduce the possible of napi
>> rescheduling in virtnet_poll_tx().
>
> Because of the use of trylock there. Absolutely, thanks! Perhaps I should
> only use trylock in the opportunistic clean path from the rx softirq and
> full locking in the tx softirq.
>
> I previously observed that cleaning here would, counterintuitively,
> reduce efficiency. It reverted the improvements of cleaning transmit
> completions from the receive softirq. Going through my data, I did
> not observe this regression anymore on the latest patchset.
>
> Let me test again, with and without the new
> virtqueue_enable_cb_delayed patch. Perhaps that made a
> difference.
Neither cleaning in start_xmit nor converting the napi tx trylock to lock
shows a significant impact on loadtests, whether cpu affine or not.
I'll make both changes, as the first reduces patch size and code complexity
and the second is a more obviously correct codepath than the one with trylock.
To be clear, the variant called from the rx napi handler will still
opportunistically use trylock.
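Something along these lines for that rx-side cleaner (the helper name is a
placeholder, this is only a sketch):

static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	/* Opportunistic: back off if the tx lock is contended. */
	if (!sq->napi.weight || !__netif_tx_trylock(txq))
		return;

	free_old_xmit_skbs(sq);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	__netif_tx_unlock(txq);
}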
* Re: [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-18 20:21 ` Willem de Bruijn
2017-04-20 6:12 ` Jason Wang
2017-04-20 6:12 ` Jason Wang
@ 2017-04-20 6:27 ` Jason Wang
2017-04-20 13:58 ` Willem de Bruijn
2017-04-20 13:58 ` Willem de Bruijn
2 siblings, 2 replies; 35+ messages in thread
From: Jason Wang @ 2017-04-20 6:27 UTC (permalink / raw)
To: Willem de Bruijn, netdev; +Cc: Willem de Bruijn, virtualization, davem, mst
On 2017-04-19 04:21, Willem de Bruijn wrote:
> +static void virtnet_napi_tx_enable(struct virtnet_info *vi,
> + struct virtqueue *vq,
> + struct napi_struct *napi)
> +{
> + if (!napi->weight)
> + return;
> +
> + if (!vi->affinity_hint_set) {
> + napi->weight = 0;
> + return;
> + }
> +
> + return virtnet_napi_enable(vq, napi);
> +}
> +
> static void refill_work(struct work_struct *work)
Maybe I was wrong, but according to Michael's comment it looks like he
wants to check affinity_hint_set only for the speculative tx polling from
rx napi, instead of disabling tx napi entirely.
And I'm not convinced this is really needed: the driver only provides an
affinity hint, not the affinity itself, so it's not guaranteed that the tx
and rx interrupts land on the same vcpu.
Thanks
* Re: [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-20 6:27 ` Jason Wang
@ 2017-04-20 13:58 ` Willem de Bruijn
2017-04-20 13:58 ` Willem de Bruijn
1 sibling, 0 replies; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-20 13:58 UTC (permalink / raw)
To: Jason Wang
Cc: Network Development, Willem de Bruijn, virtualization,
David Miller, Michael S. Tsirkin
On Thu, Apr 20, 2017 at 2:27 AM, Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2017年04月19日 04:21, Willem de Bruijn wrote:
>>
>> +static void virtnet_napi_tx_enable(struct virtnet_info *vi,
>> + struct virtqueue *vq,
>> + struct napi_struct *napi)
>> +{
>> + if (!napi->weight)
>> + return;
>> +
>> + if (!vi->affinity_hint_set) {
>> + napi->weight = 0;
>> + return;
>> + }
>> +
>> + return virtnet_napi_enable(vq, napi);
>> +}
>> +
>> static void refill_work(struct work_struct *work)
>
>
> Maybe I was wrong, but according to Michael's comment it looks like he want
> check affinity_hint_set just for speculative tx polling on rx napi instead
> of disabling it at all.
>
> And I'm not convinced this is really needed, driver only provide affinity
> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
> are in the same vcpus.
You're right. I made the restriction broader than the request, to really err
on the side of caution for the initial merge of napi tx. And enabling
the optimization is always a win over keeping it off, even without irq
affinity.
The cycle cost is significant without affinity regardless of whether the
optimization is used. Though this is not limited to napi-tx, it is more
pronounced in that mode than without napi.
1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
upstream:
1,1,1: 28985 Mbps, 278 Gcyc
1,0,2: 30067 Mbps, 402 Gcyc
napi tx:
1,1,1: 34492 Mbps, 269 Gcyc
1,0,2: 36527 Mbps, 537 Gcyc (!)
1,0,1: 36269 Mbps, 394 Gcyc
1,0,0: 34674 Mbps, 402 Gcyc
This is a particularly strong example. It is also representative
of most RR tests. It is less pronounced in other streaming tests.
10x TCP_RR, for instance:
upstream:
1,1,1: 42267 Mbps, 301 Gcyc
1,0,2: 40663 Mbps, 445 Gcyc
napi tx:
1,1,1: 42420 Mbps, 303 Gcyc
1,0,2: 42267 Mbps, 431 Gcyc
These numbers were obtained with the virtqueue_enable_cb_delayed
optimization after xmit_skb, btw. It turns out that moving that before
increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
100x TCP_RR a bit.
* Re: [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-20 13:58 ` Willem de Bruijn
@ 2017-04-21 3:53 ` Jason Wang
2017-04-21 14:50 ` Willem de Bruijn
2017-04-21 14:50 ` Willem de Bruijn
0 siblings, 2 replies; 35+ messages in thread
From: Jason Wang @ 2017-04-21 3:53 UTC (permalink / raw)
To: Willem de Bruijn
Cc: Network Development, Willem de Bruijn, virtualization,
David Miller, Michael S. Tsirkin
On 2017-04-20 21:58, Willem de Bruijn wrote:
> On Thu, Apr 20, 2017 at 2:27 AM, Jason Wang <jasowang@redhat.com> wrote:
>>
>> On 2017年04月19日 04:21, Willem de Bruijn wrote:
>>> +static void virtnet_napi_tx_enable(struct virtnet_info *vi,
>>> + struct virtqueue *vq,
>>> + struct napi_struct *napi)
>>> +{
>>> + if (!napi->weight)
>>> + return;
>>> +
>>> + if (!vi->affinity_hint_set) {
>>> + napi->weight = 0;
>>> + return;
>>> + }
>>> +
>>> + return virtnet_napi_enable(vq, napi);
>>> +}
>>> +
>>> static void refill_work(struct work_struct *work)
>>
>> Maybe I was wrong, but according to Michael's comment it looks like he want
>> check affinity_hint_set just for speculative tx polling on rx napi instead
>> of disabling it at all.
>>
>> And I'm not convinced this is really needed, driver only provide affinity
>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
>> are in the same vcpus.
> You're right. I made the restriction broader than the request, to really err
> on the side of caution for the initial merge of napi tx. And enabling
> the optimization is always a win over keeping it off, even without irq
> affinity.
>
> The cycle cost is significant without affinity regardless of whether the
> optimization is used.
Yes, I noticed this in the past too.
> Though this is not limited to napi-tx, it is more
> pronounced in that mode than without napi.
>
> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
>
> upstream:
>
> 1,1,1: 28985 Mbps, 278 Gcyc
> 1,0,2: 30067 Mbps, 402 Gcyc
>
> napi tx:
>
> 1,1,1: 34492 Mbps, 269 Gcyc
> 1,0,2: 36527 Mbps, 537 Gcyc (!)
> 1,0,1: 36269 Mbps, 394 Gcyc
> 1,0,0: 34674 Mbps, 402 Gcyc
>
> This is a particularly strong example. It is also representative
> of most RR tests. It is less pronounced in other streaming tests.
> 10x TCP_RR, for instance:
>
> upstream:
>
> 1,1,1: 42267 Mbps, 301 Gcyc
> 1,0,2: 40663 Mbps, 445 Gcyc
>
> napi tx:
>
> 1,1,1: 42420 Mbps, 303 Gcyc
> 1,0,2: 42267 Mbps, 431 Gcyc
>
> These numbers were obtained with the virtqueue_enable_cb_delayed
> optimization after xmit_skb, btw. It turns out that moving that before
> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
> 100x TCP_RR a bit.
I see. So I think we can leave the affinity hint optimization/check for
future investigation:
- it avoids endless optimization for this series (e.g. we may want to share
  a single vector/napi per tx/rx queue pair in the future).
- tx napi is disabled by default, which means we can do further optimization
  on top.
Thanks
* Re: [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-21 3:53 ` Jason Wang
@ 2017-04-21 14:50 ` Willem de Bruijn
2017-04-24 16:40 ` Michael S. Tsirkin
2017-04-21 14:50 ` Willem de Bruijn
1 sibling, 1 reply; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-21 14:50 UTC (permalink / raw)
To: Jason Wang
Cc: Network Development, Michael S. Tsirkin, virtualization,
David Miller, Willem de Bruijn
>>> Maybe I was wrong, but according to Michael's comment it looks like he
>>> want
>>> check affinity_hint_set just for speculative tx polling on rx napi
>>> instead
>>> of disabling it at all.
>>>
>>> And I'm not convinced this is really needed, driver only provide affinity
>>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
>>> are in the same vcpus.
>>
>> You're right. I made the restriction broader than the request, to really
>> err
>> on the side of caution for the initial merge of napi tx. And enabling
>> the optimization is always a win over keeping it off, even without irq
>> affinity.
>>
>> The cycle cost is significant without affinity regardless of whether the
>> optimization is used.
>
>
> Yes, I noticed this in the past too.
>
>> Though this is not limited to napi-tx, it is more
>> pronounced in that mode than without napi.
>>
>> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
>>
>> upstream:
>>
>> 1,1,1: 28985 Mbps, 278 Gcyc
>> 1,0,2: 30067 Mbps, 402 Gcyc
>>
>> napi tx:
>>
>> 1,1,1: 34492 Mbps, 269 Gcyc
>> 1,0,2: 36527 Mbps, 537 Gcyc (!)
>> 1,0,1: 36269 Mbps, 394 Gcyc
>> 1,0,0: 34674 Mbps, 402 Gcyc
>>
>> This is a particularly strong example. It is also representative
>> of most RR tests. It is less pronounced in other streaming tests.
>> 10x TCP_RR, for instance:
>>
>> upstream:
>>
>> 1,1,1: 42267 Mbps, 301 Gcyc
>> 1,0,2: 40663 Mbps, 445 Gcyc
>>
>> napi tx:
>>
>> 1,1,1: 42420 Mbps, 303 Gcyc
>> 1,0,2: 42267 Mbps, 431 Gcyc
>>
>> These numbers were obtained with the virtqueue_enable_cb_delayed
>> optimization after xmit_skb, btw. It turns out that moving that before
>> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
>> 100x TCP_RR a bit.
>
>
> I see, so I think we can leave the affinity hint optimization/check for
> future investigation:
>
> - to avoid endless optimization (e.g we may want to share a single
> vector/napi for tx/rx queue pairs in the future) for this series.
> - tx napi is disabled by default which means we can do optimization on top.
Okay. I'll drop the vi->affinity_hint_set from the patch set for now.
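That leaves just (sketch):

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{
	/* napi->weight == 0 means tx napi is disabled (napi_tx off). */
	if (!napi->weight)
		return;

	virtnet_napi_enable(vq, napi);
}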
* Re: [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-21 14:50 ` Willem de Bruijn
@ 2017-04-24 16:40 ` Michael S. Tsirkin
2017-04-24 17:05 ` Willem de Bruijn
` (2 more replies)
0 siblings, 3 replies; 35+ messages in thread
From: Michael S. Tsirkin @ 2017-04-24 16:40 UTC (permalink / raw)
To: Willem de Bruijn
Cc: Willem de Bruijn, Network Development, David Miller, virtualization
On Fri, Apr 21, 2017 at 10:50:12AM -0400, Willem de Bruijn wrote:
> >>> Maybe I was wrong, but according to Michael's comment it looks like he
> >>> want
> >>> check affinity_hint_set just for speculative tx polling on rx napi
> >>> instead
> >>> of disabling it at all.
> >>>
> >>> And I'm not convinced this is really needed, driver only provide affinity
> >>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
> >>> are in the same vcpus.
> >>
> >> You're right. I made the restriction broader than the request, to really
> >> err
> >> on the side of caution for the initial merge of napi tx. And enabling
> >> the optimization is always a win over keeping it off, even without irq
> >> affinity.
> >>
> >> The cycle cost is significant without affinity regardless of whether the
> >> optimization is used.
> >
> >
> > Yes, I noticed this in the past too.
> >
> >> Though this is not limited to napi-tx, it is more
> >> pronounced in that mode than without napi.
> >>
> >> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
> >>
> >> upstream:
> >>
> >> 1,1,1: 28985 Mbps, 278 Gcyc
> >> 1,0,2: 30067 Mbps, 402 Gcyc
> >>
> >> napi tx:
> >>
> >> 1,1,1: 34492 Mbps, 269 Gcyc
> >> 1,0,2: 36527 Mbps, 537 Gcyc (!)
> >> 1,0,1: 36269 Mbps, 394 Gcyc
> >> 1,0,0: 34674 Mbps, 402 Gcyc
> >>
> >> This is a particularly strong example. It is also representative
> >> of most RR tests. It is less pronounced in other streaming tests.
> >> 10x TCP_RR, for instance:
> >>
> >> upstream:
> >>
> >> 1,1,1: 42267 Mbps, 301 Gcyc
> >> 1,0,2: 40663 Mbps, 445 Gcyc
> >>
> >> napi tx:
> >>
> >> 1,1,1: 42420 Mbps, 303 Gcyc
> >> 1,0,2: 42267 Mbps, 431 Gcyc
> >>
> >> These numbers were obtained with the virtqueue_enable_cb_delayed
> >> optimization after xmit_skb, btw. It turns out that moving that before
> >> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
> >> 100x TCP_RR a bit.
> >
> >
> > I see, so I think we can leave the affinity hint optimization/check for
> > future investigation:
> >
> > - to avoid endless optimization (e.g we may want to share a single
> > vector/napi for tx/rx queue pairs in the future) for this series.
> > - tx napi is disabled by default which means we can do optimization on top.
>
> Okay. I'll drop the vi->affinity_hint_set from the patch set for now.
I kind of like it, let's be conservative. But I'd prefer a comment
near it explaining why it's there.
--
MST
^ permalink raw reply [flat|nested] 35+ messages in thread
* Re: [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-24 16:40 ` Michael S. Tsirkin
@ 2017-04-24 17:05 ` Willem de Bruijn
2017-04-24 17:14 ` Michael S. Tsirkin
2017-04-24 17:05 ` Willem de Bruijn
2017-04-25 8:39 ` Jason Wang
2 siblings, 1 reply; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-24 17:05 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: Jason Wang, Network Development, virtualization, David Miller,
Willem de Bruijn
On Mon, Apr 24, 2017 at 12:40 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Fri, Apr 21, 2017 at 10:50:12AM -0400, Willem de Bruijn wrote:
>> >>> Maybe I was wrong, but according to Michael's comment it looks like he
>> >>> want
>> >>> check affinity_hint_set just for speculative tx polling on rx napi
>> >>> instead
>> >>> of disabling it at all.
>> >>>
>> >>> And I'm not convinced this is really needed, driver only provide affinity
>> >>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
>> >>> are in the same vcpus.
>> >>
>> >> You're right. I made the restriction broader than the request, to really
>> >> err
>> >> on the side of caution for the initial merge of napi tx. And enabling
>> >> the optimization is always a win over keeping it off, even without irq
>> >> affinity.
>> >>
>> >> The cycle cost is significant without affinity regardless of whether the
>> >> optimization is used.
>> >
>> >
>> > Yes, I noticed this in the past too.
>> >
>> >> Though this is not limited to napi-tx, it is more
>> >> pronounced in that mode than without napi.
>> >>
>> >> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
>> >>
>> >> upstream:
>> >>
>> >> 1,1,1: 28985 Mbps, 278 Gcyc
>> >> 1,0,2: 30067 Mbps, 402 Gcyc
>> >>
>> >> napi tx:
>> >>
>> >> 1,1,1: 34492 Mbps, 269 Gcyc
>> >> 1,0,2: 36527 Mbps, 537 Gcyc (!)
>> >> 1,0,1: 36269 Mbps, 394 Gcyc
>> >> 1,0,0: 34674 Mbps, 402 Gcyc
>> >>
>> >> This is a particularly strong example. It is also representative
>> >> of most RR tests. It is less pronounced in other streaming tests.
>> >> 10x TCP_RR, for instance:
>> >>
>> >> upstream:
>> >>
>> >> 1,1,1: 42267 Mbps, 301 Gcyc
>> >> 1,0,2: 40663 Mbps, 445 Gcyc
>> >>
>> >> napi tx:
>> >>
>> >> 1,1,1: 42420 Mbps, 303 Gcyc
>> >> 1,0,2: 42267 Mbps, 431 Gcyc
>> >>
>> >> These numbers were obtained with the virtqueue_enable_cb_delayed
>> >> optimization after xmit_skb, btw. It turns out that moving that before
>> >> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
>> >> 100x TCP_RR a bit.
>> >
>> >
>> > I see, so I think we can leave the affinity hint optimization/check for
>> > future investigation:
>> >
>> > - to avoid endless optimization (e.g we may want to share a single
>> > vector/napi for tx/rx queue pairs in the future) for this series.
>> > - tx napi is disabled by default which means we can do optimization on top.
>>
>> Okay. I'll drop the vi->affinity_hint_set from the patch set for now.
>
> I kind of like it, let's be conservative. But I'd prefer a comment
> near it explaining why it's there.
I don't feel strongly. Was minutes away from sending a v3 with this
code reverted, but I'll reinstate it and add a comment. Other planned
changes based on Jason's feedback to v2:
v2 -> v3:
- convert __netif_tx_trylock to __netif_tx_lock on tx napi poll
ensure that the handler always cleans, to avoid deadlock
- unconditionally clean in start_xmit
avoid adding an unnecessary "if (use_napi)" branch
- remove virtqueue_disable_cb in patch 5/5
a noop in the common event_idx based loop
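Roughly, the reworked tx poll handler would then look like the sketch below. This is only an illustration of the planned v3 change (the actual diff against v2 follows later in the thread); free_old_xmit_skbs and virtqueue_napi_complete are the helpers introduced earlier in this series.
static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));

	/* Take the lock unconditionally (v2 used __netif_tx_trylock) so the
	 * poll handler always cleans completed skbs and a stopped queue
	 * cannot be left waiting on a cleaning pass that never ran.
	 */
	__netif_tx_lock(txq, raw_smp_processor_id());
	free_old_xmit_skbs(sq);
	__netif_tx_unlock(txq);

	virtqueue_napi_complete(napi, sq->vq, 0);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	return 0;
}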
^ permalink raw reply [flat|nested] 35+ messages in thread
* Re: [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-24 17:05 ` Willem de Bruijn
@ 2017-04-24 17:14 ` Michael S. Tsirkin
2017-04-24 17:51 ` Willem de Bruijn
2017-04-24 17:51 ` Willem de Bruijn
0 siblings, 2 replies; 35+ messages in thread
From: Michael S. Tsirkin @ 2017-04-24 17:14 UTC (permalink / raw)
To: Willem de Bruijn
Cc: Willem de Bruijn, Network Development, David Miller, virtualization
On Mon, Apr 24, 2017 at 01:05:45PM -0400, Willem de Bruijn wrote:
> On Mon, Apr 24, 2017 at 12:40 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > On Fri, Apr 21, 2017 at 10:50:12AM -0400, Willem de Bruijn wrote:
> >> >>> Maybe I was wrong, but according to Michael's comment it looks like he
> >> >>> want
> >> >>> check affinity_hint_set just for speculative tx polling on rx napi
> >> >>> instead
> >> >>> of disabling it at all.
> >> >>>
> >> >>> And I'm not convinced this is really needed, driver only provide affinity
> >> >>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
> >> >>> are in the same vcpus.
> >> >>
> >> >> You're right. I made the restriction broader than the request, to really
> >> >> err
> >> >> on the side of caution for the initial merge of napi tx. And enabling
> >> >> the optimization is always a win over keeping it off, even without irq
> >> >> affinity.
> >> >>
> >> >> The cycle cost is significant without affinity regardless of whether the
> >> >> optimization is used.
> >> >
> >> >
> >> > Yes, I noticed this in the past too.
> >> >
> >> >> Though this is not limited to napi-tx, it is more
> >> >> pronounced in that mode than without napi.
> >> >>
> >> >> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
> >> >>
> >> >> upstream:
> >> >>
> >> >> 1,1,1: 28985 Mbps, 278 Gcyc
> >> >> 1,0,2: 30067 Mbps, 402 Gcyc
> >> >>
> >> >> napi tx:
> >> >>
> >> >> 1,1,1: 34492 Mbps, 269 Gcyc
> >> >> 1,0,2: 36527 Mbps, 537 Gcyc (!)
> >> >> 1,0,1: 36269 Mbps, 394 Gcyc
> >> >> 1,0,0: 34674 Mbps, 402 Gcyc
> >> >>
> >> >> This is a particularly strong example. It is also representative
> >> >> of most RR tests. It is less pronounced in other streaming tests.
> >> >> 10x TCP_RR, for instance:
> >> >>
> >> >> upstream:
> >> >>
> >> >> 1,1,1: 42267 Mbps, 301 Gcyc
> >> >> 1,0,2: 40663 Mbps, 445 Gcyc
> >> >>
> >> >> napi tx:
> >> >>
> >> >> 1,1,1: 42420 Mbps, 303 Gcyc
> >> >> 1,0,2: 42267 Mbps, 431 Gcyc
> >> >>
> >> >> These numbers were obtained with the virtqueue_enable_cb_delayed
> >> >> optimization after xmit_skb, btw. It turns out that moving that before
> >> >> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
> >> >> 100x TCP_RR a bit.
> >> >
> >> >
> >> > I see, so I think we can leave the affinity hint optimization/check for
> >> > future investigation:
> >> >
> >> > - to avoid endless optimization (e.g we may want to share a single
> >> > vector/napi for tx/rx queue pairs in the future) for this series.
> >> > - tx napi is disabled by default which means we can do optimization on top.
> >>
> >> Okay. I'll drop the vi->affinity_hint_set from the patch set for now.
> >
> > I kind of like it, let's be conservative. But I'd prefer a comment
> > near it explaining why it's there.
>
> I don't feel strongly. Was minutes away from sending a v3 with this
> code reverted, but I'll reinstate it and add a comment. Other planned
> changes based on Jason's feedback to v2:
>
> v2 -> v3:
> - convert __netif_tx_trylock to __netif_tx_lock on tx napi poll
> ensure that the handler always cleans, to avoid deadlock
> - unconditionally clean in start_xmit
> avoid adding an unnecessary "if (use_napi)" branch
> - remove virtqueue_disable_cb in patch 5/5
> a noop in the common event_idx based loop
Makes sense, thanks!
--
MST
^ permalink raw reply [flat|nested] 35+ messages in thread
* Re: [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-24 17:14 ` Michael S. Tsirkin
@ 2017-04-24 17:51 ` Willem de Bruijn
2017-04-24 17:51 ` Willem de Bruijn
1 sibling, 0 replies; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-24 17:51 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: Willem de Bruijn, Network Development, David Miller, virtualization
On Mon, Apr 24, 2017 at 1:14 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Mon, Apr 24, 2017 at 01:05:45PM -0400, Willem de Bruijn wrote:
>> On Mon, Apr 24, 2017 at 12:40 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>> > On Fri, Apr 21, 2017 at 10:50:12AM -0400, Willem de Bruijn wrote:
>> >> >>> Maybe I was wrong, but according to Michael's comment it looks like he
>> >> >>> want
>> >> >>> check affinity_hint_set just for speculative tx polling on rx napi
>> >> >>> instead
>> >> >>> of disabling it at all.
>> >> >>>
>> >> >>> And I'm not convinced this is really needed, driver only provide affinity
>> >> >>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
>> >> >>> are in the same vcpus.
>> >> >>
>> >> >> You're right. I made the restriction broader than the request, to really
>> >> >> err
>> >> >> on the side of caution for the initial merge of napi tx. And enabling
>> >> >> the optimization is always a win over keeping it off, even without irq
>> >> >> affinity.
>> >> >>
>> >> >> The cycle cost is significant without affinity regardless of whether the
>> >> >> optimization is used.
>> >> >
>> >> >
>> >> > Yes, I noticed this in the past too.
>> >> >
>> >> >> Though this is not limited to napi-tx, it is more
>> >> >> pronounced in that mode than without napi.
>> >> >>
>> >> >> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
>> >> >>
>> >> >> upstream:
>> >> >>
>> >> >> 1,1,1: 28985 Mbps, 278 Gcyc
>> >> >> 1,0,2: 30067 Mbps, 402 Gcyc
>> >> >>
>> >> >> napi tx:
>> >> >>
>> >> >> 1,1,1: 34492 Mbps, 269 Gcyc
>> >> >> 1,0,2: 36527 Mbps, 537 Gcyc (!)
>> >> >> 1,0,1: 36269 Mbps, 394 Gcyc
>> >> >> 1,0,0: 34674 Mbps, 402 Gcyc
>> >> >>
>> >> >> This is a particularly strong example. It is also representative
>> >> >> of most RR tests. It is less pronounced in other streaming tests.
>> >> >> 10x TCP_RR, for instance:
>> >> >>
>> >> >> upstream:
>> >> >>
>> >> >> 1,1,1: 42267 Mbps, 301 Gcyc
>> >> >> 1,0,2: 40663 Mbps, 445 Gcyc
>> >> >>
>> >> >> napi tx:
>> >> >>
>> >> >> 1,1,1: 42420 Mbps, 303 Gcyc
>> >> >> 1,0,2: 42267 Mbps, 431 Gcyc
>> >> >>
>> >> >> These numbers were obtained with the virtqueue_enable_cb_delayed
>> >> >> optimization after xmit_skb, btw. It turns out that moving that before
>> >> >> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
>> >> >> 100x TCP_RR a bit.
>> >> >
>> >> >
>> >> > I see, so I think we can leave the affinity hint optimization/check for
>> >> > future investigation:
>> >> >
>> >> > - to avoid endless optimization (e.g we may want to share a single
>> >> > vector/napi for tx/rx queue pairs in the future) for this series.
>> >> > - tx napi is disabled by default which means we can do optimization on top.
>> >>
>> >> Okay. I'll drop the vi->affinity_hint_set from the patch set for now.
>> >
>> > I kind of like it, let's be conservative. But I'd prefer a comment
>> > near it explaining why it's there.
>>
>> I don't feel strongly. Was minutes away from sending a v3 with this
>> code reverted, but I'll reinstate it and add a comment. Other planned
>> changes based on Jason's feedback to v2:
>>
>> v2 -> v3:
>> - convert __netif_tx_trylock to __netif_tx_lock on tx napi poll
>> ensure that the handler always cleans, to avoid deadlock
>> - unconditionally clean in start_xmit
>> avoid adding an unnecessary "if (use_napi)" branch
>> - remove virtqueue_disable_cb in patch 5/5
>> a noop in the common event_idx based loop
>
> Makes sense, thanks!
Great. Sent that, thanks.
The actual diff to v2 is quite small:
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b107ae011632..003143835766 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -986,6 +986,9 @@ static void virtnet_napi_tx_enable(struct virtnet_info *vi,
if (!napi->weight)
return;
+ /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
+ * enable the feature if this is likely affine with the transmit path.
+ */
if (!vi->affinity_hint_set) {
napi->weight = 0;
return;
@@ -1131,10 +1134,9 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
struct virtnet_info *vi = sq->vq->vdev->priv;
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
- if (__netif_tx_trylock(txq)) {
- free_old_xmit_skbs(sq);
- __netif_tx_unlock(txq);
- }
+ __netif_tx_lock(txq, raw_smp_processor_id());
+ free_old_xmit_skbs(sq);
+ __netif_tx_unlock(txq);
virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1196,14 +1198,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
- if (use_napi) {
- if (kick)
- virtqueue_enable_cb_delayed(sq->vq);
- else
- virtqueue_disable_cb(sq->vq);
- } else {
- free_old_xmit_skbs(sq);
- }
+ free_old_xmit_skbs(sq);
+
+ if (use_napi && kick)
+ virtqueue_enable_cb_delayed(sq->vq);
(gmail will munge the indentation, sorry)
^ permalink raw reply related [flat|nested] 35+ messages in thread
* Re: [PATCH net-next v2 2/5] virtio-net: transmit napi
2017-04-24 16:40 ` Michael S. Tsirkin
2017-04-24 17:05 ` Willem de Bruijn
2017-04-24 17:05 ` Willem de Bruijn
@ 2017-04-25 8:39 ` Jason Wang
2 siblings, 0 replies; 35+ messages in thread
From: Jason Wang @ 2017-04-25 8:39 UTC (permalink / raw)
To: Michael S. Tsirkin, Willem de Bruijn
Cc: Network Development, Willem de Bruijn, David Miller, virtualization
On 2017-04-25 00:40, Michael S. Tsirkin wrote:
> On Fri, Apr 21, 2017 at 10:50:12AM -0400, Willem de Bruijn wrote:
>>>>> Maybe I was wrong, but according to Michael's comment it looks like he
>>>>> want
>>>>> check affinity_hint_set just for speculative tx polling on rx napi
>>>>> instead
>>>>> of disabling it at all.
>>>>>
>>>>> And I'm not convinced this is really needed, driver only provide affinity
>>>>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
>>>>> are in the same vcpus.
>>>> You're right. I made the restriction broader than the request, to really
>>>> err
>>>> on the side of caution for the initial merge of napi tx. And enabling
>>>> the optimization is always a win over keeping it off, even without irq
>>>> affinity.
>>>>
>>>> The cycle cost is significant without affinity regardless of whether the
>>>> optimization is used.
>>>
>>> Yes, I noticed this in the past too.
>>>
>>>> Though this is not limited to napi-tx, it is more
>>>> pronounced in that mode than without napi.
>>>>
>>>> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
>>>>
>>>> upstream:
>>>>
>>>> 1,1,1: 28985 Mbps, 278 Gcyc
>>>> 1,0,2: 30067 Mbps, 402 Gcyc
>>>>
>>>> napi tx:
>>>>
>>>> 1,1,1: 34492 Mbps, 269 Gcyc
>>>> 1,0,2: 36527 Mbps, 537 Gcyc (!)
>>>> 1,0,1: 36269 Mbps, 394 Gcyc
>>>> 1,0,0: 34674 Mbps, 402 Gcyc
>>>>
>>>> This is a particularly strong example. It is also representative
>>>> of most RR tests. It is less pronounced in other streaming tests.
>>>> 10x TCP_RR, for instance:
>>>>
>>>> upstream:
>>>>
>>>> 1,1,1: 42267 Mbps, 301 Gcyc
>>>> 1,0,2: 40663 Mbps, 445 Gcyc
>>>>
>>>> napi tx:
>>>>
>>>> 1,1,1: 42420 Mbps, 303 Gcyc
>>>> 1,0,2: 42267 Mbps, 431 Gcyc
>>>>
>>>> These numbers were obtained with the virtqueue_enable_cb_delayed
>>>> optimization after xmit_skb, btw. It turns out that moving that before
>>>> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
>>>> 100x TCP_RR a bit.
>>>
>>> I see, so I think we can leave the affinity hint optimization/check for
>>> future investigation:
>>>
>>> - to avoid endless optimization (e.g we may want to share a single
>>> vector/napi for tx/rx queue pairs in the future) for this series.
>>> - tx napi is disabled by default which means we can do optimization on top.
>> Okay. I'll drop the vi->affinity_hint_set from the patch set for now.
> I kind of like it, let's be conservative. But I'd prefer a comment
> near it explaining why it's there.
>
Another issue with affinity_hint_set is that it can be changed when
setting channels. I think we're already conservative enough (e.g. it is
disabled by default).
Thanks
^ permalink raw reply [flat|nested] 35+ messages in thread
* [PATCH net-next v2 3/5] virtio-net: move free_old_xmit_skbs
2017-04-18 20:21 [PATCH net-next v2 0/5] virtio-net tx napi Willem de Bruijn
` (3 preceding siblings ...)
2017-04-18 20:21 ` Willem de Bruijn
@ 2017-04-18 20:21 ` Willem de Bruijn
2017-04-18 20:21 ` Willem de Bruijn
` (4 subsequent siblings)
9 siblings, 0 replies; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-18 20:21 UTC (permalink / raw)
To: netdev; +Cc: Willem de Bruijn, virtualization, davem, mst
From: Willem de Bruijn <willemb@google.com>
An upcoming patch will call free_old_xmit_skbs indirectly from
virtnet_poll. Move the function above this to avoid having to
introduce a forward declaration.
This is a pure move: no code changes.
Signed-off-by: Willem de Bruijn <willemb@google.com>
---
drivers/net/virtio_net.c | 60 ++++++++++++++++++++++++------------------------
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c173e85dc7b8..81a98de864e6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1042,6 +1042,36 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
return received;
}
+static void free_old_xmit_skbs(struct send_queue *sq)
+{
+ struct sk_buff *skb;
+ unsigned int len;
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
+ unsigned int packets = 0;
+ unsigned int bytes = 0;
+
+ while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ pr_debug("Sent skb %p\n", skb);
+
+ bytes += skb->len;
+ packets++;
+
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Avoid overhead when no packets have been processed
+ * happens when called speculatively from start_xmit.
+ */
+ if (!packets)
+ return;
+
+ u64_stats_update_begin(&stats->tx_syncp);
+ stats->tx_bytes += bytes;
+ stats->tx_packets += packets;
+ u64_stats_update_end(&stats->tx_syncp);
+}
+
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
@@ -1074,36 +1104,6 @@ static int virtnet_open(struct net_device *dev)
return 0;
}
-static void free_old_xmit_skbs(struct send_queue *sq)
-{
- struct sk_buff *skb;
- unsigned int len;
- struct virtnet_info *vi = sq->vq->vdev->priv;
- struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
- unsigned int packets = 0;
- unsigned int bytes = 0;
-
- while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- pr_debug("Sent skb %p\n", skb);
-
- bytes += skb->len;
- packets++;
-
- dev_kfree_skb_any(skb);
- }
-
- /* Avoid overhead when no packets have been processed
- * happens when called speculatively from start_xmit.
- */
- if (!packets)
- return;
-
- u64_stats_update_begin(&stats->tx_syncp);
- stats->tx_bytes += bytes;
- stats->tx_packets += packets;
- u64_stats_update_end(&stats->tx_syncp);
-}
-
static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
struct send_queue *sq = container_of(napi, struct send_queue, napi);
--
2.12.2.816.g2cccc81164-goog
^ permalink raw reply related [flat|nested] 35+ messages in thread
* [PATCH net-next v2 4/5] virtio-net: clean tx descriptors from rx napi
2017-04-18 20:21 [PATCH net-next v2 0/5] virtio-net tx napi Willem de Bruijn
` (5 preceding siblings ...)
2017-04-18 20:21 ` Willem de Bruijn
@ 2017-04-18 20:21 ` Willem de Bruijn
2017-04-18 20:21 ` Willem de Bruijn
` (2 subsequent siblings)
9 siblings, 0 replies; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-18 20:21 UTC (permalink / raw)
To: netdev; +Cc: mst, jasowang, virtualization, davem, Willem de Bruijn
From: Willem de Bruijn <willemb@google.com>
Amortize the cost of virtual interrupts by doing both rx and tx work
on reception of a receive interrupt if tx napi is enabled. With
VIRTIO_F_EVENT_IDX, this suppresses most explicit tx completion
interrupts for bidirectional workloads.
Signed-off-by: Willem de Bruijn <willemb@google.com>
---
drivers/net/virtio_net.c | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 81a98de864e6..b14c82ce0032 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1072,12 +1072,33 @@ static void free_old_xmit_skbs(struct send_queue *sq)
u64_stats_update_end(&stats->tx_syncp);
}
+static void virtnet_poll_cleantx(struct receive_queue *rq)
+{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ unsigned int index = vq2rxq(rq->vq);
+ struct send_queue *sq = &vi->sq[index];
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
+
+ if (!sq->napi.weight)
+ return;
+
+ if (__netif_tx_trylock(txq)) {
+ free_old_xmit_skbs(sq);
+ __netif_tx_unlock(txq);
+ }
+
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_tx_wake_queue(txq);
+}
+
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
unsigned int received;
+ virtnet_poll_cleantx(rq);
+
received = virtnet_receive(rq, budget);
/* Out of packets? */
--
2.12.2.816.g2cccc81164-goog
^ permalink raw reply related [flat|nested] 35+ messages in thread
* [PATCH net-next v2 5/5] virtio-net: keep tx interrupts disabled unless kick
2017-04-18 20:21 [PATCH net-next v2 0/5] virtio-net tx napi Willem de Bruijn
` (8 preceding siblings ...)
2017-04-18 20:21 ` [PATCH net-next v2 5/5] virtio-net: keep tx interrupts disabled unless kick Willem de Bruijn
@ 2017-04-18 20:21 ` Willem de Bruijn
2017-04-20 6:17 ` Jason Wang
9 siblings, 1 reply; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-18 20:21 UTC (permalink / raw)
To: netdev; +Cc: mst, jasowang, virtualization, davem, Willem de Bruijn
From: Willem de Bruijn <willemb@google.com>
Tx napi mode increases the rate of transmit interrupts. Suppress some
by masking interrupts while more packets are expected. The interrupts
will be reenabled before the last packet is sent.
This optimization reduces the throughput drop with tx napi for
unidirectional flows such as UDP_STREAM that do not benefit from
cleaning tx completions in the receive napi handler.
Signed-off-by: Willem de Bruijn <willemb@google.com>
---
drivers/net/virtio_net.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b14c82ce0032..b107ae011632 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1196,8 +1196,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
- if (!use_napi)
+ if (use_napi) {
+ if (kick)
+ virtqueue_enable_cb_delayed(sq->vq);
+ else
+ virtqueue_disable_cb(sq->vq);
+ } else {
free_old_xmit_skbs(sq);
+ }
/* timestamp packet in software */
skb_tx_timestamp(skb);
--
2.12.2.816.g2cccc81164-goog
^ permalink raw reply related [flat|nested] 35+ messages in thread
* Re: [PATCH net-next v2 5/5] virtio-net: keep tx interrupts disabled unless kick
2017-04-18 20:21 ` Willem de Bruijn
@ 2017-04-20 6:17 ` Jason Wang
2017-04-20 14:03 ` Willem de Bruijn
2017-04-20 14:03 ` Willem de Bruijn
0 siblings, 2 replies; 35+ messages in thread
From: Jason Wang @ 2017-04-20 6:17 UTC (permalink / raw)
To: Willem de Bruijn, netdev; +Cc: Willem de Bruijn, virtualization, davem, mst
On 2017-04-19 04:21, Willem de Bruijn wrote:
> From: Willem de Bruijn <willemb@google.com>
>
> Tx napi mode increases the rate of transmit interrupts. Suppress some
> by masking interrupts while more packets are expected. The interrupts
> will be reenabled before the last packet is sent.
>
> This optimization reduces the throughput drop with tx napi for
> unidirectional flows such as UDP_STREAM that do not benefit from
> cleaning tx completions in the receive napi handler.
>
> Signed-off-by: Willem de Bruijn <willemb@google.com>
> ---
> drivers/net/virtio_net.c | 8 +++++++-
> 1 file changed, 7 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index b14c82ce0032..b107ae011632 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -1196,8 +1196,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> bool use_napi = sq->napi.weight;
>
> /* Free up any pending old buffers before queueing new ones. */
> - if (!use_napi)
> + if (use_napi) {
> + if (kick)
> + virtqueue_enable_cb_delayed(sq->vq);
> + else
> + virtqueue_disable_cb(sq->vq);
Since virtqueue_disable_cb() does nothing for event idx, I wonder whether
just calling virtqueue_enable_cb_delayed() is ok here.
Btw, it does not disable the interrupt at all; I proposed a patch in the past
which can do more than this:
https://patchwork.kernel.org/patch/6472601/
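For background, a simplified sketch of why flag-based suppression is only
advisory once VIRTIO_RING_F_EVENT_IDX has been negotiated (illustration
only, not the kernel's actual code; device_should_signal_used is a made-up
helper and byte-order conversion is omitted): the device decides whether to
signal from the used_event index the driver publishes, not from the
VRING_AVAIL_F_NO_INTERRUPT bit that virtqueue_disable_cb() sets.
#include <linux/types.h>
#include <linux/virtio_ring.h>

/* Hypothetical device-side decision with EVENT_IDX: interrupt only when
 * the used index crosses the driver-written used_event value.
 */
static bool device_should_signal_used(const struct vring *vr,
				      u16 old_used_idx, u16 new_used_idx)
{
	u16 event = vring_used_event(vr);	/* published by the driver */

	/* vring_need_event(): (new - event - 1) < (new - old), mod 2^16 */
	return vring_need_event(event, new_used_idx, old_used_idx);
}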
Thanks
> + } else {
> free_old_xmit_skbs(sq);
> + }
>
> /* timestamp packet in software */
> skb_tx_timestamp(skb);
^ permalink raw reply [flat|nested] 35+ messages in thread
* Re: [PATCH net-next v2 5/5] virtio-net: keep tx interrupts disabled unless kick
2017-04-20 6:17 ` Jason Wang
@ 2017-04-20 14:03 ` Willem de Bruijn
2017-04-21 23:13 ` Willem de Bruijn
2017-04-21 23:13 ` Willem de Bruijn
2017-04-20 14:03 ` Willem de Bruijn
1 sibling, 2 replies; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-20 14:03 UTC (permalink / raw)
To: Jason Wang
Cc: Network Development, Michael S. Tsirkin, virtualization,
David Miller, Willem de Bruijn
>> - if (!use_napi)
>> + if (use_napi) {
>> + if (kick)
>> + virtqueue_enable_cb_delayed(sq->vq);
>> + else
>> + virtqueue_disable_cb(sq->vq);
>
>
> Since virtqueue_disable_cb() does nothing for event idx, I wonder whether
> just calling virtqueue_enable_cb_delayed() is ok here.
Good point.
> Btw, it does not disable the interrupt at all; I proposed a patch in the past
> which can do more than this:
>
> https://patchwork.kernel.org/patch/6472601/
Interesting. Yes, let me evaluate that variant.
Thanks for reviewing,
Willem
^ permalink raw reply [flat|nested] 35+ messages in thread
* Re: [PATCH net-next v2 5/5] virtio-net: keep tx interrupts disabled unless kick
2017-04-20 14:03 ` Willem de Bruijn
@ 2017-04-21 23:13 ` Willem de Bruijn
2017-04-21 23:13 ` Willem de Bruijn
1 sibling, 0 replies; 35+ messages in thread
From: Willem de Bruijn @ 2017-04-21 23:13 UTC (permalink / raw)
To: Jason Wang
Cc: Network Development, Willem de Bruijn, virtualization,
David Miller, Michael S. Tsirkin
On Thu, Apr 20, 2017 at 10:03 AM, Willem de Bruijn
<willemdebruijn.kernel@gmail.com> wrote:
>>> - if (!use_napi)
>>> + if (use_napi) {
>>> + if (kick)
>>> + virtqueue_enable_cb_delayed(sq->vq);
>>> + else
>>> + virtqueue_disable_cb(sq->vq);
>>
>>
>> Since virtqueue_disable_cb() does nothing for event idx, I wonder whether
>> just calling virtqueue_enable_cb_delayed() is ok here.
>
> Good point.
>
>> Btw, it does not disable the interrupt at all; I proposed a patch in the past
>> which can do more than this:
>>
>> https://patchwork.kernel.org/patch/6472601/
>
> Interesting. Yes, let me evaluate that variant.
In initial tests I don't see a significant change, but we can look
into this more closely as a follow-on patch.
^ permalink raw reply [flat|nested] 35+ messages in thread