* [PATCH net-next 1/3] virtio-net: napi helper functions
From: Willem de Bruijn @ 2017-04-02 20:10 UTC
To: netdev; +Cc: mst, jasowang, virtualization, davem, Willem de Bruijn
From: Willem de Bruijn <willemb@google.com>
Prepare virtio-net for tx napi by converting existing napi code to
use helper functions. This also deduplicates some logic.
Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/net/virtio_net.c | 65 ++++++++++++++++++++++++++----------------------
1 file changed, 35 insertions(+), 30 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0d241d110ec..6aac0ad0d9b2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -239,6 +239,26 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p;
}
+static void virtqueue_napi_schedule(struct napi_struct *napi,
+ struct virtqueue *vq)
+{
+ if (napi_schedule_prep(napi)) {
+ virtqueue_disable_cb(vq);
+ __napi_schedule(napi);
+ }
+}
+
+static void virtqueue_napi_complete(struct napi_struct *napi,
+ struct virtqueue *vq, int processed)
+{
+ int opaque;
+
+ opaque = virtqueue_enable_cb_prepare(vq);
+ if (napi_complete_done(napi, processed) &&
+ unlikely(virtqueue_poll(vq, opaque)))
+ virtqueue_napi_schedule(napi, vq);
+}
+
static void skb_xmit_done(struct virtqueue *vq)
{
struct virtnet_info *vi = vq->vdev->priv;
@@ -936,27 +956,20 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv;
struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
- /* Schedule NAPI, Suppress further interrupts if successful. */
- if (napi_schedule_prep(&rq->napi)) {
- virtqueue_disable_cb(rvq);
- __napi_schedule(&rq->napi);
- }
+ virtqueue_napi_schedule(&rq->napi, rvq);
}
-static void virtnet_napi_enable(struct receive_queue *rq)
+static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
- napi_enable(&rq->napi);
+ napi_enable(napi);
/* If all buffers were filled by other side before we napi_enabled, we
- * won't get another interrupt, so process any outstanding packets
- * now. virtnet_poll wants re-enable the queue, so we disable here.
- * We synchronize against interrupts via NAPI_STATE_SCHED */
- if (napi_schedule_prep(&rq->napi)) {
- virtqueue_disable_cb(rq->vq);
- local_bh_disable();
- __napi_schedule(&rq->napi);
- local_bh_enable();
- }
+ * won't get another interrupt, so process any outstanding packets now.
+ * Call local_bh_enable after to trigger softIRQ processing.
+ */
+ local_bh_disable();
+ virtqueue_napi_schedule(napi, vq);
+ local_bh_enable();
}
static void refill_work(struct work_struct *work)
@@ -971,7 +984,7 @@ static void refill_work(struct work_struct *work)
napi_disable(&rq->napi);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq);
+ virtnet_napi_enable(rq->vq, &rq->napi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
@@ -1011,21 +1024,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
- unsigned int r, received;
+ unsigned int received;
received = virtnet_receive(rq, budget);
/* Out of packets? */
- if (received < budget) {
- r = virtqueue_enable_cb_prepare(rq->vq);
- if (napi_complete_done(napi, received)) {
- if (unlikely(virtqueue_poll(rq->vq, r)) &&
- napi_schedule_prep(napi)) {
- virtqueue_disable_cb(rq->vq);
- __napi_schedule(napi);
- }
- }
- }
+ if (received < budget)
+ virtqueue_napi_complete(napi, rq->vq, received);
return received;
}
@@ -1040,7 +1045,7 @@ static int virtnet_open(struct net_device *dev)
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
- virtnet_napi_enable(&vi->rq[i]);
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
}
return 0;
@@ -1747,7 +1752,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
schedule_delayed_work(&vi->refill, 0);
for (i = 0; i < vi->max_queue_pairs; i++)
- virtnet_napi_enable(&vi->rq[i]);
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
}
netif_device_attach(vi->dev);
--
2.12.2.564.g063fe858b8-goog
* [PATCH net-next 2/3] virtio-net: transmit napi
From: Willem de Bruijn @ 2017-04-02 20:10 UTC
To: netdev; +Cc: Willem de Bruijn, virtualization, davem, mst
From: Willem de Bruijn <willemb@google.com>
Convert virtio-net to a standard napi tx completion path. This enables
better TCP pacing using TCP small queues and increases single stream
throughput.
The virtio-net driver currently cleans tx descriptors on transmission
of new packets in ndo_start_xmit. Latency depends on new traffic, so
is unbounded. To avoid deadlock when a socket reaches its snd limit,
packets are orphaned on transmission. This breaks socket backpressure,
including TSQ.
Napi increases the number of interrupts generated compared to the
current model, which keeps interrupts disabled as long as the ring
has enough free descriptors. Keep tx napi optional for now. Follow-on
patches will reduce the interrupt cost.
Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/net/virtio_net.c | 63 ++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 53 insertions(+), 10 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6aac0ad0d9b2..95d938e82080 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -33,9 +33,10 @@
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
-static bool csum = true, gso = true;
+static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
+module_param(napi_tx, bool, 0644);
/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
@@ -86,6 +87,8 @@ struct send_queue {
/* Name of the send queue: output.$index */
char name[40];
+
+ struct napi_struct napi;
};
/* Internal representation of a receive virtqueue */
@@ -262,12 +265,16 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
static void skb_xmit_done(struct virtqueue *vq)
{
struct virtnet_info *vi = vq->vdev->priv;
+ struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
/* Suppress further interrupts. */
virtqueue_disable_cb(vq);
- /* We were probably waiting for more output buffers. */
- netif_wake_subqueue(vi->dev, vq2txq(vq));
+ if (napi->weight)
+ virtqueue_napi_schedule(napi, vq);
+ else
+ /* We were probably waiting for more output buffers. */
+ netif_wake_subqueue(vi->dev, vq2txq(vq));
}
static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
@@ -961,6 +968,9 @@ static void skb_recv_done(struct virtqueue *rvq)
static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
+ if (!napi->weight)
+ return;
+
napi_enable(napi);
/* If all buffers were filled by other side before we napi_enabled, we
@@ -1046,6 +1056,7 @@ static int virtnet_open(struct net_device *dev)
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ virtnet_napi_enable(vi->sq[i].vq, &vi->sq[i].napi);
}
return 0;
@@ -1081,6 +1092,24 @@ static void free_old_xmit_skbs(struct send_queue *sq)
u64_stats_update_end(&stats->tx_syncp);
}
+static int virtnet_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct send_queue *sq = container_of(napi, struct send_queue, napi);
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
+
+ __netif_tx_lock(txq, smp_processor_id());
+ free_old_xmit_skbs(sq);
+ __netif_tx_unlock(txq);
+
+ virtqueue_napi_complete(napi, sq->vq, 0);
+
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_wake_subqueue(vi->dev, vq2txq(sq->vq));
+
+ return 0;
+}
+
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -1130,9 +1159,11 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
int err;
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
bool kick = !skb->xmit_more;
+ bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
- free_old_xmit_skbs(sq);
+ if (!use_napi)
+ free_old_xmit_skbs(sq);
/* timestamp packet in software */
skb_tx_timestamp(skb);
@@ -1152,8 +1183,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Don't wait up for transmitted skbs to be freed. */
- skb_orphan(skb);
- nf_reset(skb);
+ if (!use_napi) {
+ skb_orphan(skb);
+ nf_reset(skb);
+ }
/* If running out of space, stop queue to avoid getting packets that we
* are then unable to transmit.
@@ -1167,7 +1200,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
netif_stop_subqueue(dev, qnum);
- if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+ if (!use_napi &&
+ unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
free_old_xmit_skbs(sq);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
@@ -1371,8 +1405,10 @@ static int virtnet_close(struct net_device *dev)
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
+ napi_disable(&vi->sq[i].napi);
+ }
return 0;
}
@@ -1727,8 +1763,10 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
cancel_delayed_work_sync(&vi->refill);
if (netif_running(vi->dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
+ napi_disable(&vi->sq[i].napi);
+ }
}
}
@@ -1751,8 +1789,10 @@ static int virtnet_restore_up(struct virtio_device *vdev)
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ virtnet_napi_enable(vi->sq[i].vq, &vi->sq[i].napi);
+ }
}
netif_device_attach(vi->dev);
@@ -1957,6 +1997,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
napi_hash_del(&vi->rq[i].napi);
netif_napi_del(&vi->rq[i].napi);
+ netif_napi_del(&vi->sq[i].napi);
}
/* We called napi_hash_del() before netif_napi_del(),
@@ -2142,6 +2183,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
vi->rq[i].pages = NULL;
netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
napi_weight);
+ netif_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
+ napi_tx ? napi_weight : 0);
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
--
2.12.2.564.g063fe858b8-goog
* Re: [PATCH net-next 2/3] virtio-net: transmit napi
From: Michael S. Tsirkin @ 2017-04-03 2:30 UTC
To: Willem de Bruijn; +Cc: Willem de Bruijn, netdev, davem, virtualization
On Sun, Apr 02, 2017 at 04:10:11PM -0400, Willem de Bruijn wrote:
> From: Willem de Bruijn <willemb@google.com>
>
> Convert virtio-net to a standard napi tx completion path. This enables
> better TCP pacing using TCP small queues and increases single stream
> throughput.
>
> The virtio-net driver currently cleans tx descriptors on transmission
> of new packets in ndo_start_xmit. Latency depends on new traffic, so
> is unbounded. To avoid deadlock when a socket reaches its snd limit,
> packets are orphaned on transmission. This breaks socket backpressure,
> including TSQ.
>
> Napi increases the number of interrupts generated compared to the
> current model, which keeps interrupts disabled as long as the ring
> has enough free descriptors. Keep tx napi optional for now. Follow-on
> patches will reduce the interrupt cost.
>
> Signed-off-by: Willem de Bruijn <willemb@google.com>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
> drivers/net/virtio_net.c | 63 ++++++++++++++++++++++++++++++++++++++++--------
> 1 file changed, 53 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 6aac0ad0d9b2..95d938e82080 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -33,9 +33,10 @@
> static int napi_weight = NAPI_POLL_WEIGHT;
> module_param(napi_weight, int, 0444);
>
> -static bool csum = true, gso = true;
> +static bool csum = true, gso = true, napi_tx = true;
> module_param(csum, bool, 0444);
> module_param(gso, bool, 0444);
> +module_param(napi_tx, bool, 0644);
>
> /* FIXME: MTU in config. */
> #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
Off by default seems safer until we can find better ways
to reduce the overhead, esp for UDP.
> @@ -86,6 +87,8 @@ struct send_queue {
>
> /* Name of the send queue: output.$index */
> char name[40];
> +
> + struct napi_struct napi;
> };
>
> /* Internal representation of a receive virtqueue */
> @@ -262,12 +265,16 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
> static void skb_xmit_done(struct virtqueue *vq)
> {
> struct virtnet_info *vi = vq->vdev->priv;
> + struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
>
> /* Suppress further interrupts. */
> virtqueue_disable_cb(vq);
>
> - /* We were probably waiting for more output buffers. */
> - netif_wake_subqueue(vi->dev, vq2txq(vq));
> + if (napi->weight)
> + virtqueue_napi_schedule(napi, vq);
> + else
> + /* We were probably waiting for more output buffers. */
> + netif_wake_subqueue(vi->dev, vq2txq(vq));
> }
>
> static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
> @@ -961,6 +968,9 @@ static void skb_recv_done(struct virtqueue *rvq)
>
> static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
> {
> + if (!napi->weight)
> + return;
> +
> napi_enable(napi);
>
> /* If all buffers were filled by other side before we napi_enabled, we
> @@ -1046,6 +1056,7 @@ static int virtnet_open(struct net_device *dev)
> if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
> schedule_delayed_work(&vi->refill, 0);
> virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
> + virtnet_napi_enable(vi->sq[i].vq, &vi->sq[i].napi);
> }
>
> return 0;
> @@ -1081,6 +1092,24 @@ static void free_old_xmit_skbs(struct send_queue *sq)
> u64_stats_update_end(&stats->tx_syncp);
> }
>
> +static int virtnet_poll_tx(struct napi_struct *napi, int budget)
> +{
> + struct send_queue *sq = container_of(napi, struct send_queue, napi);
> + struct virtnet_info *vi = sq->vq->vdev->priv;
> + struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
> +
> + __netif_tx_lock(txq, smp_processor_id());
> + free_old_xmit_skbs(sq);
> + __netif_tx_unlock(txq);
> +
> + virtqueue_napi_complete(napi, sq->vq, 0);
> +
> + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> + netif_wake_subqueue(vi->dev, vq2txq(sq->vq));
> +
> + return 0;
> +}
> +
> static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
> {
> struct virtio_net_hdr_mrg_rxbuf *hdr;
> @@ -1130,9 +1159,11 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> int err;
> struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
> bool kick = !skb->xmit_more;
> + bool use_napi = sq->napi.weight;
>
> /* Free up any pending old buffers before queueing new ones. */
> - free_old_xmit_skbs(sq);
> + if (!use_napi)
> + free_old_xmit_skbs(sq);
>
> /* timestamp packet in software */
> skb_tx_timestamp(skb);
> @@ -1152,8 +1183,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> }
>
> /* Don't wait up for transmitted skbs to be freed. */
> - skb_orphan(skb);
> - nf_reset(skb);
> + if (!use_napi) {
> + skb_orphan(skb);
> + nf_reset(skb);
> + }
>
> /* If running out of space, stop queue to avoid getting packets that we
> * are then unable to transmit.
> @@ -1167,7 +1200,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> */
> if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
> netif_stop_subqueue(dev, qnum);
> - if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
> + if (!use_napi &&
> + unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
> /* More just got used, free them then recheck. */
> free_old_xmit_skbs(sq);
> if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
> @@ -1371,8 +1405,10 @@ static int virtnet_close(struct net_device *dev)
> /* Make sure refill_work doesn't re-enable napi! */
> cancel_delayed_work_sync(&vi->refill);
>
> - for (i = 0; i < vi->max_queue_pairs; i++)
> + for (i = 0; i < vi->max_queue_pairs; i++) {
> napi_disable(&vi->rq[i].napi);
> + napi_disable(&vi->sq[i].napi);
> + }
>
> return 0;
> }
> @@ -1727,8 +1763,10 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
> cancel_delayed_work_sync(&vi->refill);
>
> if (netif_running(vi->dev)) {
> - for (i = 0; i < vi->max_queue_pairs; i++)
> + for (i = 0; i < vi->max_queue_pairs; i++) {
> napi_disable(&vi->rq[i].napi);
> + napi_disable(&vi->sq[i].napi);
> + }
> }
> }
>
> @@ -1751,8 +1789,10 @@ static int virtnet_restore_up(struct virtio_device *vdev)
> if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
> schedule_delayed_work(&vi->refill, 0);
>
> - for (i = 0; i < vi->max_queue_pairs; i++)
> + for (i = 0; i < vi->max_queue_pairs; i++) {
> virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
> + virtnet_napi_enable(vi->sq[i].vq, &vi->sq[i].napi);
> + }
> }
>
> netif_device_attach(vi->dev);
> @@ -1957,6 +1997,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
> for (i = 0; i < vi->max_queue_pairs; i++) {
> napi_hash_del(&vi->rq[i].napi);
> netif_napi_del(&vi->rq[i].napi);
> + netif_napi_del(&vi->sq[i].napi);
> }
>
> /* We called napi_hash_del() before netif_napi_del(),
> @@ -2142,6 +2183,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
> vi->rq[i].pages = NULL;
> netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
> napi_weight);
> + netif_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
> + napi_tx ? napi_weight : 0);
>
> sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
> ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
> --
> 2.12.2.564.g063fe858b8-goog
* Re: [PATCH net-next 2/3] virtio-net: transmit napi
From: Willem de Bruijn @ 2017-04-03 5:07 UTC
To: Michael S. Tsirkin
Cc: Willem de Bruijn, Network Development, David Miller, virtualization
On Sun, Apr 2, 2017 at 10:30 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Sun, Apr 02, 2017 at 04:10:11PM -0400, Willem de Bruijn wrote:
>> From: Willem de Bruijn <willemb@google.com>
>>
>> Convert virtio-net to a standard napi tx completion path. This enables
>> better TCP pacing using TCP small queues and increases single stream
>> throughput.
>>
>> The virtio-net driver currently cleans tx descriptors on transmission
>> of new packets in ndo_start_xmit. Latency depends on new traffic, so
>> is unbounded. To avoid deadlock when a socket reaches its snd limit,
>> packets are orphaned on transmission. This breaks socket backpressure,
>> including TSQ.
>>
>> Napi increases the number of interrupts generated compared to the
>> current model, which keeps interrupts disabled as long as the ring
>> has enough free descriptors. Keep tx napi optional for now. Follow-on
>> patches will reduce the interrupt cost.
>>
>> Signed-off-by: Willem de Bruijn <willemb@google.com>
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>> drivers/net/virtio_net.c | 63 ++++++++++++++++++++++++++++++++++++++++--------
>> 1 file changed, 53 insertions(+), 10 deletions(-)
>>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index 6aac0ad0d9b2..95d938e82080 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -33,9 +33,10 @@
>> static int napi_weight = NAPI_POLL_WEIGHT;
>> module_param(napi_weight, int, 0444);
>>
>> -static bool csum = true, gso = true;
>> +static bool csum = true, gso = true, napi_tx = true;
>> module_param(csum, bool, 0444);
>> module_param(gso, bool, 0444);
>> +module_param(napi_tx, bool, 0644);
>>
>> /* FIXME: MTU in config. */
>> #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
>
> Off by default seems safer until we can find better ways
> to reduce the overhead, esp for UDP.
Okay, I'll change that. I don't have an immediate idea for that
unidirectional case.
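The change itself would just flip the module param default, roughly
(sketch only, not the actual v2 patch):

-static bool csum = true, gso = true, napi_tx = true;
+static bool csum = true, gso = true, napi_tx = false;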
* [PATCH net-next 3/3] virtio-net: clean tx descriptors from rx napi
From: Willem de Bruijn @ 2017-04-02 20:10 UTC
To: netdev; +Cc: Willem de Bruijn, virtualization, davem, mst
From: Willem de Bruijn <willemb@google.com>
Amortize the cost of virtual interrupts by doing both rx and tx work
on reception of a receive interrupt if tx napi is enabled. With
VIRTIO_F_EVENT_IDX, this suppresses most explicit tx completion
interrupts for bidirectional workloads.
Signed-off-by: Willem de Bruijn <willemb@google.com>
---
drivers/net/virtio_net.c | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 95d938e82080..af830eb212bf 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1030,12 +1030,34 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
return received;
}
+static void free_old_xmit_skbs(struct send_queue *sq);
+
+static void virtnet_poll_cleantx(struct receive_queue *rq)
+{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ unsigned int index = vq2rxq(rq->vq);
+ struct send_queue *sq = &vi->sq[index];
+ struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
+
+ if (!sq->napi.weight)
+ return;
+
+ __netif_tx_lock(txq, smp_processor_id());
+ free_old_xmit_skbs(sq);
+ __netif_tx_unlock(txq);
+
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_wake_subqueue(vi->dev, vq2txq(sq->vq));
+}
+
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
unsigned int received;
+ virtnet_poll_cleantx(rq);
+
received = virtnet_receive(rq, budget);
/* Out of packets? */
--
2.12.2.564.g063fe858b8-goog
* Re: [PATCH net-next 3/3] virtio-net: clean tx descriptors from rx napi
From: Michael S. Tsirkin @ 2017-04-03 2:47 UTC
To: Willem de Bruijn
Cc: netdev, jasowang, virtualization, davem, Willem de Bruijn
On Sun, Apr 02, 2017 at 04:10:12PM -0400, Willem de Bruijn wrote:
> From: Willem de Bruijn <willemb@google.com>
>
> Amortize the cost of virtual interrupts by doing both rx and tx work
> on reception of a receive interrupt if tx napi is enabled. With
> VIRTIO_F_EVENT_IDX, this suppresses most explicit tx completion
> interrupts for bidirectional workloads.
>
> Signed-off-by: Willem de Bruijn <willemb@google.com>
> ---
> drivers/net/virtio_net.c | 22 ++++++++++++++++++++++
> 1 file changed, 22 insertions(+)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 95d938e82080..af830eb212bf 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -1030,12 +1030,34 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
> return received;
> }
>
> +static void free_old_xmit_skbs(struct send_queue *sq);
> +
Could you pls re-arrange code to avoid forward declarations?
> +static void virtnet_poll_cleantx(struct receive_queue *rq)
> +{
> + struct virtnet_info *vi = rq->vq->vdev->priv;
> + unsigned int index = vq2rxq(rq->vq);
> + struct send_queue *sq = &vi->sq[index];
> + struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
> +
> + if (!sq->napi.weight)
> + return;
> +
> + __netif_tx_lock(txq, smp_processor_id());
> + free_old_xmit_skbs(sq);
> + __netif_tx_unlock(txq);
> +
> + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> + netif_wake_subqueue(vi->dev, vq2txq(sq->vq));
> +}
> +
Looks very similar to virtnet_poll_tx.
I think this might be waking the tx queue too early, so
it will tend to stay almost full for long periods of time.
Why not defer wakeup until queue is at least half empty?
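As a rough untested sketch of what I mean (virtqueue_get_vring_size()
returns the total ring size):

	/* defer wakeup until at least half the ring is free */
	if (sq->vq->num_free >= virtqueue_get_vring_size(sq->vq) / 2)
		netif_wake_subqueue(vi->dev, vq2txq(sq->vq));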
I wonder whether it's worth it to handle very short queues
correctly - they previously made very slow progress,
now they are never woken up.
I'm a bit concerned about the cost of these wakeups
and locking. I note that this wake is called basically
every time queue is not full.
Would it make sense to limit the amount of tx polling?
Maybe use trylock to reduce the conflict with xmit?
> static int virtnet_poll(struct napi_struct *napi, int budget)
> {
> struct receive_queue *rq =
> container_of(napi, struct receive_queue, napi);
> unsigned int received;
>
> + virtnet_poll_cleantx(rq);
> +
> received = virtnet_receive(rq, budget);
>
> /* Out of packets? */
> --
> 2.12.2.564.g063fe858b8-goog
* Re: [PATCH net-next 3/3] virtio-net: clean tx descriptors from rx napi
From: Willem de Bruijn @ 2017-04-03 5:02 UTC
To: Michael S. Tsirkin
Cc: Willem de Bruijn, Network Development, David Miller, virtualization
On Sun, Apr 2, 2017 at 10:47 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Sun, Apr 02, 2017 at 04:10:12PM -0400, Willem de Bruijn wrote:
>> From: Willem de Bruijn <willemb@google.com>
>>
>> Amortize the cost of virtual interrupts by doing both rx and tx work
>> on reception of a receive interrupt if tx napi is enabled. With
>> VIRTIO_F_EVENT_IDX, this suppresses most explicit tx completion
>> interrupts for bidirectional workloads.
>>
>> Signed-off-by: Willem de Bruijn <willemb@google.com>
>> ---
>> drivers/net/virtio_net.c | 22 ++++++++++++++++++++++
>> 1 file changed, 22 insertions(+)
>>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index 95d938e82080..af830eb212bf 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -1030,12 +1030,34 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
>> return received;
>> }
>>
>> +static void free_old_xmit_skbs(struct send_queue *sq);
>> +
>
> Could you pls re-arrange code to avoid forward declarations?
Okay. I'll do the move in a separate patch to simplify review.
>> +static void virtnet_poll_cleantx(struct receive_queue *rq)
>> +{
>> + struct virtnet_info *vi = rq->vq->vdev->priv;
>> + unsigned int index = vq2rxq(rq->vq);
>> + struct send_queue *sq = &vi->sq[index];
>> + struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
>> +
>> + if (!sq->napi.weight)
>> + return;
>> +
>> + __netif_tx_lock(txq, smp_processor_id());
>> + free_old_xmit_skbs(sq);
>> + __netif_tx_unlock(txq);
>> +
>> + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
>> + netif_wake_subqueue(vi->dev, vq2txq(sq->vq));
>> +}
>> +
>
> Looks very similar to virtnet_poll_tx.
>
> I think this might be waking the tx queue too early, so
> it will tend to stay almost full for long periods of time.
> Why not defer wakeup until queue is at least half empty?
I'll test that. Delaying wake-up longer than necessary can cause
queue build up at the qdisc and higher tail latency, I imagine. But
it may reduce the number of __netif_schedule calls.
> I wonder whether it's worth it to handle very short queues
> correctly - they previously made very slow progress,
> now they are never woken up.
>
> I'm a bit concerned about the cost of these wakeups
> and locking. I note that this wake is called basically
> every time queue is not full.
>
> Would it make sense to limit the amount of tx polling?
> Maybe use trylock to reduce the conflict with xmit?
Yes, that sounds good. I did test that previously and saw no
difference then. But when multiple cpus contend for a single
txq it should help.
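Roughly, as an untested sketch on top of virtnet_poll_cleantx:

	/* opportunistic cleaning: skip if start_xmit holds the txq lock */
	if (!__netif_tx_trylock(txq))
		return;

	free_old_xmit_skbs(sq);
	__netif_tx_unlock(txq);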
* Re: [PATCH net-next 3/3] virtio-net: clean tx descriptors from rx napi
From: Michael S. Tsirkin @ 2017-04-07 19:28 UTC
To: Willem de Bruijn
Cc: Willem de Bruijn, Network Development, David Miller, virtualization
On Mon, Apr 03, 2017 at 01:02:13AM -0400, Willem de Bruijn wrote:
> On Sun, Apr 2, 2017 at 10:47 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > On Sun, Apr 02, 2017 at 04:10:12PM -0400, Willem de Bruijn wrote:
> >> From: Willem de Bruijn <willemb@google.com>
> >>
> >> Amortize the cost of virtual interrupts by doing both rx and tx work
> >> on reception of a receive interrupt if tx napi is enabled. With
> >> VIRTIO_F_EVENT_IDX, this suppresses most explicit tx completion
> >> interrupts for bidirectional workloads.
> >>
> >> Signed-off-by: Willem de Bruijn <willemb@google.com>
This is a popular approach, but I think this will only work well if tx
and rx interrupts are processed on the same CPU and if tx queue is per
cpu. If they target different CPUs or if tx queue is used from multiple
CPUs they will conflict on the shared locks.
This can even change dynamically as CPUs/queues are reconfigured.
How about adding a flag and skipping the tx poll if there's no match?
--
MST
* Re: [PATCH net-next 3/3] virtio-net: clean tx descriptors from rx napi
From: Willem de Bruijn @ 2017-04-07 20:59 UTC
To: Michael S. Tsirkin
Cc: Willem de Bruijn, Network Development, David Miller, virtualization
On Fri, Apr 7, 2017 at 3:28 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Mon, Apr 03, 2017 at 01:02:13AM -0400, Willem de Bruijn wrote:
>> On Sun, Apr 2, 2017 at 10:47 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>> > On Sun, Apr 02, 2017 at 04:10:12PM -0400, Willem de Bruijn wrote:
>> >> From: Willem de Bruijn <willemb@google.com>
>> >>
>> >> Amortize the cost of virtual interrupts by doing both rx and tx work
>> >> on reception of a receive interrupt if tx napi is enabled. With
>> >> VIRTIO_F_EVENT_IDX, this suppresses most explicit tx completion
>> >> interrupts for bidirectional workloads.
>> >>
>> >> Signed-off-by: Willem de Bruijn <willemb@google.com>
>
> This is a popular approach, but I think this will only work well if tx
> and rx interrupts are processed on the same CPU and if tx queue is per
> cpu. If they target different CPUs or if tx queue is used from multiple
> CPUs they will conflict on the shared locks.
Yes. As a result of this discussion I started running a few vcpu affinity tests.
The data is not complete. In particular, I don't have the data yet to
compare having tx and rx irq on the same cpu (0,0) vs on different
(0,2) for this patchset. Which is the relevant data to your point.
Initial results for unmodified upstream driver at {1, 10, 100}x
TCP_STREAM, for irq cpu affinity (rx,tx). Process is always pinned to cpu
1. This is a 4 vcpu system pinned by the host to 4 cores on the same
socket. The previously reported results were obtained with txq, rxq
and process on different vcpus (0,2). Running all on the same vcpu
lowers the cycle count considerably:
irq 0,0
1 throughput_Mbps=29767.14 391,488,924,526 cycles
10 throughput_Mbps=40808.64 424,530,251,896 cycles
100 throughput_Mbps=33475.13 414,622,071,167 cycles
irq 0,2
1 throughput_Mbps=30176.05 395,673,200,747 cycles
10 throughput_Mbps=40729.26 433,948,374,991 cycles
100 throughput_Mbps=33758.68 436,291,949,393 cycles
irq 1,1
1 throughput_Mbps=26635.20 269,071,002,844 cycles
10 throughput_Mbps=42385.05 299,945,944,516 cycles
100 throughput_Mbps=33580.98 283,272,895,507 cycles
With this patch set applied, cpu (1,1)
1 throughput_Mbps=34980.76 276,504,805,414 cycles
10 throughput_Mbps=42519.92 298,105,889,785 cycles
100 throughput_Mbps=35268.86 296,670,598,712 cycles
I will need to get data for (0,2) vs (0,0).
> This can even change dynamically as CPUs/queues are reconfigured.
> How about adding a flag and skipping the tx poll if there's no match?
I suspect that even with the cache invalidations this optimization
will be an improvement over handling all tx interrupts in the tx napi
handler. I will get the datapoint for that.
That said, we can make this conditional. What flag exactly do you
propose? Compare raw_smp_processor_id() in the rx softint with one
previously stored in the napi tx callback?
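As an untested sketch (the sq->poll_cpu field is made up here for
illustration):

	/* in virtnet_poll_tx(): record where tx napi last ran */
	WRITE_ONCE(sq->poll_cpu, raw_smp_processor_id());

	/* in virtnet_poll_cleantx(): skip tx work on a cpu mismatch */
	if (READ_ONCE(sq->poll_cpu) != raw_smp_processor_id())
		return;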
* Re: [PATCH net-next 3/3] virtio-net: clean tx descriptors from rx napi
2017-04-07 20:59 ` Willem de Bruijn
2017-04-07 21:10 ` Michael S. Tsirkin
@ 2017-04-07 21:10 ` Michael S. Tsirkin
2017-04-07 21:14 ` Willem de Bruijn
2017-04-07 21:14 ` Willem de Bruijn
1 sibling, 2 replies; 22+ messages in thread
From: Michael S. Tsirkin @ 2017-04-07 21:10 UTC (permalink / raw)
To: Willem de Bruijn
Cc: Network Development, Jason Wang, virtualization, David Miller,
Willem de Bruijn
On Fri, Apr 07, 2017 at 04:59:58PM -0400, Willem de Bruijn wrote:
> On Fri, Apr 7, 2017 at 3:28 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > On Mon, Apr 03, 2017 at 01:02:13AM -0400, Willem de Bruijn wrote:
> >> On Sun, Apr 2, 2017 at 10:47 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> >> > On Sun, Apr 02, 2017 at 04:10:12PM -0400, Willem de Bruijn wrote:
> >> >> From: Willem de Bruijn <willemb@google.com>
> >> >>
> >> >> Amortize the cost of virtual interrupts by doing both rx and tx work
> >> >> on reception of a receive interrupt if tx napi is enabled. With
> >> >> VIRTIO_F_EVENT_IDX, this suppresses most explicit tx completion
> >> >> interrupts for bidirectional workloads.
> >> >>
> >> >> Signed-off-by: Willem de Bruijn <willemb@google.com>
> >
> > This is a popular approach, but I think this will only work well if tx
> > and rx interrupts are processed on the same CPU and if tx queue is per
> > cpu. If they target different CPUs or if tx queue is used from multiple
> > CPUs they will conflict on the shared locks.
>
> Yes. As a result of this discussion I started running a few vcpu affinity tests.
>
> The data is not complete. In particular, I don't have the data yet to
> compare having tx and rx irq on the same cpu (0,0) vs on different
> cpus (0,2) for this patchset, which is the data most relevant to your point.
>
> Initial results for the unmodified upstream driver at {1, 10, 100}x
> TCP_STREAM, for irq cpu affinity (rx,tx). The process is always pinned to cpu
> 1. This is a 4 vcpu system pinned by the host to 4 cores on the same
> socket. The previously reported results were obtained with the txq irq,
> rxq irq and process on different vcpus (0,2). Running all on the same vcpu
> lowers the cycle count considerably:
>
> irq 0,0
> 1 throughput_Mbps=29767.14 391,488,924,526 cycles
> 10 throughput_Mbps=40808.64 424,530,251,896 cycles
> 100 throughput_Mbps=33475.13 414,622,071,167 cycles
>
> irq 0,2
> 1 throughput_Mbps=30176.05 395,673,200,747 cycles
> 10 throughput_Mbps=40729.26 433,948,374,991 cycles
> 100 throughput_Mbps=33758.68 436,291,949,393 cycles
>
> irq 1,1
> 1 throughput_Mbps=26635.20 269,071,002,844 cycles
> 10 throughput_Mbps=42385.05 299,945,944,516 cycles
> 100 throughput_Mbps=33580.98 283,272,895,507 cycles
>
> With this patch set applied, cpu (1,1)
>
> 1 throughput_Mbps=34980.76 276,504,805,414 cycles
> 10 throughput_Mbps=42519.92 298,105,889,785 cycles
> 100 throughput_Mbps=35268.86 296,670,598,712 cycles
>
> I will need to get data for (0,2) vs (0,0).
>
> > This can even change dynamically as CPUs/queues are reconfigured.
> > How about adding a flag and skipping the tx poll if there's no match?
>
> I suspect that even with the cache invalidations this optimization
> will be an improvement over handling all tx interrupts in the tx napi
> handler. I will get the datapoint for that.
>
> That said, we can make this conditional. What flag exactly do you
> propose? Compare raw_smp_processor_id() in the rx softint with one
> previously stored in the napi tx callback?
I'm not sure. Another idea is to check vi->affinity_hint_set.
If set we know rq and sq are on the same CPU.
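As a rough sketch, layered onto the virtnet_poll_cleantx() posted in patch
3/3, that check would just be an extra early return (illustrative only;
affinity_hint_set is set by virtnet_set_affinity() when queue pairs are
pinned 1:1 to CPUs):

static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	/* Skip unless queue pairs are pinned 1:1 to CPUs, in which
	 * case rx and tx interrupts for this pair share a CPU.
	 */
	if (!sq->napi.weight || !vi->affinity_hint_set)
		return;

	__netif_tx_lock(txq, smp_processor_id());
	free_old_xmit_skbs(sq);
	__netif_tx_unlock(txq);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_wake_subqueue(vi->dev, vq2txq(sq->vq));
}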
--
MST
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH net-next 3/3] virtio-net: clean tx descriptors from rx napi
2017-04-07 21:10 ` Michael S. Tsirkin
@ 2017-04-07 21:14 ` Willem de Bruijn
2017-04-07 21:14 ` Willem de Bruijn
1 sibling, 0 replies; 22+ messages in thread
From: Willem de Bruijn @ 2017-04-07 21:14 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: Network Development, Jason Wang, virtualization, David Miller,
Willem de Bruijn
On Fri, Apr 7, 2017 at 5:10 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Fri, Apr 07, 2017 at 04:59:58PM -0400, Willem de Bruijn wrote:
>> On Fri, Apr 7, 2017 at 3:28 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>> > On Mon, Apr 03, 2017 at 01:02:13AM -0400, Willem de Bruijn wrote:
>> >> On Sun, Apr 2, 2017 at 10:47 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>> >> > On Sun, Apr 02, 2017 at 04:10:12PM -0400, Willem de Bruijn wrote:
>> >> >> From: Willem de Bruijn <willemb@google.com>
>> >> >>
>> >> >> Amortize the cost of virtual interrupts by doing both rx and tx work
>> >> >> on reception of a receive interrupt if tx napi is enabled. With
>> >> >> VIRTIO_F_EVENT_IDX, this suppresses most explicit tx completion
>> >> >> interrupts for bidirectional workloads.
>> >> >>
>> >> >> Signed-off-by: Willem de Bruijn <willemb@google.com>
>> >
>> > This is a popular approach, but I think this will only work well if tx
>> > and rx interrupts are processed on the same CPU and if tx queue is per
>> > cpu. If they target different CPUs or if tx queue is used from multiple
>> > CPUs they will conflict on the shared locks.
>>
>> Yes. As a result of this discussion I started running a few vcpu affinity tests.
>>
>> > This can even change dynamically as CPUs/queues are reconfigured.
>> > How about adding a flag and skipping the tx poll if there's no match?
>>
>> I suspect that even with the cache invalidations this optimization
>> will be an improvement over handling all tx interrupts in the tx napi
>> handler. I will get the datapoint for that.
>>
>> That said, we can make this conditional. What flag exactly do you
>> propose? Compare raw_smp_processor_id() in the rx softint with one
>> previously stored in the napi tx callback?
>
> I'm not sure. Another idea is to check vi->affinity_hint_set.
> If set we know rq and sq are on the same CPU.
I was not aware of that flag, thanks. Yes, that looks like it should work.
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH net-next 3/3] virtio-net: clean tx descriptors from rx napi
2017-04-02 20:10 ` Willem de Bruijn
2017-04-03 2:47 ` Michael S. Tsirkin
@ 2017-04-03 2:47 ` Michael S. Tsirkin
1 sibling, 0 replies; 22+ messages in thread
From: Michael S. Tsirkin @ 2017-04-03 2:47 UTC (permalink / raw)
To: Willem de Bruijn; +Cc: Willem de Bruijn, netdev, davem, virtualization
On Sun, Apr 02, 2017 at 04:10:12PM -0400, Willem de Bruijn wrote:
> From: Willem de Bruijn <willemb@google.com>
>
> Amortize the cost of virtual interrupts by doing both rx and tx work
> on reception of a receive interrupt if tx napi is enabled. With
> VIRTIO_F_EVENT_IDX, this suppresses most explicit tx completion
> interrupts for bidirectional workloads.
>
> Signed-off-by: Willem de Bruijn <willemb@google.com>
> ---
> drivers/net/virtio_net.c | 22 ++++++++++++++++++++++
> 1 file changed, 22 insertions(+)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 95d938e82080..af830eb212bf 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -1030,12 +1030,34 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
> return received;
> }
>
> +static void free_old_xmit_skbs(struct send_queue *sq);
> +
Could you pls re-arrange code to avoid forward declarations?
> +static void virtnet_poll_cleantx(struct receive_queue *rq)
> +{
> + struct virtnet_info *vi = rq->vq->vdev->priv;
> + unsigned int index = vq2rxq(rq->vq);
> + struct send_queue *sq = &vi->sq[index];
> + struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
> +
> + if (!sq->napi.weight)
> + return;
> +
> + __netif_tx_lock(txq, smp_processor_id());
> + free_old_xmit_skbs(sq);
> + __netif_tx_unlock(txq);
> +
> + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> + netif_wake_subqueue(vi->dev, vq2txq(sq->vq));
> +}
> +
Looks very similar to virtnet_poll_tx.
I think this might be waking the tx queue too early, so
it will tend to stay almost full for long periods of time.
Why not defer the wakeup until the queue is at least half empty?
I wonder whether it's worth it to handle very short queues
correctly - they previously made very slow progress,
now they are never woken up.
I'm a bit concerned about the cost of these wakeups
and locking. I note that this wake is called basically
every time the queue is not full.
Would it make sense to limit the amount of tx polling?
Maybe use trylock to reduce the conflict with xmit?
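Combining those two suggestions, one possible shape for the function
below - an untested sketch, with half the ring as one arbitrary choice
of wake threshold:

static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	if (!sq->napi.weight)
		return;

	/* Trylock: if the xmit path holds the lock, leave the
	 * cleanup to the tx napi handler instead of spinning here.
	 */
	if (!__netif_tx_trylock(txq))
		return;

	free_old_xmit_skbs(sq);
	__netif_tx_unlock(txq);

	/* Defer the wakeup until at least half the ring is free. */
	if (sq->vq->num_free >= virtqueue_get_vring_size(sq->vq) / 2)
		netif_wake_subqueue(vi->dev, vq2txq(sq->vq));
}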
> static int virtnet_poll(struct napi_struct *napi, int budget)
> {
> struct receive_queue *rq =
> container_of(napi, struct receive_queue, napi);
> unsigned int received;
>
> + virtnet_poll_cleantx(rq);
> +
> received = virtnet_receive(rq, budget);
>
> /* Out of packets? */
> --
> 2.12.2.564.g063fe858b8-goog
^ permalink raw reply [flat|nested] 22+ messages in thread