* [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver
@ 2020-02-16 21:07 Lorenzo Bianconi
  2020-02-16 21:07 ` [PATCH net-next 1/5] net: mvneta: move refill_err and skb_alloc_err in per-cpu stats Lorenzo Bianconi
                   ` (5 more replies)
  0 siblings, 6 replies; 17+ messages in thread
From: Lorenzo Bianconi @ 2020-02-16 21:07 UTC (permalink / raw)
  To: netdev; +Cc: ilias.apalodimas, davem, lorenzo.bianconi, brouer

Rework mvneta stats accounting in order to introduce xdp ethtool
statistics in the mvneta driver.
Introduce xdp_redirect, xdp_pass, xdp_drop and xdp_tx counters to
ethtool statistics.
Fix skb_alloc_error and refill_error ethtool accounting.
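
With the series applied, the new counters are reported through the
standard ethtool statistics interface. A minimal usage sketch, assuming
the mvneta port is named eth0 (the interface name and the zero values
are purely illustrative):

  # ethtool -S eth0 | grep xdp
       xdp_redirect: 0
       xdp_pass: 0
       xdp_drop: 0
       xdp_tx: 0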

Lorenzo Bianconi (5):
  net: mvneta: move refill_err and skb_alloc_err in per-cpu stats
  net: mvneta: rely on open-coding updating stats for non-xdp and tx
    path
  net: mvneta: rely on struct mvneta_stats in mvneta_update_stats
    routine
  net: mvneta: introduce xdp counters to ethtool
  net: mvneta: get rid of xdp_ret in mvneta_swbm_rx_frame

 drivers/net/ethernet/marvell/mvneta.c | 227 ++++++++++++++++++--------
 1 file changed, 163 insertions(+), 64 deletions(-)

-- 
2.24.1



* [PATCH net-next 1/5] net: mvneta: move refill_err and skb_alloc_err in per-cpu stats
  2020-02-16 21:07 [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver Lorenzo Bianconi
@ 2020-02-16 21:07 ` Lorenzo Bianconi
  2020-02-16 21:07 ` [PATCH net-next 2/5] net: mvneta: rely on open-coding updating stats for non-xdp and tx path Lorenzo Bianconi
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 17+ messages in thread
From: Lorenzo Bianconi @ 2020-02-16 21:07 UTC (permalink / raw)
  To: netdev; +Cc: ilias.apalodimas, davem, lorenzo.bianconi, brouer

The mvneta_ethtool_update_stats routine currently reports
skb_alloc_error and refill_error only for the first rx queue.
Fix the issue by moving skb_alloc_err and refill_err into the per-cpu
mvneta_pcpu_stats structure and summing them over all CPUs.
Moreover, this patch paves the way for introducing xdp statistics
in ethtool for the mvneta driver.
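
The per-cpu accounting relies on the kernel's u64_stats_sync
writer/reader pairing. A condensed sketch of the pattern, as used in
the diff below (field names taken from this patch):

	/* writer, datapath, updates its own cpu's counters */
	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

	u64_stats_update_begin(&stats->syncp);
	stats->es.refill_error++;
	u64_stats_update_end(&stats->syncp);

	/* reader, ethtool path, retries if a writer was active */
	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		refill_error = stats->es.refill_error;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));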

Fixes: 17a96da62716 ("net: mvneta: discriminate error cause for missed packet")
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/ethernet/marvell/mvneta.c | 69 +++++++++++++++++++++------
 1 file changed, 55 insertions(+), 14 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 98017e7d5dd0..7433a305e0ff 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -397,8 +397,15 @@ static const struct mvneta_statistic mvneta_statistics[] = {
 	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
 };
 
+struct mvneta_ethtool_stats {
+	u64	skb_alloc_error;
+	u64	refill_error;
+};
+
 struct mvneta_pcpu_stats {
-	struct	u64_stats_sync syncp;
+	struct u64_stats_sync syncp;
+
+	struct mvneta_ethtool_stats es;
 	u64	rx_packets;
 	u64	rx_bytes;
 	u64	rx_dropped;
@@ -660,10 +667,6 @@ struct mvneta_rx_queue {
 	/* pointer to uncomplete skb buffer */
 	struct sk_buff *skb;
 	int left_size;
-
-	/* error counters */
-	u32 skb_alloc_err;
-	u32 refill_err;
 };
 
 static enum cpuhp_state online_hpstate;
@@ -1969,9 +1972,15 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
 		rx_desc = rxq->descs + curr_desc;
 		if (!(rx_desc->buf_phys_addr)) {
 			if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
+				struct mvneta_pcpu_stats *stats;
+
 				pr_err("Can't refill queue %d. Done %d from %d\n",
 				       rxq->id, i, rxq->refill_num);
-				rxq->refill_err++;
+
+				stats = this_cpu_ptr(pp->stats);
+				u64_stats_update_begin(&stats->syncp);
+				stats->es.refill_error++;
+				u64_stats_update_end(&stats->syncp);
 				break;
 			}
 		}
@@ -2193,9 +2202,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
 		netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
-		rxq->skb_alloc_err++;
 
 		u64_stats_update_begin(&stats->syncp);
+		stats->es.skb_alloc_error++;
 		stats->rx_dropped++;
 		u64_stats_update_end(&stats->syncp);
 
@@ -2423,8 +2432,15 @@ static int mvneta_rx_hwbm(struct napi_struct *napi,
 		/* Refill processing */
 		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
 		if (err) {
+			struct mvneta_pcpu_stats *stats;
+
 			netdev_err(dev, "Linux processing - Can't refill\n");
-			rxq->refill_err++;
+
+			stats = this_cpu_ptr(pp->stats);
+			u64_stats_update_begin(&stats->syncp);
+			stats->es.refill_error++;
+			u64_stats_update_end(&stats->syncp);
+
 			goto err_drop_frame_ret_pool;
 		}
 
@@ -4420,45 +4436,70 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
 	}
 }
 
+static void
+mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
+				 struct mvneta_ethtool_stats *es)
+{
+	unsigned int start;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct mvneta_pcpu_stats *stats;
+		u64 skb_alloc_error;
+		u64 refill_error;
+
+		stats = per_cpu_ptr(pp->stats, cpu);
+		do {
+			start = u64_stats_fetch_begin_irq(&stats->syncp);
+			skb_alloc_error = stats->es.skb_alloc_error;
+			refill_error = stats->es.refill_error;
+		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+		es->skb_alloc_error += skb_alloc_error;
+		es->refill_error += refill_error;
+	}
+}
+
 static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
 {
+	struct mvneta_ethtool_stats stats = {};
 	const struct mvneta_statistic *s;
 	void __iomem *base = pp->base;
 	u32 high, low;
 	u64 val;
 	int i;
 
+	mvneta_ethtool_update_pcpu_stats(pp, &stats);
 	for (i = 0, s = mvneta_statistics;
 	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
 	     s++, i++) {
-		val = 0;
-
 		switch (s->type) {
 		case T_REG_32:
 			val = readl_relaxed(base + s->offset);
+			pp->ethtool_stats[i] += val;
 			break;
 		case T_REG_64:
 			/* Docs say to read low 32-bit then high */
 			low = readl_relaxed(base + s->offset);
 			high = readl_relaxed(base + s->offset + 4);
 			val = (u64)high << 32 | low;
+			pp->ethtool_stats[i] += val;
 			break;
 		case T_SW:
 			switch (s->offset) {
 			case ETHTOOL_STAT_EEE_WAKEUP:
 				val = phylink_get_eee_err(pp->phylink);
+				pp->ethtool_stats[i] += val;
 				break;
 			case ETHTOOL_STAT_SKB_ALLOC_ERR:
-				val = pp->rxqs[0].skb_alloc_err;
+				pp->ethtool_stats[i] = stats.skb_alloc_error;
 				break;
 			case ETHTOOL_STAT_REFILL_ERR:
-				val = pp->rxqs[0].refill_err;
+				pp->ethtool_stats[i] = stats.refill_error;
 				break;
 			}
 			break;
 		}
-
-		pp->ethtool_stats[i] += val;
 	}
 }
 
-- 
2.24.1



* [PATCH net-next 2/5] net: mvneta: rely on open-coding updating stats for non-xdp and tx path
  2020-02-16 21:07 [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver Lorenzo Bianconi
  2020-02-16 21:07 ` [PATCH net-next 1/5] net: mvneta: move refill_err and skb_alloc_err in per-cpu stats Lorenzo Bianconi
@ 2020-02-16 21:07 ` Lorenzo Bianconi
  2020-02-16 21:07 ` [PATCH net-next 3/5] net: mvneta: rely on struct mvneta_stats in mvneta_update_stats routine Lorenzo Bianconi
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 17+ messages in thread
From: Lorenzo Bianconi @ 2020-02-16 21:07 UTC (permalink / raw)
  To: netdev; +Cc: ilias.apalodimas, davem, lorenzo.bianconi, brouer

In order to avoid unnecessary instructions, open-code the per-cpu stats
updates in the mvneta_tx/mvneta_xdp_submit_frame and mvneta_rx_hwbm
routines instead of branching on a tx/rx flag in mvneta_update_stats.
This patch will be used to add xdp statistics to ethtool for the
mvneta driver.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/ethernet/marvell/mvneta.c | 40 ++++++++++++++++-----------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7433a305e0ff..6552b84be7c9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1945,19 +1945,13 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 }
 
 static void
-mvneta_update_stats(struct mvneta_port *pp, u32 pkts,
-		    u32 len, bool tx)
+mvneta_update_stats(struct mvneta_port *pp, u32 pkts, u32 len)
 {
 	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
 	u64_stats_update_begin(&stats->syncp);
-	if (tx) {
-		stats->tx_packets += pkts;
-		stats->tx_bytes += len;
-	} else {
-		stats->rx_packets += pkts;
-		stats->rx_bytes += len;
-	}
+	stats->rx_packets += pkts;
+	stats->rx_bytes += len;
 	u64_stats_update_end(&stats->syncp);
 }
 
@@ -1996,6 +1990,7 @@ static int
 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
 			struct xdp_frame *xdpf, bool dma_map)
 {
+	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 	struct mvneta_tx_desc *tx_desc;
 	struct mvneta_tx_buf *buf;
 	dma_addr_t dma_addr;
@@ -2030,7 +2025,11 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
 	tx_desc->buf_phys_addr = dma_addr;
 	tx_desc->data_size = xdpf->len;
 
-	mvneta_update_stats(pp, 1, xdpf->len, true);
+	u64_stats_update_begin(&stats->syncp);
+	stats->tx_bytes += xdpf->len;
+	stats->tx_packets++;
+	u64_stats_update_end(&stats->syncp);
+
 	mvneta_txq_inc_put(txq);
 	txq->pending++;
 	txq->count++;
@@ -2189,8 +2188,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 		ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp);
 		if (ret != MVNETA_XDP_PASS) {
 			mvneta_update_stats(pp, 1,
-					    xdp->data_end - xdp->data,
-					    false);
+					    xdp->data_end - xdp->data);
 			rx_desc->buf_phys_addr = 0;
 			*xdp_ret |= ret;
 			return ret;
@@ -2340,7 +2338,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		xdp_do_flush_map();
 
 	if (rcvd_pkts)
-		mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
+		mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes);
 
 	/* return some buffers to hardware queue, one at a time is too slow */
 	refill = mvneta_rx_refill_queue(pp, rxq);
@@ -2470,8 +2468,14 @@ static int mvneta_rx_hwbm(struct napi_struct *napi,
 		napi_gro_receive(napi, skb);
 	}
 
-	if (rcvd_pkts)
-		mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
+	if (rcvd_pkts) {
+		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_packets += rcvd_pkts;
+		stats->rx_bytes += rcvd_bytes;
+		u64_stats_update_end(&stats->syncp);
+	}
 
 	/* Update rxq management counters */
 	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
@@ -2727,6 +2731,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 out:
 	if (frags > 0) {
 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
 		netdev_tx_sent_queue(nq, len);
 
@@ -2740,7 +2745,10 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 		else
 			txq->pending += frags;
 
-		mvneta_update_stats(pp, 1, len, true);
+		u64_stats_update_begin(&stats->syncp);
+		stats->tx_bytes += len;
+		stats->tx_packets++;
+		u64_stats_update_end(&stats->syncp);
 	} else {
 		dev->stats.tx_dropped++;
 		dev_kfree_skb_any(skb);
-- 
2.24.1



* [PATCH net-next 3/5] net: mvneta: rely on struct mvneta_stats in mvneta_update_stats routine
  2020-02-16 21:07 [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver Lorenzo Bianconi
  2020-02-16 21:07 ` [PATCH net-next 1/5] net: mvneta: move refill_err and skb_alloc_err in per-cpu stats Lorenzo Bianconi
  2020-02-16 21:07 ` [PATCH net-next 2/5] net: mvneta: rely on open-coding updating stats for non-xdp and tx path Lorenzo Bianconi
@ 2020-02-16 21:07 ` Lorenzo Bianconi
  2020-02-16 21:07 ` [PATCH net-next 4/5] net: mvneta: introduce xdp counters to ethtool Lorenzo Bianconi
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 17+ messages in thread
From: Lorenzo Bianconi @ 2020-02-16 21:07 UTC (permalink / raw)
  To: netdev; +Cc: ilias.apalodimas, davem, lorenzo.bianconi, brouer

Introduce the mvneta_stats structure in the mvneta_update_stats routine
signature in order to collect all the rx stats during the napi loop and
update the per-cpu counters once at the end of it. mvneta_stats will be
reused to add xdp statistics support to ethtool.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/ethernet/marvell/mvneta.c | 73 +++++++++++++++------------
 1 file changed, 41 insertions(+), 32 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 6552b84be7c9..d41fc7044fa6 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -397,7 +397,15 @@ static const struct mvneta_statistic mvneta_statistics[] = {
 	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
 };
 
+struct mvneta_stats {
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+};
+
 struct mvneta_ethtool_stats {
+	struct mvneta_stats ps;
 	u64	skb_alloc_error;
 	u64	refill_error;
 };
@@ -406,12 +414,8 @@ struct mvneta_pcpu_stats {
 	struct u64_stats_sync syncp;
 
 	struct mvneta_ethtool_stats es;
-	u64	rx_packets;
-	u64	rx_bytes;
 	u64	rx_dropped;
 	u64	rx_errors;
-	u64	tx_packets;
-	u64	tx_bytes;
 };
 
 struct mvneta_pcpu_port {
@@ -751,12 +755,12 @@ mvneta_get_stats64(struct net_device *dev,
 		cpu_stats = per_cpu_ptr(pp->stats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
-			rx_packets = cpu_stats->rx_packets;
-			rx_bytes   = cpu_stats->rx_bytes;
+			rx_packets = cpu_stats->es.ps.rx_packets;
+			rx_bytes   = cpu_stats->es.ps.rx_bytes;
 			rx_dropped = cpu_stats->rx_dropped;
 			rx_errors  = cpu_stats->rx_errors;
-			tx_packets = cpu_stats->tx_packets;
-			tx_bytes   = cpu_stats->tx_bytes;
+			tx_packets = cpu_stats->es.ps.tx_packets;
+			tx_bytes   = cpu_stats->es.ps.tx_bytes;
 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 
 		stats->rx_packets += rx_packets;
@@ -1945,13 +1949,14 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 }
 
 static void
-mvneta_update_stats(struct mvneta_port *pp, u32 pkts, u32 len)
+mvneta_update_stats(struct mvneta_port *pp,
+		    struct mvneta_stats *ps)
 {
 	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
 	u64_stats_update_begin(&stats->syncp);
-	stats->rx_packets += pkts;
-	stats->rx_bytes += len;
+	stats->es.ps.rx_packets += ps->rx_packets;
+	stats->es.ps.rx_bytes += ps->rx_bytes;
 	u64_stats_update_end(&stats->syncp);
 }
 
@@ -2026,8 +2031,8 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
 	tx_desc->data_size = xdpf->len;
 
 	u64_stats_update_begin(&stats->syncp);
-	stats->tx_bytes += xdpf->len;
-	stats->tx_packets++;
+	stats->es.ps.tx_bytes += xdpf->len;
+	stats->es.ps.tx_packets++;
 	u64_stats_update_end(&stats->syncp);
 
 	mvneta_txq_inc_put(txq);
@@ -2098,7 +2103,8 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
 
 static int
 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
-	       struct bpf_prog *prog, struct xdp_buff *xdp)
+	       struct bpf_prog *prog, struct xdp_buff *xdp,
+	       struct mvneta_stats *stats)
 {
 	unsigned int len;
 	u32 ret, act;
@@ -2108,8 +2114,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 
 	switch (act) {
 	case XDP_PASS:
-		ret = MVNETA_XDP_PASS;
-		break;
+		return MVNETA_XDP_PASS;
 	case XDP_REDIRECT: {
 		int err;
 
@@ -2145,6 +2150,9 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		break;
 	}
 
+	stats->rx_bytes += xdp->data_end - xdp->data;
+	stats->rx_packets++;
+
 	return ret;
 }
 
@@ -2154,7 +2162,8 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 		     struct mvneta_rx_queue *rxq,
 		     struct xdp_buff *xdp,
 		     struct bpf_prog *xdp_prog,
-		     struct page *page, u32 *xdp_ret)
+		     struct page *page, u32 *xdp_ret,
+		     struct mvneta_stats *stats)
 {
 	unsigned char *data = page_address(page);
 	int data_len = -MVNETA_MH_SIZE, len;
@@ -2185,10 +2194,8 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	if (xdp_prog) {
 		u32 ret;
 
-		ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp);
+		ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats);
 		if (ret != MVNETA_XDP_PASS) {
-			mvneta_update_stats(pp, 1,
-					    xdp->data_end - xdp->data);
 			rx_desc->buf_phys_addr = 0;
 			*xdp_ret |= ret;
 			return ret;
@@ -2259,11 +2266,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			  struct mvneta_port *pp, int budget,
 			  struct mvneta_rx_queue *rxq)
 {
-	int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
+	int rx_proc = 0, rx_todo, refill;
 	struct net_device *dev = pp->dev;
+	struct mvneta_stats ps = {};
 	struct bpf_prog *xdp_prog;
 	struct xdp_buff xdp_buf;
-	int rx_todo, refill;
 	u32 xdp_ret = 0;
 
 	/* Get number of received packets */
@@ -2297,7 +2304,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			}
 
 			err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
-						   xdp_prog, page, &xdp_ret);
+						   xdp_prog, page, &xdp_ret,
+						   &ps);
 			if (err)
 				continue;
 		} else {
@@ -2321,8 +2329,9 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			rxq->skb = NULL;
 			continue;
 		}
-		rcvd_pkts++;
-		rcvd_bytes += rxq->skb->len;
+
+		ps.rx_bytes += rxq->skb->len;
+		ps.rx_packets++;
 
 		/* Linux processing */
 		rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
@@ -2337,8 +2346,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 	if (xdp_ret & MVNETA_XDP_REDIR)
 		xdp_do_flush_map();
 
-	if (rcvd_pkts)
-		mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes);
+	if (ps.rx_packets)
+		mvneta_update_stats(pp, &ps);
 
 	/* return some buffers to hardware queue, one at a time is too slow */
 	refill = mvneta_rx_refill_queue(pp, rxq);
@@ -2346,7 +2355,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 	/* Update rxq management counters */
 	mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
 
-	return rcvd_pkts;
+	return ps.rx_packets;
 }
 
 /* Main rx processing when using hardware buffer management */
@@ -2472,8 +2481,8 @@ static int mvneta_rx_hwbm(struct napi_struct *napi,
 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
 		u64_stats_update_begin(&stats->syncp);
-		stats->rx_packets += rcvd_pkts;
-		stats->rx_bytes += rcvd_bytes;
+		stats->es.ps.rx_packets += rcvd_pkts;
+		stats->es.ps.rx_bytes += rcvd_bytes;
 		u64_stats_update_end(&stats->syncp);
 	}
 
@@ -2746,8 +2755,8 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 			txq->pending += frags;
 
 		u64_stats_update_begin(&stats->syncp);
-		stats->tx_bytes += len;
-		stats->tx_packets++;
+		stats->es.ps.tx_bytes += len;
+		stats->es.ps.tx_packets++;
 		u64_stats_update_end(&stats->syncp);
 	} else {
 		dev->stats.tx_dropped++;
-- 
2.24.1



* [PATCH net-next 4/5] net: mvneta: introduce xdp counters to ethtool
  2020-02-16 21:07 [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver Lorenzo Bianconi
                   ` (2 preceding siblings ...)
  2020-02-16 21:07 ` [PATCH net-next 3/5] net: mvneta: rely on struct mvneta_stats in mvneta_update_stats routine Lorenzo Bianconi
@ 2020-02-16 21:07 ` Lorenzo Bianconi
  2020-02-17 10:17   ` Jesper Dangaard Brouer
  2020-02-16 21:07 ` [PATCH net-next 5/5] net: mvneta: get rid of xdp_ret in mvneta_swbm_rx_frame Lorenzo Bianconi
  2020-02-17  4:04 ` [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver David Miller
  5 siblings, 1 reply; 17+ messages in thread
From: Lorenzo Bianconi @ 2020-02-16 21:07 UTC (permalink / raw)
  To: netdev; +Cc: ilias.apalodimas, davem, lorenzo.bianconi, brouer

Add xdp_redirect, xdp_pass, xdp_drop and xdp_tx counters
to the ethtool statistics.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/ethernet/marvell/mvneta.c | 45 +++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d41fc7044fa6..e4eb2bd097d4 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -341,6 +341,10 @@ enum {
 	ETHTOOL_STAT_EEE_WAKEUP,
 	ETHTOOL_STAT_SKB_ALLOC_ERR,
 	ETHTOOL_STAT_REFILL_ERR,
+	ETHTOOL_XDP_REDIRECT,
+	ETHTOOL_XDP_PASS,
+	ETHTOOL_XDP_DROP,
+	ETHTOOL_XDP_TX,
 	ETHTOOL_MAX_STATS,
 };
 
@@ -395,6 +399,10 @@ static const struct mvneta_statistic mvneta_statistics[] = {
 	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
 	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
 	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
+	{ ETHTOOL_XDP_REDIRECT, T_SW, "xdp_redirect", },
+	{ ETHTOOL_XDP_PASS, T_SW, "xdp_pass", },
+	{ ETHTOOL_XDP_DROP, T_SW, "xdp_drop", },
+	{ ETHTOOL_XDP_TX, T_SW, "xdp_tx", },
 };
 
 struct mvneta_stats {
@@ -402,6 +410,11 @@ struct mvneta_stats {
 	u64	rx_bytes;
 	u64	tx_packets;
 	u64	tx_bytes;
+	/* xdp */
+	u64	xdp_redirect;
+	u64	xdp_pass;
+	u64	xdp_drop;
+	u64	xdp_tx;
 };
 
 struct mvneta_ethtool_stats {
@@ -1957,6 +1970,10 @@ mvneta_update_stats(struct mvneta_port *pp,
 	u64_stats_update_begin(&stats->syncp);
 	stats->es.ps.rx_packets += ps->rx_packets;
 	stats->es.ps.rx_bytes += ps->rx_bytes;
+	/* xdp */
+	stats->es.ps.xdp_redirect += ps->xdp_redirect;
+	stats->es.ps.xdp_pass += ps->xdp_pass;
+	stats->es.ps.xdp_drop += ps->xdp_drop;
 	u64_stats_update_end(&stats->syncp);
 }
 
@@ -2033,6 +2050,7 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
 	u64_stats_update_begin(&stats->syncp);
 	stats->es.ps.tx_bytes += xdpf->len;
 	stats->es.ps.tx_packets++;
+	stats->es.ps.xdp_tx++;
 	u64_stats_update_end(&stats->syncp);
 
 	mvneta_txq_inc_put(txq);
@@ -2114,6 +2132,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 
 	switch (act) {
 	case XDP_PASS:
+		stats->xdp_pass++;
 		return MVNETA_XDP_PASS;
 	case XDP_REDIRECT: {
 		int err;
@@ -2126,6 +2145,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 					     len, true);
 		} else {
 			ret = MVNETA_XDP_REDIR;
+			stats->xdp_redirect++;
 		}
 		break;
 	}
@@ -2147,6 +2167,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 				     virt_to_head_page(xdp->data),
 				     len, true);
 		ret = MVNETA_XDP_DROPPED;
+		stats->xdp_drop++;
 		break;
 	}
 
@@ -4464,16 +4485,28 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
 		struct mvneta_pcpu_stats *stats;
 		u64 skb_alloc_error;
 		u64 refill_error;
+		u64 xdp_redirect;
+		u64 xdp_pass;
+		u64 xdp_drop;
+		u64 xdp_tx;
 
 		stats = per_cpu_ptr(pp->stats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&stats->syncp);
 			skb_alloc_error = stats->es.skb_alloc_error;
 			refill_error = stats->es.refill_error;
+			xdp_redirect = stats->es.ps.xdp_redirect;
+			xdp_pass = stats->es.ps.xdp_pass;
+			xdp_drop = stats->es.ps.xdp_drop;
+			xdp_tx = stats->es.ps.xdp_tx;
 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
 		es->skb_alloc_error += skb_alloc_error;
 		es->refill_error += refill_error;
+		es->ps.xdp_redirect += xdp_redirect;
+		es->ps.xdp_pass += xdp_pass;
+		es->ps.xdp_drop += xdp_drop;
+		es->ps.xdp_tx += xdp_tx;
 	}
 }
 
@@ -4514,6 +4547,18 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
 			case ETHTOOL_STAT_REFILL_ERR:
 				pp->ethtool_stats[i] = stats.refill_error;
 				break;
+			case ETHTOOL_XDP_REDIRECT:
+				pp->ethtool_stats[i] = stats.ps.xdp_redirect;
+				break;
+			case ETHTOOL_XDP_PASS:
+				pp->ethtool_stats[i] = stats.ps.xdp_pass;
+				break;
+			case ETHTOOL_XDP_DROP:
+				pp->ethtool_stats[i] = stats.ps.xdp_drop;
+				break;
+			case ETHTOOL_XDP_TX:
+				pp->ethtool_stats[i] = stats.ps.xdp_tx;
+				break;
 			}
 			break;
 		}
-- 
2.24.1



* [PATCH net-next 5/5] net: mvneta: get rid of xdp_ret in mvneta_swbm_rx_frame
  2020-02-16 21:07 [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver Lorenzo Bianconi
                   ` (3 preceding siblings ...)
  2020-02-16 21:07 ` [PATCH net-next 4/5] net: mvneta: introduce xdp counters to ethtool Lorenzo Bianconi
@ 2020-02-16 21:07 ` Lorenzo Bianconi
  2020-02-17  4:04 ` [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver David Miller
  5 siblings, 0 replies; 17+ messages in thread
From: Lorenzo Bianconi @ 2020-02-16 21:07 UTC (permalink / raw)
  To: netdev; +Cc: ilias.apalodimas, davem, lorenzo.bianconi, brouer

Get rid of xdp_ret in the mvneta_swbm_rx_frame routine since we can
now rely on the xdp counters in mvneta_stats to decide whether to
flush in case of xdp_redirect.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/ethernet/marvell/mvneta.c | 30 ++++++++++++---------------
 1 file changed, 13 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e4eb2bd097d4..b7045b6a15c2 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -358,10 +358,10 @@ struct mvneta_statistic {
 #define T_REG_64	64
 #define T_SW		1
 
-#define MVNETA_XDP_PASS		BIT(0)
-#define MVNETA_XDP_DROPPED	BIT(1)
-#define MVNETA_XDP_TX		BIT(2)
-#define MVNETA_XDP_REDIR	BIT(3)
+#define MVNETA_XDP_PASS		0
+#define MVNETA_XDP_DROPPED	BIT(0)
+#define MVNETA_XDP_TX		BIT(1)
+#define MVNETA_XDP_REDIR	BIT(2)
 
 static const struct mvneta_statistic mvneta_statistics[] = {
 	{ 0x3000, T_REG_64, "good_octets_received", },
@@ -2183,13 +2183,14 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 		     struct mvneta_rx_queue *rxq,
 		     struct xdp_buff *xdp,
 		     struct bpf_prog *xdp_prog,
-		     struct page *page, u32 *xdp_ret,
+		     struct page *page,
 		     struct mvneta_stats *stats)
 {
 	unsigned char *data = page_address(page);
 	int data_len = -MVNETA_MH_SIZE, len;
 	struct net_device *dev = pp->dev;
 	enum dma_data_direction dma_dir;
+	int ret = 0;
 
 	if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
 		len = MVNETA_MAX_RX_BUF_SIZE;
@@ -2213,14 +2214,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	xdp_set_data_meta_invalid(xdp);
 
 	if (xdp_prog) {
-		u32 ret;
-
 		ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats);
-		if (ret != MVNETA_XDP_PASS) {
-			rx_desc->buf_phys_addr = 0;
-			*xdp_ret |= ret;
-			return ret;
-		}
+		if (ret)
+			goto out;
 	}
 
 	rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
@@ -2244,9 +2240,11 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
 
 	rxq->left_size = rx_desc->data_size - len;
+
+out:
 	rx_desc->buf_phys_addr = 0;
 
-	return 0;
+	return ret;
 }
 
 static void
@@ -2292,7 +2290,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 	struct mvneta_stats ps = {};
 	struct bpf_prog *xdp_prog;
 	struct xdp_buff xdp_buf;
-	u32 xdp_ret = 0;
 
 	/* Get number of received packets */
 	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2325,8 +2322,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			}
 
 			err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
-						   xdp_prog, page, &xdp_ret,
-						   &ps);
+						   xdp_prog, page, &ps);
 			if (err)
 				continue;
 		} else {
@@ -2364,7 +2360,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 	}
 	rcu_read_unlock();
 
-	if (xdp_ret & MVNETA_XDP_REDIR)
+	if (ps.xdp_redirect)
 		xdp_do_flush_map();
 
 	if (ps.rx_packets)
-- 
2.24.1



* Re: [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver
  2020-02-16 21:07 [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver Lorenzo Bianconi
                   ` (4 preceding siblings ...)
  2020-02-16 21:07 ` [PATCH net-next 5/5] net: mvneta: get rid of xdp_ret in mvneta_swbm_rx_frame Lorenzo Bianconi
@ 2020-02-17  4:04 ` David Miller
  2020-02-17  9:58   ` Jesper Dangaard Brouer
  5 siblings, 1 reply; 17+ messages in thread
From: David Miller @ 2020-02-17  4:04 UTC (permalink / raw)
  To: lorenzo; +Cc: netdev, ilias.apalodimas, lorenzo.bianconi, brouer

From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sun, 16 Feb 2020 22:07:28 +0100

> Rework mvneta stats accounting in order to introduce xdp ethtool
> statistics in the mvneta driver.
> Introduce xdp_redirect, xdp_pass, xdp_drop and xdp_tx counters to
> ethtool statistics.
> Fix skb_alloc_error and refill_error ethtool accounting

Series applied, thanks Lorenzo.


* Re: [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver
  2020-02-17  4:04 ` [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver David Miller
@ 2020-02-17  9:58   ` Jesper Dangaard Brouer
  2020-02-17 22:10     ` David Miller
  0 siblings, 1 reply; 17+ messages in thread
From: Jesper Dangaard Brouer @ 2020-02-17  9:58 UTC (permalink / raw)
  To: David Miller
  Cc: lorenzo, netdev, ilias.apalodimas, lorenzo.bianconi, brouer, David Ahern

On Sun, 16 Feb 2020 20:04:57 -0800 (PST)
David Miller <davem@davemloft.net> wrote:

> From: Lorenzo Bianconi <lorenzo@kernel.org>
> Date: Sun, 16 Feb 2020 22:07:28 +0100
> 
> > Rework mvneta stats accounting in order to introduce xdp ethtool
> > statistics in the mvneta driver.
> > Introduce xdp_redirect, xdp_pass, xdp_drop and xdp_tx counters to
> > ethtool statistics.
> > Fix skb_alloc_error and refill_error ethtool accounting  
> 
> Series applied, thanks Lorenzo.

Hey DaveM,

I think this series got applied a bit too fast.

I didn't have time to review it.  It got posted Sunday night/evening
(around 22:07 my TZ) and was applied Monday morning, before I woke up.

And I have issues with the patches...
-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer



* Re: [PATCH net-next 4/5] net: mvneta: introduce xdp counters to ethtool
  2020-02-16 21:07 ` [PATCH net-next 4/5] net: mvneta: introduce xdp counters to ethtool Lorenzo Bianconi
@ 2020-02-17 10:17   ` Jesper Dangaard Brouer
  2020-02-17 10:25     ` Lorenzo Bianconi
  0 siblings, 1 reply; 17+ messages in thread
From: Jesper Dangaard Brouer @ 2020-02-17 10:17 UTC (permalink / raw)
  To: Lorenzo Bianconi
  Cc: netdev, ilias.apalodimas, davem, lorenzo.bianconi, brouer,
	David Ahern, BPF-dev-list

On Sun, 16 Feb 2020 22:07:32 +0100
Lorenzo Bianconi <lorenzo@kernel.org> wrote:

> @@ -2033,6 +2050,7 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
>  	u64_stats_update_begin(&stats->syncp);
>  	stats->es.ps.tx_bytes += xdpf->len;
>  	stats->es.ps.tx_packets++;
> +	stats->es.ps.xdp_tx++;
>  	u64_stats_update_end(&stats->syncp);

I find it confusing that this ethtool stat is named "xdp_tx",
because you use it as an "xmit" counter and not for the XDP_TX action.

Both XDP_TX and XDP_REDIRECT out of this device will increment this
"xdp_tx" counter.  I don't think end-users will comprehend this...

What about naming it "xdp_xmit"?


>  	mvneta_txq_inc_put(txq);
> @@ -2114,6 +2132,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
>  
>  	switch (act) {
>  	case XDP_PASS:
> +		stats->xdp_pass++;
>  		return MVNETA_XDP_PASS;
>  	case XDP_REDIRECT: {
>  		int err;
> @@ -2126,6 +2145,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
>  					     len, true);
>  		} else {
>  			ret = MVNETA_XDP_REDIR;
> +			stats->xdp_redirect++;
>  		}
>  		break;
>  	}
> @@ -2147,6 +2167,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
>  				     virt_to_head_page(xdp->data),
>  				     len, true);
>  		ret = MVNETA_XDP_DROPPED;
> +		stats->xdp_drop++;
>  		break;
>  	}


-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer



* Re: [PATCH net-next 4/5] net: mvneta: introduce xdp counters to ethtool
  2020-02-17 10:17   ` Jesper Dangaard Brouer
@ 2020-02-17 10:25     ` Lorenzo Bianconi
  2020-02-17 10:32       ` Jesper Dangaard Brouer
  2020-02-17 22:18       ` David Miller
  0 siblings, 2 replies; 17+ messages in thread
From: Lorenzo Bianconi @ 2020-02-17 10:25 UTC (permalink / raw)
  To: Jesper Dangaard Brouer
  Cc: Lorenzo Bianconi, netdev, ilias.apalodimas, davem, David Ahern,
	BPF-dev-list


> On Sun, 16 Feb 2020 22:07:32 +0100
> Lorenzo Bianconi <lorenzo@kernel.org> wrote:
> 
> > @@ -2033,6 +2050,7 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
> >  	u64_stats_update_begin(&stats->syncp);
> >  	stats->es.ps.tx_bytes += xdpf->len;
> >  	stats->es.ps.tx_packets++;
> > +	stats->es.ps.xdp_tx++;
> >  	u64_stats_update_end(&stats->syncp);
> 
> I find it confusing that this ethtool stats is named "xdp_tx".
> Because you use it as an "xmit" counter and not for the action XDP_TX.
> 
> Both XDP_TX and XDP_REDIRECT out this device will increment this
> "xdp_tx" counter.  I don't think end-users will comprehend this...
> 
> What about naming it "xdp_xmit" ?

Hi Jesper,

yes, I think it is definitely better. So to follow up:
- rename the current "xdp_tx" counter to "xdp_xmit" and increment it for
  the XDP_TX verdict and for ndo_xdp_xmit
- introduce a new "xdp_tx" counter only for the XDP_TX verdict.

If we agree I can post a follow-up patch.

Regards,
Lorenzo

> 
> 
> >  	mvneta_txq_inc_put(txq);
> > @@ -2114,6 +2132,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
> >  
> >  	switch (act) {
> >  	case XDP_PASS:
> > +		stats->xdp_pass++;
> >  		return MVNETA_XDP_PASS;
> >  	case XDP_REDIRECT: {
> >  		int err;
> > @@ -2126,6 +2145,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
> >  					     len, true);
> >  		} else {
> >  			ret = MVNETA_XDP_REDIR;
> > +			stats->xdp_redirect++;
> >  		}
> >  		break;
> >  	}
> > @@ -2147,6 +2167,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
> >  				     virt_to_head_page(xdp->data),
> >  				     len, true);
> >  		ret = MVNETA_XDP_DROPPED;
> > +		stats->xdp_drop++;
> >  		break;
> >  	}
> 
> 
> -- 
> Best regards,
>   Jesper Dangaard Brouer
>   MSc.CS, Principal Kernel Engineer at Red Hat
>   LinkedIn: http://www.linkedin.com/in/brouer
> 



* Re: [PATCH net-next 4/5] net: mvneta: introduce xdp counters to ethtool
  2020-02-17 10:25     ` Lorenzo Bianconi
@ 2020-02-17 10:32       ` Jesper Dangaard Brouer
  2020-02-17 13:05         ` Andrew Lunn
  2020-02-17 22:18       ` David Miller
  1 sibling, 1 reply; 17+ messages in thread
From: Jesper Dangaard Brouer @ 2020-02-17 10:32 UTC (permalink / raw)
  To: Lorenzo Bianconi
  Cc: Lorenzo Bianconi, netdev, ilias.apalodimas, davem, David Ahern,
	BPF-dev-list, brouer

On Mon, 17 Feb 2020 11:25:50 +0100
Lorenzo Bianconi <lorenzo.bianconi@redhat.com> wrote:

> > On Sun, 16 Feb 2020 22:07:32 +0100
> > Lorenzo Bianconi <lorenzo@kernel.org> wrote:
> >   
> > > @@ -2033,6 +2050,7 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
> > >  	u64_stats_update_begin(&stats->syncp);
> > >  	stats->es.ps.tx_bytes += xdpf->len;
> > >  	stats->es.ps.tx_packets++;
> > > +	stats->es.ps.xdp_tx++;
> > >  	u64_stats_update_end(&stats->syncp);  
> > 
> > I find it confusing that this ethtool stats is named "xdp_tx".
> > Because you use it as an "xmit" counter and not for the action XDP_TX.
> > 
> > Both XDP_TX and XDP_REDIRECT out this device will increment this
> > "xdp_tx" counter.  I don't think end-users will comprehend this...
> > 
> > What about naming it "xdp_xmit" ?  
> 
> Hi Jesper,
> 
> yes, I think it is definitely better. So to follow up:
> - rename current "xdp_tx" counter in "xdp_xmit" and increment it for
>   XDP_TX verdict and for ndo_xdp_xmit
> - introduce a new "xdp_tx" counter only for XDP_TX verdict.
> 
> If we agree I can post a follow-up patch.

I agree, that sounds like an improvement to this patchset.


I suspect David Ahern has some opinions about more general stats for
XDP, but that is a broader discussion that is outside this patchset;
we should have that discussion as well.

-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer



* Re: [PATCH net-next 4/5] net: mvneta: introduce xdp counters to ethtool
  2020-02-17 10:32       ` Jesper Dangaard Brouer
@ 2020-02-17 13:05         ` Andrew Lunn
  2020-02-17 14:51           ` Jesper Dangaard Brouer
  0 siblings, 1 reply; 17+ messages in thread
From: Andrew Lunn @ 2020-02-17 13:05 UTC (permalink / raw)
  To: Jesper Dangaard Brouer
  Cc: Lorenzo Bianconi, Lorenzo Bianconi, netdev, ilias.apalodimas,
	davem, David Ahern, BPF-dev-list

On Mon, Feb 17, 2020 at 11:32:09AM +0100, Jesper Dangaard Brouer wrote:
> On Mon, 17 Feb 2020 11:25:50 +0100
> Lorenzo Bianconi <lorenzo.bianconi@redhat.com> wrote:
> 
> > > On Sun, 16 Feb 2020 22:07:32 +0100
> > > Lorenzo Bianconi <lorenzo@kernel.org> wrote:
> > >   
> > > > @@ -2033,6 +2050,7 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
> > > >  	u64_stats_update_begin(&stats->syncp);
> > > >  	stats->es.ps.tx_bytes += xdpf->len;
> > > >  	stats->es.ps.tx_packets++;
> > > > +	stats->es.ps.xdp_tx++;
> > > >  	u64_stats_update_end(&stats->syncp);  
> > > 
> > > I find it confusing that this ethtool stats is named "xdp_tx".
> > > Because you use it as an "xmit" counter and not for the action XDP_TX.
> > > 
> > > Both XDP_TX and XDP_REDIRECT out this device will increment this
> > > "xdp_tx" counter.  I don't think end-users will comprehend this...
> > > 
> > > What about naming it "xdp_xmit" ?  
> > 
> > Hi Jesper,
> > 
> > yes, I think it is definitely better. So to follow up:
> > - rename current "xdp_tx" counter in "xdp_xmit" and increment it for
> >   XDP_TX verdict and for ndo_xdp_xmit
> > - introduce a new "xdp_tx" counter only for XDP_TX verdict.
> > 
> > If we agree I can post a follow-up patch.
> 
> I agree, that sounds like an improvement to this patchset.
> 
> 
> I suspect David Ahern have some opinions about more general stats for
> XDP, but that it is a more general discussion, that it outside this
> patchset, but we should also have that discussion.

Hi Jesper

I've not been following XDP too much, but xdp_xmit seems pretty
generic. It would be nice if all drivers used the same statistics
names. Less user confusion that way. So why is this outside of the
discussion?

	Andrew


* Re: [PATCH net-next 4/5] net: mvneta: introduce xdp counters to ethtool
  2020-02-17 13:05         ` Andrew Lunn
@ 2020-02-17 14:51           ` Jesper Dangaard Brouer
  2020-02-17 18:17             ` David Ahern
  0 siblings, 1 reply; 17+ messages in thread
From: Jesper Dangaard Brouer @ 2020-02-17 14:51 UTC (permalink / raw)
  To: Andrew Lunn
  Cc: Lorenzo Bianconi, Lorenzo Bianconi, netdev, ilias.apalodimas,
	davem, David Ahern, BPF-dev-list, brouer

On Mon, 17 Feb 2020 14:05:15 +0100
Andrew Lunn <andrew@lunn.ch> wrote:

> On Mon, Feb 17, 2020 at 11:32:09AM +0100, Jesper Dangaard Brouer wrote:
> > On Mon, 17 Feb 2020 11:25:50 +0100
> > Lorenzo Bianconi <lorenzo.bianconi@redhat.com> wrote:
> >   
[...]
> > > 
> > > yes, I think it is definitely better. So to follow up:
> > > - rename current "xdp_tx" counter in "xdp_xmit" and increment it for
> > >   XDP_TX verdict and for ndo_xdp_xmit
> > > - introduce a new "xdp_tx" counter only for XDP_TX verdict.
> > > 
> > > If we agree I can post a follow-up patch.  
> > 
> > I agree, that sounds like an improvement to this patchset.
> > 
> > 
> > I suspect David Ahern have some opinions about more general stats for
> > XDP, but that it is a more general discussion, that it outside this
> > patchset, but we should also have that discussion.  
> 
> Hi Jesper
> 
> I've not been following XDP too much, but xdp_xmit seems pretty
> generic. It would be nice if all drivers used the same statistics
> names. Less user confusion that way. So why is this outside of the
> discussion?

I do want to have this discussion, please.

I had hoped this patchset would spark that discussion... maybe we can
have it even though the patchset has already been applied?

My only request is that, if we don't revert, we fix up the "xdp_tx"
counter name.  It would make it easier for us if we can keep the
patches applied, as we are preparing (asciinema) demos for [1].

That said, I think it is rather important to standardize on the same
statistics names across drivers... which is an assignment that Lorenzo
has already signed up for [2].


[1] https://netdevconf.info/0x14/session.html?tutorial-add-XDP-support-to-a-NIC-driver
[2] https://github.com/xdp-project/xdp-project/blob/master/planning.org#consistency-for-statistics-with-xdp
-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer



* Re: [PATCH net-next 4/5] net: mvneta: introduce xdp counters to ethtool
  2020-02-17 14:51           ` Jesper Dangaard Brouer
@ 2020-02-17 18:17             ` David Ahern
  0 siblings, 0 replies; 17+ messages in thread
From: David Ahern @ 2020-02-17 18:17 UTC (permalink / raw)
  To: Jesper Dangaard Brouer, Andrew Lunn
  Cc: Lorenzo Bianconi, Lorenzo Bianconi, netdev, ilias.apalodimas,
	davem, David Ahern, BPF-dev-list

On 2/17/20 7:51 AM, Jesper Dangaard Brouer wrote:
> On Mon, 17 Feb 2020 14:05:15 +0100
> Andrew Lunn <andrew@lunn.ch> wrote:
> 
>> On Mon, Feb 17, 2020 at 11:32:09AM +0100, Jesper Dangaard Brouer wrote:
>>> On Mon, 17 Feb 2020 11:25:50 +0100
>>> Lorenzo Bianconi <lorenzo.bianconi@redhat.com> wrote:
>>>   
> [...]
>>>>
>>>> yes, I think it is definitely better. So to follow up:
>>>> - rename current "xdp_tx" counter in "xdp_xmit" and increment it for
>>>>   XDP_TX verdict and for ndo_xdp_xmit
>>>> - introduce a new "xdp_tx" counter only for XDP_TX verdict.
>>>>
>>>> If we agree I can post a follow-up patch.  
>>>
>>> I agree, that sounds like an improvement to this patchset.
>>>
>>>
>>> I suspect David Ahern have some opinions about more general stats for
>>> XDP, but that it is a more general discussion, that it outside this
>>> patchset, but we should also have that discussion.  
>>
>> Hi Jesper
>>
>> I've not been following XDP too much, but xdp_xmit seems pretty
>> generic. It would be nice if all drivers used the same statistics
>> names. Less user confusion that way. So why is this outside of the
>> discussion?

Hi Andrew: I brought this up over a year ago - the need for some
consistency in XDP stats (names and meaning) across drivers:

https://lore.kernel.org/netdev/1d9a6548-4d1d-6624-e808-6ab0460a8655@gmail.com/

I don't have strong preferences on which driver is right in the current
naming, only that we have consistency. There has not been much progress
in the past 15 months, so I am glad to see someone take this on.

> 
> I do want to have this discussion, please.
> 
> I had hoped this patchset sparked this that discussion... maybe we can
> have it despite this patchset already got applied?
> 
> My only request is that, if we don't revert, we fixup the "xdp_tx"
> counter name.  It would make it easier for us[1] if we can keep them
> applied, as we are preparing (asciinema) demos for [1].

Jesper: what about the mlx5 naming scheme:

     rx_xdp_drop: 86468350180
     rx_xdp_redirect: 18860584
     rx_xdp_tx_xmit: 0

The rx prefix shows the xdp action is in the Rx path, and then the Tx
path has tx_xdp_xmit.

i40e seems to have something similar for the Rx path:
     rx-0.xdp.pass: 0
     rx-0.xdp.drop: 0
     rx-0.xdp.tx: 0
     rx-0.xdp.unknown: 0
     rx-0.xdp.redirect: 0
     rx-0.xdp.redirect_fail: 0

I don't see any Tx stats for xdp, but this is an older kernel so not
sure what 5.x has.

Looks like sfc has a similar naming scheme:
     rx_xdp_drops: 0
     rx_xdp_bad_drops: 0
     rx_xdp_tx: 0
     rx_xdp_redirect: 0

So if mvneta follows these 3, the names just need rx_ prepended.


* Re: [PATCH net-next 0/5] add xdp ethtool stats to mvneta driver
  2020-02-17  9:58   ` Jesper Dangaard Brouer
@ 2020-02-17 22:10     ` David Miller
  0 siblings, 0 replies; 17+ messages in thread
From: David Miller @ 2020-02-17 22:10 UTC (permalink / raw)
  To: brouer; +Cc: lorenzo, netdev, ilias.apalodimas, lorenzo.bianconi, dsahern

From: Jesper Dangaard Brouer <brouer@redhat.com>
Date: Mon, 17 Feb 2020 10:58:25 +0100

> And I have issues with the patches...

We can fix them up in follow-on patches, the merge window isn't tomorrow :-)


* Re: [PATCH net-next 4/5] net: mvneta: introduce xdp counters to ethtool
  2020-02-17 10:25     ` Lorenzo Bianconi
  2020-02-17 10:32       ` Jesper Dangaard Brouer
@ 2020-02-17 22:18       ` David Miller
  2020-02-17 22:38         ` Lorenzo Bianconi
  1 sibling, 1 reply; 17+ messages in thread
From: David Miller @ 2020-02-17 22:18 UTC (permalink / raw)
  To: lorenzo.bianconi; +Cc: brouer, lorenzo, netdev, ilias.apalodimas, dsahern, bpf

From: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
Date: Mon, 17 Feb 2020 11:25:50 +0100

> yes, I think it is definitely better. So to follow up:
> - rename current "xdp_tx" counter in "xdp_xmit" and increment it for
>   XDP_TX verdict and for ndo_xdp_xmit
> - introduce a new "xdp_tx" counter only for XDP_TX verdict.
> 
> If we agree I can post a follow-up patch.

What names do other drivers use?  Consistency is important.  I noticed
while reviewing these patches that mellanox drivers export similar
statistics in the exact same way.


* Re: [PATCH net-next 4/5] net: mvneta: introduce xdp counters to ethtool
  2020-02-17 22:18       ` David Miller
@ 2020-02-17 22:38         ` Lorenzo Bianconi
  0 siblings, 0 replies; 17+ messages in thread
From: Lorenzo Bianconi @ 2020-02-17 22:38 UTC (permalink / raw)
  To: David Miller; +Cc: brouer, lorenzo, netdev, ilias.apalodimas, dsahern, bpf


> From: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
> Date: Mon, 17 Feb 2020 11:25:50 +0100
> 
> > yes, I think it is definitely better. So to follow up:
> > - rename current "xdp_tx" counter in "xdp_xmit" and increment it for
> >   XDP_TX verdict and for ndo_xdp_xmit
> > - introduce a new "xdp_tx" counter only for XDP_TX verdict.
> > 
> > If we agree I can post a follow-up patch.
> 
> What names do other drivers use?  Consistency is important.  I noticed
> while reviewing these patches that mellanox drivers export similar
> statistics in the exact same way.

Following David's suggestion, I am adopting the mellanox naming scheme,
adding an "rx_" prefix for the xdp actions on the rx path, and, based
on Jesper's comment, I am differentiating between XDP_TX and
ndo_xdp_xmit. So, to follow up:

XDP_TX: rx_xdp_tx_xmit
XDP_DROP: rx_xdp_drop
XDP_PASS: rx_xdp_pass
XDP_REDIRECT: rx_xdp_redirect
ndo_xdp_xmit: tx_xdp_xmit

I will post an RFC patch soon.
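
A minimal sketch of how the renamed mvneta_statistics entries could
look (illustrative only: ETHTOOL_XDP_XMIT is a hypothetical new enum
value, and the actual RFC patch may differ):

	/* xdp verdicts observed on the rx path */
	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx_xmit", },
	/* frames transmitted via ndo_xdp_xmit */
	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },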

Regards,
Lorenzo

> 


