From: Aurelien Aptel <aaptel@nvidia.com>
To: linux-nvme@lists.infradead.org, netdev@vger.kernel.org,
	sagi@grimberg.me, hch@lst.de, kbusch@kernel.org, axboe@fb.com,
	chaitanyak@nvidia.com, davem@davemloft.net, kuba@kernel.org
Cc: Aurelien Aptel <aaptel@nvidia.com>,
	aurelien.aptel@gmail.com, smalin@nvidia.com, malin1024@gmail.com,
	ogerlitz@nvidia.com, yorayz@nvidia.com, borisp@nvidia.com
Subject: [PATCH v8 25/25] net/mlx5e: NVMEoTCP, statistics
Date: Mon,  9 Jan 2023 15:31:16 +0200
Message-ID: <20230109133116.20801-26-aaptel@nvidia.com>
In-Reply-To: <20230109133116.20801-1-aaptel@nvidia.com>

NVMEoTCP offload statistics cover both the control and data paths:
counters for the netdev DDP ops, offloaded packets/bytes, and
dropped packets.

Expose the statistics via the new ethtool_ops->get_ulp_ddp_stats()
callback and the new ETH_SS_ULP_DDP_STATS string set, rather than
through the regular statistics flow.

Signed-off-by: Ben Ben-Ishay <benishay@nvidia.com>
Signed-off-by: Boris Pismenny <borisp@nvidia.com>
Signed-off-by: Or Gerlitz <ogerlitz@nvidia.com>
Signed-off-by: Yoray Zack <yorayz@nvidia.com>
Signed-off-by: Shai Malin <smalin@nvidia.com>
Signed-off-by: Aurelien Aptel <aaptel@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
---
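Not part of the patch -- a minimal userspace sketch of the reporting
order implemented by nvmeotcp_stats.c below. The channel count (NCH),
counter count (NRQ) and all values are made up for illustration; only
the counter names and the ordering (global software counters first,
then the summed per-rx-queue counters, then the per-queue copies whose
names carry the channel index) come from the patch.

/*
 * Editor's illustration, not part of the patch: NCH, NRQ and the
 * values are made up; only the counter names and the reporting order
 * mirror mlx5e_nvmeotcp_get_strings()/mlx5e_nvmeotcp_get_stats().
 */
#include <stdio.h>
#include <stdint.h>

#define NCH 2	/* stands in for priv->stats_nch */
#define NRQ 4	/* per-rx-queue counters per channel */

static const char *rq_names[NRQ] = {
	"nvmeotcp_drop", "nvmeotcp_resync",
	"nvmeotcp_offload_packets", "nvmeotcp_offload_bytes",
};

int main(void)
{
	/* made-up sample values */
	uint64_t sk_add = 2, sk_add_fail = 0, sk_del = 1;
	uint64_t rq[NCH][NRQ] = { { 0, 1, 100, 51200 }, { 2, 0, 80, 40960 } };
	uint64_t sum[NRQ] = { 0 };
	int ch, i;

	/* 1) global counters (three of the six shown for brevity) */
	printf("rx_nvmeotcp_sk_add: %llu\n", (unsigned long long)sk_add);
	printf("rx_nvmeotcp_sk_add_fail: %llu\n", (unsigned long long)sk_add_fail);
	printf("rx_nvmeotcp_sk_del: %llu\n", (unsigned long long)sk_del);

	/* 2) sums of the per-rx-queue counters, no channel suffix */
	for (ch = 0; ch < NCH; ch++)
		for (i = 0; i < NRQ; i++)
			sum[i] += rq[ch][i];
	for (i = 0; i < NRQ; i++)
		printf("rx_%s: %llu\n", rq_names[i], (unsigned long long)sum[i]);

	/* 3) the same counters again, one entry per channel */
	for (ch = 0; ch < NCH; ch++)
		for (i = 0; i < NRQ; i++)
			printf("rx%d_%s: %llu\n", ch, rq_names[i],
			       (unsigned long long)rq[ch][i]);
	return 0;
}
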
 .../net/ethernet/mellanox/mlx5/core/Makefile  |   3 +-
 .../mellanox/mlx5/core/en_accel/nvmeotcp.c    |  43 +++++--
 .../mellanox/mlx5/core/en_accel/nvmeotcp.h    |  19 +++
 .../mlx5/core/en_accel/nvmeotcp_rxtx.c        |  11 +-
 .../mlx5/core/en_accel/nvmeotcp_stats.c       | 108 ++++++++++++++++++
 .../ethernet/mellanox/mlx5/core/en_ethtool.c  |  17 +++
 .../ethernet/mellanox/mlx5/core/en_stats.c    |  17 +++
 .../ethernet/mellanox/mlx5/core/en_stats.h    |  10 ++
 8 files changed, 217 insertions(+), 11 deletions(-)
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_stats.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9804bd086bf4..d48bde4ca8de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -103,7 +103,8 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
 				   en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
 				   en_accel/ktls_tx.o en_accel/ktls_rx.o
 
-mlx5_core-$(CONFIG_MLX5_EN_NVMEOTCP) += en_accel/fs_tcp.o en_accel/nvmeotcp.o en_accel/nvmeotcp_rxtx.o
+mlx5_core-$(CONFIG_MLX5_EN_NVMEOTCP) += en_accel/fs_tcp.o en_accel/nvmeotcp.o \
+					en_accel/nvmeotcp_rxtx.o en_accel/nvmeotcp_stats.o
 
 mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
 					steering/dr_matcher.o steering/dr_rule.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
index b440ed10c373..6e07cea438ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
@@ -614,9 +614,15 @@ mlx5e_nvmeotcp_queue_init(struct net_device *netdev,
 {
 	struct nvme_tcp_ddp_config *config = &tconfig->nvmeotcp;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_nvmeotcp_sw_stats *sw_stats;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5e_nvmeotcp_queue *queue;
 	int queue_id, err;
+	u32 channel_ix;
+
+	channel_ix = mlx5e_get_channel_ix_from_io_cpu(&priv->channels.params,
+						      config->io_cpu);
+	sw_stats = &priv->nvmeotcp->sw_stats;
 
 	if (tconfig->type != ULP_DDP_NVME) {
 		err = -EOPNOTSUPP;
@@ -643,11 +649,11 @@ mlx5e_nvmeotcp_queue_init(struct net_device *netdev,
 	queue->id = queue_id;
 	queue->dgst = config->dgst;
 	queue->pda = config->cpda;
-	queue->channel_ix = mlx5e_get_channel_ix_from_io_cpu(&priv->channels.params,
-							     config->io_cpu);
+	queue->channel_ix = channel_ix;
 	queue->size = config->queue_size;
 	queue->max_klms_per_wqe = MLX5E_MAX_KLM_PER_WQE(mdev);
 	queue->priv = priv;
+	queue->sw_stats = sw_stats;
 	init_completion(&queue->static_params_done);
 
 	err = mlx5e_nvmeotcp_queue_rx_init(queue, config, netdev);
@@ -659,6 +665,7 @@ mlx5e_nvmeotcp_queue_init(struct net_device *netdev,
 	if (err)
 		goto destroy_rx;
 
+	atomic64_inc(&sw_stats->rx_nvmeotcp_sk_add);
 	write_lock_bh(&sk->sk_callback_lock);
 	ulp_ddp_set_ctx(sk, queue);
 	write_unlock_bh(&sk->sk_callback_lock);
@@ -672,6 +679,7 @@ mlx5e_nvmeotcp_queue_init(struct net_device *netdev,
 free_queue:
 	kfree(queue);
 out:
+	atomic64_inc(&sw_stats->rx_nvmeotcp_sk_add_fail);
 	return err;
 }
 
@@ -685,6 +693,8 @@ mlx5e_nvmeotcp_queue_teardown(struct net_device *netdev,
 
 	queue = container_of(ulp_ddp_get_ctx(sk), struct mlx5e_nvmeotcp_queue, ulp_ddp_ctx);
 
+	atomic64_inc(&queue->sw_stats->rx_nvmeotcp_sk_del);
+
 	WARN_ON(refcount_read(&queue->ref_count) != 1);
 	mlx5e_nvmeotcp_destroy_rx(priv, queue, mdev);
 
@@ -816,25 +826,35 @@ mlx5e_nvmeotcp_ddp_setup(struct net_device *netdev,
 			 struct ulp_ddp_io *ddp)
 {
 	struct scatterlist *sg = ddp->sg_table.sgl;
+	struct mlx5e_nvmeotcp_sw_stats *sw_stats;
 	struct mlx5e_nvmeotcp_queue_entry *nvqt;
 	struct mlx5e_nvmeotcp_queue *queue;
 	struct mlx5_core_dev *mdev;
 	int i, size = 0, count = 0;
+	int ret = 0;
 
 	queue = container_of(ulp_ddp_get_ctx(sk),
 			     struct mlx5e_nvmeotcp_queue, ulp_ddp_ctx);
+	sw_stats = queue->sw_stats;
 	mdev = queue->priv->mdev;
 	count = dma_map_sg(mdev->device, ddp->sg_table.sgl, ddp->nents,
 			   DMA_FROM_DEVICE);
 
-	if (count <= 0)
-		return -EINVAL;
+	if (count <= 0) {
+		ret = -EINVAL;
+		goto ddp_setup_fail;
+	}
+	atomic64_inc(&sw_stats->rx_nvmeotcp_ddp_setup);
 
-	if (WARN_ON(count > mlx5e_get_max_sgl(mdev)))
-		return -ENOSPC;
+	if (WARN_ON(count > mlx5e_get_max_sgl(mdev))) {
+		ret = -ENOSPC;
+		goto ddp_setup_fail;
+	}
 
-	if (!mlx5e_nvmeotcp_validate_sgl(sg, count, READ_ONCE(netdev->mtu)))
-		return -EOPNOTSUPP;
+	if (!mlx5e_nvmeotcp_validate_sgl(sg, count, READ_ONCE(netdev->mtu))) {
+		ret = -EOPNOTSUPP;
+		goto ddp_setup_fail;
+	}
 
 	for (i = 0; i < count; i++)
 		size += sg_dma_len(&sg[i]);
@@ -846,8 +866,12 @@ mlx5e_nvmeotcp_ddp_setup(struct net_device *netdev,
 	nvqt->ccid_gen++;
 	nvqt->sgl_length = count;
 	mlx5e_nvmeotcp_post_klm_wqe(queue, KLM_UMR, ddp->command_id, count);
-
 	return 0;
+
+ddp_setup_fail:
+	dma_unmap_sg(mdev->device, ddp->sg_table.sgl, count, DMA_FROM_DEVICE);
+	atomic64_inc(&sw_stats->rx_nvmeotcp_ddp_setup_fail);
+	return ret;
 }
 
 void mlx5e_nvmeotcp_ctx_complete(struct mlx5e_icosq_wqe_info *wi)
@@ -894,6 +918,7 @@ mlx5e_nvmeotcp_ddp_teardown(struct net_device *netdev,
 	q_entry->queue = queue;
 
 	mlx5e_nvmeotcp_post_klm_wqe(queue, KLM_INV_UMR, ddp->command_id, 0);
+	atomic64_inc(&queue->sw_stats->rx_nvmeotcp_ddp_teardown);
 }
 
 static void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.h
index a5cfd9e31be7..2d6a12b40429 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.h
@@ -9,6 +9,15 @@
 #include "en.h"
 #include "en/params.h"
 
+struct mlx5e_nvmeotcp_sw_stats {
+	atomic64_t rx_nvmeotcp_sk_add;
+	atomic64_t rx_nvmeotcp_sk_add_fail;
+	atomic64_t rx_nvmeotcp_sk_del;
+	atomic64_t rx_nvmeotcp_ddp_setup;
+	atomic64_t rx_nvmeotcp_ddp_setup_fail;
+	atomic64_t rx_nvmeotcp_ddp_teardown;
+};
+
 struct mlx5e_nvmeotcp_queue_entry {
 	struct mlx5e_nvmeotcp_queue *queue;
 	u32 sgl_length;
@@ -52,6 +61,7 @@ struct mlx5e_nvmeotcp_queue_handler {
  *	@sk: The socket used by the NVMe-TCP queue
  *	@crc_rx: CRC Rx offload indication for this queue
  *	@priv: mlx5e netdev priv
+ *	@sw_stats: Global software statistics for nvmeotcp offload
  *	@static_params_done: Async completion structure for the initial umr mapping
  *	synchronization
  *	@sq_lock: Spin lock for the icosq
@@ -88,6 +98,7 @@ struct mlx5e_nvmeotcp_queue {
 	u8 crc_rx:1;
 	/* for ddp invalidate flow */
 	struct mlx5e_priv *priv;
+	struct mlx5e_nvmeotcp_sw_stats *sw_stats;
 	/* end of data-path section */
 
 	struct completion static_params_done;
@@ -97,6 +108,7 @@ struct mlx5e_nvmeotcp_queue {
 };
 
 struct mlx5e_nvmeotcp {
+	struct mlx5e_nvmeotcp_sw_stats sw_stats;
 	struct ida queue_ids;
 	struct rhashtable queue_hash;
 	bool enabled;
@@ -113,6 +125,9 @@ void mlx5e_nvmeotcp_ddp_inv_done(struct mlx5e_icosq_wqe_info *wi);
 void mlx5e_nvmeotcp_ctx_complete(struct mlx5e_icosq_wqe_info *wi);
 static inline void mlx5e_nvmeotcp_init_rx(struct mlx5e_priv *priv) {}
 void mlx5e_nvmeotcp_cleanup_rx(struct mlx5e_priv *priv);
+int mlx5e_nvmeotcp_get_count(struct mlx5e_priv *priv);
+int mlx5e_nvmeotcp_get_strings(struct mlx5e_priv *priv, uint8_t *data);
+int mlx5e_nvmeotcp_get_stats(struct mlx5e_priv *priv, u64 *data);
 extern const struct ulp_ddp_dev_ops mlx5e_nvmeotcp_ops;
 #else
 
@@ -122,5 +137,9 @@ static inline void mlx5e_nvmeotcp_cleanup(struct mlx5e_priv *priv) {}
 static inline int set_ulp_ddp_nvme_tcp(struct net_device *dev, bool en) { return -EOPNOTSUPP; }
 static inline void mlx5e_nvmeotcp_init_rx(struct mlx5e_priv *priv) {}
 static inline void mlx5e_nvmeotcp_cleanup_rx(struct mlx5e_priv *priv) {}
+static inline int mlx5e_nvmeotcp_get_count(struct mlx5e_priv *priv) { return 0; }
+static inline int mlx5e_nvmeotcp_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+{ return 0; }
+static inline int mlx5e_nvmeotcp_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
 #endif
 #endif /* __MLX5E_NVMEOTCP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_rxtx.c
index 4c7dab28ef56..4f23f6e396b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_rxtx.c
@@ -111,6 +111,7 @@ mlx5e_nvmeotcp_rebuild_rx_skb_nonlinear(struct mlx5e_rq *rq, struct sk_buff *skb
 	int ccoff, cclen, hlen, ccid, remaining, fragsz, to_copy = 0;
 	struct net_device *netdev = rq->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_rq_stats *stats = rq->stats;
 	struct mlx5e_nvmeotcp_queue_entry *nqe;
 	skb_frag_t org_frags[MAX_SKB_FRAGS];
 	struct mlx5e_nvmeotcp_queue *queue;
@@ -122,12 +123,14 @@ mlx5e_nvmeotcp_rebuild_rx_skb_nonlinear(struct mlx5e_rq *rq, struct sk_buff *skb
 	queue = mlx5e_nvmeotcp_get_queue(priv->nvmeotcp, queue_id);
 	if (unlikely(!queue)) {
 		dev_kfree_skb_any(skb);
+		stats->nvmeotcp_drop++;
 		return false;
 	}
 
 	cqe128 = container_of(cqe, struct mlx5e_cqe128, cqe64);
 	if (cqe_is_nvmeotcp_resync(cqe)) {
 		nvmeotcp_update_resync(queue, cqe128);
+		stats->nvmeotcp_resync++;
 		mlx5e_nvmeotcp_put_queue(queue);
 		return true;
 	}
@@ -201,7 +204,8 @@ mlx5e_nvmeotcp_rebuild_rx_skb_nonlinear(struct mlx5e_rq *rq, struct sk_buff *skb
 						 org_nr_frags,
 						 frag_index);
 	}
-
+	stats->nvmeotcp_offload_packets++;
+	stats->nvmeotcp_offload_bytes += cclen;
 	mlx5e_nvmeotcp_put_queue(queue);
 	return true;
 }
@@ -213,6 +217,7 @@ mlx5e_nvmeotcp_rebuild_rx_skb_linear(struct mlx5e_rq *rq, struct sk_buff *skb,
 	int ccoff, cclen, hlen, ccid, remaining, fragsz, to_copy = 0;
 	struct net_device *netdev = rq->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_rq_stats *stats = rq->stats;
 	struct mlx5e_nvmeotcp_queue_entry *nqe;
 	struct mlx5e_nvmeotcp_queue *queue;
 	struct mlx5e_cqe128 *cqe128;
@@ -222,12 +227,14 @@ mlx5e_nvmeotcp_rebuild_rx_skb_linear(struct mlx5e_rq *rq, struct sk_buff *skb,
 	queue = mlx5e_nvmeotcp_get_queue(priv->nvmeotcp, queue_id);
 	if (unlikely(!queue)) {
 		dev_kfree_skb_any(skb);
+		stats->nvmeotcp_drop++;
 		return false;
 	}
 
 	cqe128 = container_of(cqe, struct mlx5e_cqe128, cqe64);
 	if (cqe_is_nvmeotcp_resync(cqe)) {
 		nvmeotcp_update_resync(queue, cqe128);
+		stats->nvmeotcp_resync++;
 		mlx5e_nvmeotcp_put_queue(queue);
 		return true;
 	}
@@ -301,6 +308,8 @@ mlx5e_nvmeotcp_rebuild_rx_skb_linear(struct mlx5e_rq *rq, struct sk_buff *skb,
 				       hlen + cclen, remaining);
 	}
 
+	stats->nvmeotcp_offload_packets++;
+	stats->nvmeotcp_offload_bytes += cclen;
 	mlx5e_nvmeotcp_put_queue(queue);
 	return true;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_stats.c
new file mode 100644
index 000000000000..8e800886cf27
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_stats.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
+
+#include "en_accel/nvmeotcp.h"
+
+/* Global counters */
+static const struct counter_desc nvmeotcp_sw_stats_desc[] = {
+	{ MLX5E_DECLARE_STAT(struct mlx5e_nvmeotcp_sw_stats, rx_nvmeotcp_sk_add) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_nvmeotcp_sw_stats, rx_nvmeotcp_sk_add_fail) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_nvmeotcp_sw_stats, rx_nvmeotcp_sk_del) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_nvmeotcp_sw_stats, rx_nvmeotcp_ddp_setup) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_nvmeotcp_sw_stats, rx_nvmeotcp_ddp_setup_fail) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_nvmeotcp_sw_stats, rx_nvmeotcp_ddp_teardown) },
+};
+
+/* Per-rx-queue counters */
+static const struct counter_desc nvmeotcp_rq_stats_desc[] = {
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, nvmeotcp_drop) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, nvmeotcp_resync) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, nvmeotcp_offload_packets) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, nvmeotcp_offload_bytes) },
+};
+
+/* Names of sums of the per-rx-queue counters
+ *
+ * The per-queue desc have the queue number in their name, so we
+ * cannot use them for the sums. We don't store the sums in sw_stats
+ * so there are no struct offsets to specify.
+ */
+static const char *const nvmeotcp_rq_sum_names[] = {
+	"rx_nvmeotcp_drop",
+	"rx_nvmeotcp_resync",
+	"rx_nvmeotcp_offload_packets",
+	"rx_nvmeotcp_offload_bytes",
+};
+
+static_assert(ARRAY_SIZE(nvmeotcp_rq_stats_desc) == ARRAY_SIZE(nvmeotcp_rq_sum_names));
+
+#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
+	atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
+
+int mlx5e_nvmeotcp_get_count(struct mlx5e_priv *priv)
+{
+	int max_nch = priv->stats_nch;
+
+	if (!priv->nvmeotcp)
+		return 0;
+
+	return ARRAY_SIZE(nvmeotcp_sw_stats_desc) +
+		ARRAY_SIZE(nvmeotcp_rq_stats_desc) +
+		(max_nch * ARRAY_SIZE(nvmeotcp_rq_stats_desc));
+}
+
+int mlx5e_nvmeotcp_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+	unsigned int i, ch, n = 0, idx = 0;
+
+	if (!priv->nvmeotcp)
+		return 0;
+
+	/* global counters */
+	for (i = 0; i < ARRAY_SIZE(nvmeotcp_sw_stats_desc); i++, n++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       nvmeotcp_sw_stats_desc[i].format);
+
+	/* summed per-rx-queue counters */
+	for (i = 0; i < ARRAY_SIZE(nvmeotcp_rq_stats_desc); i++, n++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       nvmeotcp_rq_sum_names[i]);
+
+	/* per-rx-queue counters */
+	for (ch = 0; ch < priv->stats_nch; ch++)
+		for (i = 0; i < ARRAY_SIZE(nvmeotcp_rq_stats_desc); i++, n++)
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				nvmeotcp_rq_stats_desc[i].format, ch);
+
+	return n;
+}
+
+int mlx5e_nvmeotcp_get_stats(struct mlx5e_priv *priv, u64 *data)
+{
+	unsigned int i, ch, n = 0, idx = 0, sum_start = 0;
+
+	if (!priv->nvmeotcp)
+		return 0;
+
+	/* global counters */
+	for (i = 0; i < ARRAY_SIZE(nvmeotcp_sw_stats_desc); i++, n++)
+		data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->nvmeotcp->sw_stats,
+						      nvmeotcp_sw_stats_desc, i);
+
+	/* summed per-rx-queue counters */
+	sum_start = idx;
+	for (i = 0; i < ARRAY_SIZE(nvmeotcp_rq_stats_desc); i++, n++)
+		data[idx++] = 0;
+
+	/* per-rx-queue counters */
+	for (ch = 0; ch < priv->stats_nch; ch++) {
+		for (i = 0; i < ARRAY_SIZE(nvmeotcp_rq_stats_desc); i++, n++) {
+			u64 v = MLX5E_READ_CTR64_CPU(&priv->channel_stats[ch]->rq,
+						     nvmeotcp_rq_stats_desc, i);
+			data[idx++] = v;
+			data[sum_start + i] += v;
+		}
+	}
+
+	return n;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 7f763152f989..dc9fc48eff12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -246,6 +246,8 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
 		return MLX5E_NUM_PFLAGS;
 	case ETH_SS_TEST:
 		return mlx5e_self_test_num(priv);
+	case ETH_SS_ULP_DDP_STATS:
+		return mlx5e_stats_ulp_ddp_total_num(priv);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -276,6 +278,10 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
 	case ETH_SS_STATS:
 		mlx5e_stats_fill_strings(priv, data);
 		break;
+
+	case ETH_SS_ULP_DDP_STATS:
+		mlx5e_stats_ulp_ddp_fill_strings(priv, data);
+		break;
 	}
 }
 
@@ -2400,6 +2406,16 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev,
 }
 
 #ifdef CONFIG_MLX5_EN_NVMEOTCP
+static int mlx5e_get_ulp_ddp_stats(struct net_device *netdev,
+				   u64 *stats)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	mlx5e_stats_ulp_ddp_get(priv, stats);
+
+	return 0;
+}
+
 static int mlx5e_set_ulp_ddp_capabilities(struct net_device *netdev, unsigned long *new_caps)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -2501,6 +2517,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
 	.get_rmon_stats    = mlx5e_get_rmon_stats,
 	.get_link_ext_stats = mlx5e_get_link_ext_stats,
 #ifdef CONFIG_MLX5_EN_NVMEOTCP
+	.get_ulp_ddp_stats = mlx5e_get_ulp_ddp_stats,
 	.set_ulp_ddp_capabilities = mlx5e_set_ulp_ddp_capabilities,
 #endif
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 6687b8136e44..811f71ed8153 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -2497,3 +2497,20 @@ unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
 {
 	return ARRAY_SIZE(mlx5e_nic_stats_grps);
 }
+
+/* ULP DDP stats */
+
+unsigned int mlx5e_stats_ulp_ddp_total_num(struct mlx5e_priv *priv)
+{
+	return mlx5e_nvmeotcp_get_count(priv);
+}
+
+void mlx5e_stats_ulp_ddp_fill_strings(struct mlx5e_priv *priv, u8 *data)
+{
+	mlx5e_nvmeotcp_get_strings(priv, data);
+}
+
+void mlx5e_stats_ulp_ddp_get(struct mlx5e_priv *priv, u64 *stats)
+{
+	mlx5e_nvmeotcp_get_stats(priv, stats);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 375752d6546d..1b2a2c7de824 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -129,6 +129,10 @@ void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
 void mlx5e_get_link_ext_stats(struct net_device *dev,
 			      struct ethtool_link_ext_stats *stats);
 
+unsigned int mlx5e_stats_ulp_ddp_total_num(struct mlx5e_priv *priv);
+void mlx5e_stats_ulp_ddp_fill_strings(struct mlx5e_priv *priv, u8 *data);
+void mlx5e_stats_ulp_ddp_get(struct mlx5e_priv *priv, u64 *stats);
+
 /* Concrete NIC Stats */
 
 struct mlx5e_sw_stats {
@@ -395,6 +399,12 @@ struct mlx5e_rq_stats {
 	u64 tls_resync_res_skip;
 	u64 tls_err;
 #endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+	u64 nvmeotcp_drop;
+	u64 nvmeotcp_resync;
+	u64 nvmeotcp_offload_packets;
+	u64 nvmeotcp_offload_bytes;
+#endif
 };
 
 struct mlx5e_sq_stats {
-- 
2.31.1


Thread overview: 34+ messages
2023-01-09 13:30 [PATCH v8 00/25] nvme-tcp receive offloads Aurelien Aptel
2023-01-09 13:30 ` [PATCH v8 01/25] net: Introduce direct data placement tcp offload Aurelien Aptel
2023-01-12  4:47   ` Jakub Kicinski
2023-01-13 14:17     ` Aurelien Aptel
2023-01-09 13:30 ` [PATCH v8 02/25] net/ethtool: add new stringset ETH_SS_ULP_DDP{,_STATS} Aurelien Aptel
2023-01-09 13:30 ` [PATCH v8 03/25] net/ethtool: add ULP_DDP_{GET,SET} operations for caps and stats Aurelien Aptel
2023-01-12  4:46   ` Jakub Kicinski
2023-01-12 18:50     ` Aurelien Aptel
2023-01-12 23:47       ` Jakub Kicinski
2023-01-13 14:16         ` Aurelien Aptel
2023-01-09 13:30 ` [PATCH v8 04/25] Documentation: document netlink ULP_DDP_GET/SET messages Aurelien Aptel
2023-01-09 13:30 ` [PATCH v8 05/25] iov_iter: skip copy if src == dst for direct data placement Aurelien Aptel
2023-01-09 13:30 ` [PATCH v8 06/25] net/tls,core: export get_netdev_for_sock Aurelien Aptel
2023-01-09 17:49   ` kernel test robot
2023-01-09 19:10   ` kernel test robot
2023-01-09 13:30 ` [PATCH v8 07/25] nvme-tcp: Add DDP offload control path Aurelien Aptel
2023-01-09 13:30 ` [PATCH v8 08/25] nvme-tcp: Add DDP data-path Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 09/25] nvme-tcp: RX DDGST offload Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 10/25] nvme-tcp: Deal with netdevice DOWN events Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 11/25] nvme-tcp: Add modparam to control the ULP offload enablement Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 12/25] Documentation: add ULP DDP offload documentation Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 13/25] net/mlx5e: Rename from tls to transport static params Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 14/25] net/mlx5e: Refactor ico sq polling to get budget Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 15/25] net/mlx5e: Have mdev pointer directly on the icosq structure Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 16/25] net/mlx5e: Refactor doorbell function to allow avoiding a completion Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 17/25] net/mlx5: Add NVMEoTCP caps, HW bits, 128B CQE and enumerations Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 18/25] net/mlx5e: NVMEoTCP, offload initialization Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 19/25] net/mlx5e: TCP flow steering for nvme-tcp acceleration Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 20/25] net/mlx5e: NVMEoTCP, use KLM UMRs for buffer registration Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 21/25] net/mlx5e: NVMEoTCP, queue init/teardown Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 22/25] net/mlx5e: NVMEoTCP, ddp setup and resync Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 23/25] net/mlx5e: NVMEoTCP, async ddp invalidation Aurelien Aptel
2023-01-09 13:31 ` [PATCH v8 24/25] net/mlx5e: NVMEoTCP, data-path for DDP+DDGST offload Aurelien Aptel
2023-01-09 13:31 ` Aurelien Aptel [this message]
