From: Joyce Kong <joyce.kong@arm.com>
To: olivier.matz@6wind.com, andrew.rybchenko@oktetlabs.ru,
	ruifeng.wang@arm.com, honnappa.nagarahalli@arm.com
Cc: dev@dpdk.org, nd@arm.com
Subject: [dpdk-dev] [PATCH v2] lib/mempool: distinguish debug counters from cache and pool
Date: Thu, 18 Mar 2021 19:20:22 +0800
Message-ID: <20210318112022.10510-1-joyce.kong@arm.com>

If cache is enabled, objects are retrieved from (and put back to) the
local cache first, and only then from (or to) the common pool. The
debug stats currently count objects moved through the cache and the
pool together; it is more useful to report the counts for the local
cache and the common pool separately.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
---
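Note (not part of the commit message): a minimal sketch of how the new
counters can be observed. It assumes a build with
RTE_LIBRTE_MEMPOOL_DEBUG defined; the pool name and sizes below are
arbitrary:

	#include <stdio.h>
	#include <rte_common.h>
	#include <rte_eal.h>
	#include <rte_mempool.h>

	int main(int argc, char **argv)
	{
		void *objs[32];

		if (rte_eal_init(argc, argv) < 0)
			return -1;

		/* 4095 objects of 64 B each, 32-object per-lcore cache */
		struct rte_mempool *mp = rte_mempool_create("test_pool",
				4095, 64, 32, 0, NULL, NULL, NULL, NULL,
				SOCKET_ID_ANY, 0);
		if (mp == NULL)
			return -1;

		/* A get may refill the cache from the pool; a put fills
		 * the cache and may flush it back to the pool. */
		if (rte_mempool_get_bulk(mp, objs, 32) == 0)
			rte_mempool_put_bulk(mp, objs, 32);

		/* Dumps the stats block, including put_objs_cache,
		 * put_objs_pool, put_objs_flush, get_success_objs_cache,
		 * get_success_objs_pool and get_success_objs_refill. */
		rte_mempool_dump(stdout, mp);

		return 0;
	}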
 lib/librte_mempool/rte_mempool.c | 12 ++++++
 lib/librte_mempool/rte_mempool.h | 64 ++++++++++++++++++++++----------
 2 files changed, 57 insertions(+), 19 deletions(-)
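
For reference, the new __MEMPOOL_OBJS_STAT_ADD helper below pastes its
two name arguments around "_objs_", so __MEMPOOL_OBJS_STAT_ADD(mp,
put, cache, n) expands to roughly:

	do {
		unsigned __lcore_id = rte_lcore_id();
		if (__lcore_id < RTE_MAX_LCORE)
			mp->stats[__lcore_id].put_objs_cache += n;
	} while (0);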

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index afb1239c8..9cb69367a 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -1244,8 +1244,14 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
 		sum.put_bulk += mp->stats[lcore_id].put_bulk;
 		sum.put_objs += mp->stats[lcore_id].put_objs;
+		sum.put_objs_cache += mp->stats[lcore_id].put_objs_cache;
+		sum.put_objs_pool += mp->stats[lcore_id].put_objs_pool;
+		sum.put_objs_flush += mp->stats[lcore_id].put_objs_flush;
 		sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk;
 		sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
+		sum.get_success_objs_cache += mp->stats[lcore_id].get_success_objs_cache;
+		sum.get_success_objs_pool += mp->stats[lcore_id].get_success_objs_pool;
+		sum.get_success_objs_refill += mp->stats[lcore_id].get_success_objs_refill;
 		sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
 		sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs;
 		sum.get_success_blks += mp->stats[lcore_id].get_success_blks;
@@ -1254,8 +1260,14 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
 	fprintf(f, "  stats:\n");
 	fprintf(f, "    put_bulk=%"PRIu64"\n", sum.put_bulk);
 	fprintf(f, "    put_objs=%"PRIu64"\n", sum.put_objs);
+	fprintf(f, "    put_objs_cache=%"PRIu64"\n", sum.put_objs_cache);
+	fprintf(f, "    put_objs_pool=%"PRIu64"\n", sum.put_objs_pool);
+	fprintf(f, "    put_objs_flush=%"PRIu64"\n", sum.put_objs_flush);
 	fprintf(f, "    get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
 	fprintf(f, "    get_success_objs=%"PRIu64"\n", sum.get_success_objs);
+	fprintf(f, "    get_success_objs_cache=%"PRIu64"\n", sum.get_success_objs_cache);
+	fprintf(f, "    get_success_objs_pool=%"PRIu64"\n", sum.get_success_objs_pool);
+	fprintf(f, "    get_success_objs_refill=%"PRIu64"\n", sum.get_success_objs_refill);
 	fprintf(f, "    get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
 	fprintf(f, "    get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
 	if (info.contig_block_size > 0) {
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index c551cf733..29d80d97e 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -66,12 +66,18 @@ extern "C" {
  * A structure that stores the mempool statistics (per-lcore).
  */
 struct rte_mempool_debug_stats {
-	uint64_t put_bulk;         /**< Number of puts. */
-	uint64_t put_objs;         /**< Number of objects successfully put. */
-	uint64_t get_success_bulk; /**< Successful allocation number. */
-	uint64_t get_success_objs; /**< Objects successfully allocated. */
-	uint64_t get_fail_bulk;    /**< Failed allocation number. */
-	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
+	uint64_t put_bulk;		  /**< Number of puts. */
+	uint64_t put_objs;		  /**< Number of objects successfully put. */
+	uint64_t put_objs_cache;	  /**< Number of objects successfully put to cache. */
+	uint64_t put_objs_pool;		  /**< Number of objects successfully put to pool. */
+	uint64_t put_objs_flush;	  /**< Number of times the cache was flushed to the pool. */
+	uint64_t get_success_bulk;	  /**< Successful allocation number. */
+	uint64_t get_success_objs;	  /**< Objects successfully allocated. */
+	uint64_t get_success_objs_cache;  /**< Objects successfully allocated from cache. */
+	uint64_t get_success_objs_pool;	  /**< Objects successfully allocated from pool. */
+	uint64_t get_success_objs_refill; /**< Number of times the cache was refilled from the pool. */
+	uint64_t get_fail_bulk;		  /**< Failed allocation number. */
+	uint64_t get_fail_objs;		  /**< Objects that failed to be allocated. */
 	/** Successful allocation number of contiguous blocks. */
 	uint64_t get_success_blks;
 	/** Failed allocation number of contiguous blocks. */
@@ -270,22 +276,34 @@ struct rte_mempool {
  *   Number to add to the object-oriented statistics.
  */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-#define __MEMPOOL_STAT_ADD(mp, name, n) do {                    \
-		unsigned __lcore_id = rte_lcore_id();           \
-		if (__lcore_id < RTE_MAX_LCORE) {               \
+#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
+		unsigned __lcore_id = rte_lcore_id();		\
+		if (__lcore_id < RTE_MAX_LCORE) {		\
 			mp->stats[__lcore_id].name##_objs += n;	\
-			mp->stats[__lcore_id].name##_bulk += 1;	\
-		}                                               \
-	} while(0)
-#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {                    \
-		unsigned int __lcore_id = rte_lcore_id();       \
-		if (__lcore_id < RTE_MAX_LCORE) {               \
+			mp->stats[__lcore_id].name##_bulk += 1; \
+		}						\
+	} while (0)
+#define __MEMPOOL_OBJS_STAT_ADD(mp, name1, name2, n) do {	\
+		unsigned __lcore_id = rte_lcore_id();		\
+		if (__lcore_id < RTE_MAX_LCORE)			\
+			mp->stats[__lcore_id].name1##_objs_##name2 += n;	\
+	} while (0)
+#define __MEMPOOL_OBJS_STAT_SUB(mp, name1, name2, n) do {	\
+		unsigned __lcore_id = rte_lcore_id();		\
+		if (__lcore_id < RTE_MAX_LCORE)			\
+			mp->stats[__lcore_id].name1##_objs_##name2 -= n;	\
+	} while (0)
+#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {	\
+		unsigned int __lcore_id = rte_lcore_id();	\
+		if (__lcore_id < RTE_MAX_LCORE) {		\
 			mp->stats[__lcore_id].name##_blks += n;	\
 			mp->stats[__lcore_id].name##_bulk += 1;	\
-		}                                               \
+		}						\
 	} while (0)
 #else
-#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
+#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
+#define __MEMPOOL_OBJS_STAT_ADD(mp, name1, name2, n) do {} while (0)
+#define __MEMPOOL_OBJS_STAT_SUB(mp, name1, name2, n) do {} while (0)
 #define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {} while (0)
 #endif
 
@@ -1305,10 +1323,13 @@ __mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
 
 	/* Add elements back into the cache */
 	rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);
-
+	__MEMPOOL_OBJS_STAT_ADD(mp, put, cache, n);
 	cache->len += n;
 
 	if (cache->len >= cache->flushthresh) {
+		__MEMPOOL_OBJS_STAT_SUB(mp, put, cache, cache->len - cache->size);
+		__MEMPOOL_OBJS_STAT_ADD(mp, put, pool, cache->len - cache->size);
+		__MEMPOOL_OBJS_STAT_ADD(mp, put, flush, 1);
 		rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
 				cache->len - cache->size);
 		cache->len = cache->size;
@@ -1318,6 +1339,7 @@ __mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
 
 ring_enqueue:
 
+	__MEMPOOL_OBJS_STAT_ADD(mp, put, pool, n);
 	/* push remaining objects in ring */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
@@ -1437,6 +1459,7 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 			goto ring_dequeue;
 		}
 
+		__MEMPOOL_OBJS_STAT_ADD(mp, get_success, refill, 1);
 		cache->len += req;
 	}
 
@@ -1447,6 +1470,7 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 	cache->len -= n;
 
 	__MEMPOOL_STAT_ADD(mp, get_success, n);
+	__MEMPOOL_OBJS_STAT_ADD(mp, get_success, cache, n);
 
 	return 0;
 
@@ -1457,8 +1481,10 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 
 	if (ret < 0)
 		__MEMPOOL_STAT_ADD(mp, get_fail, n);
-	else
+	else {
 		__MEMPOOL_STAT_ADD(mp, get_success, n);
+		__MEMPOOL_OBJS_STAT_ADD(mp, get_success, pool, n);
+	}
 
 	return ret;
 }
-- 
2.30.0


Thread overview: 22+ messages
2021-03-18 11:20 Joyce Kong [this message]
2021-04-07 14:28 ` [dpdk-dev] [PATCH v2] lib/mempool: distinguish debug counters from cache and pool Olivier Matz
2021-04-20  0:31   ` Dharmik Thakkar
2021-04-20  0:07 ` [dpdk-dev] [PATCH v3 0/2] lib/mempool: add debug stats Dharmik Thakkar
2021-04-20  0:07   ` [dpdk-dev] [PATCH v3 1/2] lib/mempool: make stats macro generic Dharmik Thakkar
2021-04-21 16:09     ` Olivier Matz
2021-04-20  0:08   ` [dpdk-dev] [PATCH v3 2/2] lib/mempool: distinguish debug counters from cache and pool Dharmik Thakkar
2021-04-21 16:29     ` Olivier Matz
2021-04-22 21:27       ` Dharmik Thakkar
2021-04-22 21:47         ` Honnappa Nagarahalli
2021-04-23 10:41       ` Kinsella, Ray
2021-04-23  1:29   ` [dpdk-dev] [PATCH v4 0/2] lib/mempool: add debug stats Dharmik Thakkar
2021-04-23  1:29     ` [dpdk-dev] [PATCH v4 1/2] lib/mempool: make stats macro generic Dharmik Thakkar
2021-04-23  1:29     ` [dpdk-dev] [PATCH v4 2/2] lib/mempool: distinguish debug counters from cache and pool Dharmik Thakkar
2021-04-23 20:29       ` Dharmik Thakkar
2021-04-27 12:18       ` Olivier Matz
2021-04-27 12:28     ` [dpdk-dev] [PATCH v4 0/2] lib/mempool: add debug stats Olivier Matz
2021-04-27 16:01     ` [dpdk-dev] [PATCH v5 0/2] mempool: " Dharmik Thakkar
2021-04-27 16:01       ` [dpdk-dev] [PATCH v5 1/2] mempool: make stats macro generic Dharmik Thakkar
2021-04-27 16:01       ` [dpdk-dev] [PATCH v5 2/2] mempool: distinguish debug counters from cache and pool Dharmik Thakkar
2021-05-04  6:54         ` Olivier Matz
2021-05-04  7:02       ` [dpdk-dev] [PATCH v5 0/2] mempool: add debug stats David Marchand
