From mboxrd@z Thu Jan 1 00:00:00 1970 From: Wenfeng Liu Subject: [PATCH v3] mempool: don't check mempool flags when cache is enabled Date: Tue, 10 Jan 2017 08:26:42 +0000 Message-ID: <1484036802-3031-1-git-send-email-liuwf@arraynetworks.com.cn> References: <1484032450-1329-1-git-send-email-liuwf@arraynetworks.com.cn> Mime-Version: 1.0 Content-Type: text/plain Cc: To: , Return-path: Received: from mail01.arraynetworks.com.cn (mail.arraynetworks.com.cn [124.42.99.121]) by dpdk.org (Postfix) with ESMTP id D2A7B1E2F for ; Tue, 10 Jan 2017 09:39:54 +0100 (CET) In-Reply-To: <1484032450-1329-1-git-send-email-liuwf@arraynetworks.com.cn> List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Currently we check the mempool flags when we put/get objects to/from the mempool. However, this makes the cache useless when the mempool is created with the SC|SP, SC|MP, or MC|SP flag combinations. This patch makes the cache available in the above cases and improves performance. 
Signed-off-by: Wenfeng Liu --- lib/librte_mempool/rte_mempool.h | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h index d315d42..aca2f1b 100644 --- a/lib/librte_mempool/rte_mempool.h +++ b/lib/librte_mempool/rte_mempool.h @@ -1038,19 +1038,15 @@ static inline struct rte_mempool_cache *__attribute__((always_inline)) */ static inline void __attribute__((always_inline)) __mempool_generic_put(struct rte_mempool *mp, void * const *obj_table, - unsigned n, struct rte_mempool_cache *cache, int flags) + unsigned n, struct rte_mempool_cache *cache) { void **cache_objs; /* increment stat now, adding in mempool always success */ __MEMPOOL_STAT_ADD(mp, put, n); - /* No cache provided or single producer */ - if (unlikely(cache == NULL || flags & MEMPOOL_F_SP_PUT)) - goto ring_enqueue; - - /* Go straight to ring if put would overflow mem allocated for cache */ - if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE)) + /* No cache provided or if put would overflow mem allocated for cache */ + if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE)) goto ring_enqueue; cache_objs = &cache->objs[cache->len]; @@ -1104,10 +1100,10 @@ static inline void __attribute__((always_inline)) */ static inline void __attribute__((always_inline)) rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table, - unsigned n, struct rte_mempool_cache *cache, int flags) + unsigned n, struct rte_mempool_cache *cache, __rte_unused int flags) { __mempool_check_cookies(mp, obj_table, n, 0); - __mempool_generic_put(mp, obj_table, n, cache, flags); + __mempool_generic_put(mp, obj_table, n, cache); } /** @@ -1244,15 +1240,14 @@ static inline void __attribute__((always_inline)) */ static inline int __attribute__((always_inline)) __mempool_generic_get(struct rte_mempool *mp, void **obj_table, - unsigned n, struct rte_mempool_cache *cache, int flags) + unsigned n, struct rte_mempool_cache 
*cache) { int ret; uint32_t index, len; void **cache_objs; - /* No cache provided or single consumer */ - if (unlikely(cache == NULL || flags & MEMPOOL_F_SC_GET || - n >= cache->size)) + /* No cache provided or cannot be satisfied from cache */ + if (unlikely(cache == NULL || n >= cache->size)) goto ring_dequeue; cache_objs = cache->objs; @@ -1326,10 +1321,10 @@ static inline int __attribute__((always_inline)) */ static inline int __attribute__((always_inline)) rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n, - struct rte_mempool_cache *cache, int flags) + struct rte_mempool_cache *cache, __rte_unused int flags) { int ret; - ret = __mempool_generic_get(mp, obj_table, n, cache, flags); + ret = __mempool_generic_get(mp, obj_table, n, cache); if (ret == 0) __mempool_check_cookies(mp, obj_table, n, 1); return ret; -- 1.8.3.1