From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: Honnappa.Nagarahalli@arm.com, thomas@monjalon.net, Tyler Retzlaff <roretzla@linux.microsoft.com>
Subject: [PATCH 04/17] eal: use previous value atomic fetch operations
Date: Wed, 1 Mar 2023 16:47:35 -0800
Message-Id: <1677718068-2412-5-git-send-email-roretzla@linux.microsoft.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1677718068-2412-1-git-send-email-roretzla@linux.microsoft.com>
References: <1677718068-2412-1-git-send-email-roretzla@linux.microsoft.com>

Use __atomic_fetch_{add,and,or,sub,xor} instead of
__atomic_{add,and,or,sub,xor}_fetch when we have no interest in the
result of the operation. This removes unnecessary codegen that computed
a result of the atomic operation which was never used.

The change also brings us into closer alignment with the atomics
available in the C11 standard and will reduce review effort when they
are integrated.
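For reviewers, a minimal sketch of the distinction (not part of the
patch; the counter_inc_* helpers below are made up for illustration and
assume the GCC/Clang __atomic builtins): both forms perform the same
atomic read-modify-write and differ only in which value they return, so
when the caller discards the result the fetch-first form is sufficient.

#include <stdint.h>

/* Illustrative sketch only; not part of this series. */
static inline void
counter_inc_discard_result(uint64_t *counter)
{
	/* __atomic_fetch_add() returns the value *before* the addition;
	 * the return value is simply ignored here.
	 */
	__atomic_fetch_add(counter, 1, __ATOMIC_RELAXED);
}

static inline uint64_t
counter_inc_return_new(uint64_t *counter)
{
	/* __atomic_add_fetch() returns the value *after* the addition,
	 * which the compiler may need extra instructions to materialize
	 * when that result is actually consumed.
	 */
	return __atomic_add_fetch(counter, 1, __ATOMIC_RELAXED);
}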
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 lib/eal/common/eal_common_trace.c |  8 ++++----
 lib/eal/common/rte_service.c      |  8 ++++----
 lib/eal/ppc/include/rte_atomic.h  | 16 ++++++++--------
 3 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/lib/eal/common/eal_common_trace.c b/lib/eal/common/eal_common_trace.c
index 75162b7..cb980af 100644
--- a/lib/eal/common/eal_common_trace.c
+++ b/lib/eal/common/eal_common_trace.c
@@ -103,10 +103,10 @@ struct trace_point_head *
 trace_mode_set(rte_trace_point_t *t, enum rte_trace_mode mode)
 {
 	if (mode == RTE_TRACE_MODE_OVERWRITE)
-		__atomic_and_fetch(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
+		__atomic_fetch_and(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
 			__ATOMIC_RELEASE);
 	else
-		__atomic_or_fetch(t, __RTE_TRACE_FIELD_ENABLE_DISCARD,
+		__atomic_fetch_or(t, __RTE_TRACE_FIELD_ENABLE_DISCARD,
 			__ATOMIC_RELEASE);
 }
 
@@ -155,7 +155,7 @@ rte_trace_mode rte_trace_mode_get(void)
 	prev = __atomic_fetch_or(t, __RTE_TRACE_FIELD_ENABLE_MASK,
 		__ATOMIC_RELEASE);
 	if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) == 0)
-		__atomic_add_fetch(&trace.status, 1, __ATOMIC_RELEASE);
+		__atomic_fetch_add(&trace.status, 1, __ATOMIC_RELEASE);
 	return 0;
 }
 
@@ -169,7 +169,7 @@ rte_trace_mode rte_trace_mode_get(void)
 	prev = __atomic_fetch_and(t, ~__RTE_TRACE_FIELD_ENABLE_MASK,
 		__ATOMIC_RELEASE);
 	if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) != 0)
-		__atomic_sub_fetch(&trace.status, 1, __ATOMIC_RELEASE);
+		__atomic_fetch_sub(&trace.status, 1, __ATOMIC_RELEASE);
 	return 0;
 }
 
diff --git a/lib/eal/common/rte_service.c b/lib/eal/common/rte_service.c
index 42ca1d0..7ab48f2 100644
--- a/lib/eal/common/rte_service.c
+++ b/lib/eal/common/rte_service.c
@@ -464,11 +464,11 @@ struct core_state {
 	/* Increment num_mapped_cores to reflect that this core is
 	 * now mapped capable of running the service.
 	 */
-	__atomic_add_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
 
 	int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);
 
-	__atomic_sub_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_sub(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
 
 	return ret;
 }
 
@@ -638,12 +638,12 @@ struct core_state {
 
 		if (*set && !lcore_mapped) {
 			lcore_states[lcore].service_mask |= sid_mask;
-			__atomic_add_fetch(&rte_services[sid].num_mapped_cores,
+			__atomic_fetch_add(&rte_services[sid].num_mapped_cores,
 				1, __ATOMIC_RELAXED);
 		}
 		if (!*set && lcore_mapped) {
 			lcore_states[lcore].service_mask &= ~(sid_mask);
-			__atomic_sub_fetch(&rte_services[sid].num_mapped_cores,
+			__atomic_fetch_sub(&rte_services[sid].num_mapped_cores,
 				1, __ATOMIC_RELAXED);
 		}
 	}
diff --git a/lib/eal/ppc/include/rte_atomic.h b/lib/eal/ppc/include/rte_atomic.h
index 663b4d3..2ab735b 100644
--- a/lib/eal/ppc/include/rte_atomic.h
+++ b/lib/eal/ppc/include/rte_atomic.h
@@ -60,13 +60,13 @@ static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
 static inline void
 rte_atomic16_inc(rte_atomic16_t *v)
 {
-	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }
 
 static inline void
 rte_atomic16_dec(rte_atomic16_t *v)
 {
-	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }
 
 static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
@@ -102,13 +102,13 @@ static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
 static inline void
 rte_atomic32_inc(rte_atomic32_t *v)
 {
-	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }
 
 static inline void
 rte_atomic32_dec(rte_atomic32_t *v)
 {
-	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }
 
 static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
@@ -157,25 +157,25 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 static inline void
 rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
 {
-	__atomic_add_fetch(&v->cnt, inc, __ATOMIC_ACQUIRE);
+	__atomic_fetch_add(&v->cnt, inc, __ATOMIC_ACQUIRE);
 }
 
 static inline void
 rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
 {
-	__atomic_sub_fetch(&v->cnt, dec, __ATOMIC_ACQUIRE);
+	__atomic_fetch_sub(&v->cnt, dec, __ATOMIC_ACQUIRE);
 }
 
 static inline void
 rte_atomic64_inc(rte_atomic64_t *v)
 {
-	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }
 
 static inline void
 rte_atomic64_dec(rte_atomic64_t *v)
 {
-	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }
 
 static inline int64_t
-- 
1.8.3.1