From: Phil Yang <phil.yang@arm.com>
To: harry.van.haaren@intel.com, dev@dpdk.org
Cc: thomas@monjalon.net, david.marchand@redhat.com, konstantin.ananyev@intel.com,
	jerinj@marvell.com, hemant.agrawal@nxp.com, Honnappa.Nagarahalli@arm.com,
	gavin.hu@arm.com, nd@arm.com
Date: Fri, 24 Apr 2020 00:31:22 +0800
Message-Id: <1587659482-27133-7-git-send-email-phil.yang@arm.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1587659482-27133-1-git-send-email-phil.yang@arm.com>
References: <1584407863-774-8-git-send-email-phil.yang@arm.com>
	<1587659482-27133-1-git-send-email-phil.yang@arm.com>
Subject: [dpdk-dev] [PATCH v2 6/6] service: relax barriers with C11 atomics

The runstate, comp_runstate and app_runstate are used as guard variables
in the service core library. To guarantee inter-thread visibility of these
guard variables, the library uses rte_smp_r/wmb. This patch uses C11
atomic built-ins to relax these barriers.
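
For reference, below is a minimal standalone sketch of the store-release /
load-acquire guard-variable pattern this patch adopts. It is illustrative
only and not taken from rte_service.c; the payload/guard variables and the
writer/reader threads are invented for the example.

/* Minimal guard-variable sketch (illustrative only, not from
 * rte_service.c). The writer publishes 'payload' and then sets 'guard'
 * with store-release; a reader that observes guard == 1 with load-acquire
 * is guaranteed to also see payload == 42, with no rte_smp_wmb()/
 * rte_smp_rmb() pair needed. Build with: cc -pthread example.c
 */
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t payload;	/* data protected by the guard */
static uint32_t guard;		/* guard variable, accessed atomically */

static void *writer(void *arg)
{
	(void)arg;
	payload = 42;					/* plain store */
	__atomic_store_n(&guard, 1, __ATOMIC_RELEASE);	/* publish */
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* Spin until the guard is observed; the acquire load pairs with
	 * the writer's release store.
	 */
	while (__atomic_load_n(&guard, __ATOMIC_ACQUIRE) == 0)
		;
	printf("payload = %" PRIu32 "\n", payload);	/* always 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;
	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

The same pairing replaces each rte_smp_wmb()/rte_smp_rmb() below: the
store-release on a runstate guard variable synchronizes with the
load-acquire in the functions that read it.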
Signed-off-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>
---
 lib/librte_eal/common/rte_service.c | 115 ++++++++++++++++++++++++++----------
 1 file changed, 84 insertions(+), 31 deletions(-)

diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c
index 8cac265..dbb8211 100644
--- a/lib/librte_eal/common/rte_service.c
+++ b/lib/librte_eal/common/rte_service.c
@@ -265,7 +265,6 @@ rte_service_component_register(const struct rte_service_spec *spec,
 	s->spec = *spec;
 	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;
 
-	rte_smp_wmb();
 	rte_service_count++;
 
 	if (id_ptr)
@@ -282,7 +281,6 @@ rte_service_component_unregister(uint32_t id)
 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 
 	rte_service_count--;
-	rte_smp_wmb();
 
 	s->internal_flags &= ~(SERVICE_F_REGISTERED);
 
@@ -301,12 +299,17 @@ rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
 	struct rte_service_spec_impl *s;
 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 
+	/* comp_runstate acts as the guard variable. Use store-release
+	 * memory order. This synchronizes with load-acquire in the
+	 * service_run and service_runstate_get functions.
+	 */
 	if (runstate)
-		s->comp_runstate = RUNSTATE_RUNNING;
+		__atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,
+			__ATOMIC_RELEASE);
 	else
-		s->comp_runstate = RUNSTATE_STOPPED;
+		__atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,
+			__ATOMIC_RELEASE);
 
-	rte_smp_wmb();
 	return 0;
 }
 
@@ -316,12 +319,17 @@ rte_service_runstate_set(uint32_t id, uint32_t runstate)
 	struct rte_service_spec_impl *s;
 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 
+	/* app_runstate acts as the guard variable. Use store-release
+	 * memory order. This synchronizes with load-acquire in the
+	 * service_run and service_runstate_get functions.
+	 */
 	if (runstate)
-		s->app_runstate = RUNSTATE_RUNNING;
+		__atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,
+			__ATOMIC_RELEASE);
 	else
-		s->app_runstate = RUNSTATE_STOPPED;
+		__atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,
+			__ATOMIC_RELEASE);
 
-	rte_smp_wmb();
 	return 0;
 }
 
@@ -330,15 +338,24 @@ rte_service_runstate_get(uint32_t id)
 {
 	struct rte_service_spec_impl *s;
 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
-	rte_smp_rmb();
 
-	int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
-	int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
+	/* comp_runstate and app_runstate act as the guard variables.
+	 * Use load-acquire memory order. This synchronizes with
+	 * store-release in service state set functions.
+	 */
+	if (__atomic_load_n(&s->comp_runstate,
+			__ATOMIC_ACQUIRE) == RUNSTATE_RUNNING &&
+	    __atomic_load_n(&s->app_runstate,
+			__ATOMIC_ACQUIRE) == RUNSTATE_RUNNING) {
+		int check_disabled = !(s->internal_flags &
+			SERVICE_F_START_CHECK);
+		int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
 			__ATOMIC_RELAXED) > 0);
 
-	return (s->app_runstate == RUNSTATE_RUNNING) &&
-		(s->comp_runstate == RUNSTATE_RUNNING) &&
-		(check_disabled | lcore_mapped);
+		return (check_disabled | lcore_mapped);
+	} else
+		return 0;
+
 }
 
 static inline void
@@ -367,9 +384,15 @@ service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
 	if (!s)
 		return -EINVAL;
 
-	if (s->comp_runstate != RUNSTATE_RUNNING ||
-			s->app_runstate != RUNSTATE_RUNNING ||
-			!(service_mask & (UINT64_C(1) << i))) {
+	/* comp_runstate and app_runstate act as the guard variables.
+	 * Use load-acquire memory order. This synchronizes with
+	 * store-release in service state set functions.
+	 */
+	if (__atomic_load_n(&s->comp_runstate,
+			__ATOMIC_ACQUIRE) != RUNSTATE_RUNNING ||
+	    __atomic_load_n(&s->app_runstate,
+			__ATOMIC_ACQUIRE) != RUNSTATE_RUNNING ||
+	    !(service_mask & (UINT64_C(1) << i))) {
 		cs->service_active_on_lcore[i] = 0;
 		return -ENOEXEC;
 	}
@@ -434,7 +457,12 @@ service_runner_func(void *arg)
 	const int lcore = rte_lcore_id();
 	struct core_state *cs = &lcore_states[lcore];
 
-	while (cs->runstate == RUNSTATE_RUNNING) {
+	/* runstate acts as the guard variable. Use load-acquire
+	 * memory order here to synchronize with store-release
+	 * in runstate update functions.
+	 */
+	while (__atomic_load_n(&cs->runstate,
+			__ATOMIC_ACQUIRE) == RUNSTATE_RUNNING) {
 		const uint64_t service_mask = cs->service_mask;
 
 		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
@@ -445,8 +473,6 @@ service_runner_func(void *arg)
 		}
 
 		cs->loops++;
-
-		rte_smp_rmb();
 	}
 
 	lcore_config[lcore].state = WAIT;
@@ -614,15 +640,18 @@ rte_service_lcore_reset_all(void)
 		if (lcore_states[i].is_service_core) {
 			lcore_states[i].service_mask = 0;
 			set_lcore_state(i, ROLE_RTE);
-			lcore_states[i].runstate = RUNSTATE_STOPPED;
+			/* runstate acts as the guard variable. Use
+			 * store-release memory order here to synchronize
+			 * with load-acquire in runstate read functions.
+			 */
+			__atomic_store_n(&lcore_states[i].runstate,
+				RUNSTATE_STOPPED, __ATOMIC_RELEASE);
 		}
 	}
 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
 		__atomic_store_n(&rte_services[i].num_mapped_cores, 0,
 			__ATOMIC_RELAXED);
 
-	rte_smp_wmb();
-
 	return 0;
 }
 
@@ -638,9 +667,11 @@ rte_service_lcore_add(uint32_t lcore)
 
 	/* ensure that after adding a core the mask and state are defaults */
 	lcore_states[lcore].service_mask = 0;
-	lcore_states[lcore].runstate = RUNSTATE_STOPPED;
-
-	rte_smp_wmb();
+	/* Use store-release memory order here to synchronize with
+	 * load-acquire in runstate read functions.
+	 */
+	__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
+		__ATOMIC_RELEASE);
 	return rte_eal_wait_lcore(lcore);
 }
 
@@ -655,7 +686,12 @@ rte_service_lcore_del(uint32_t lcore)
 	if (!cs->is_service_core)
 		return -EINVAL;
 
-	if (cs->runstate != RUNSTATE_STOPPED)
+	/* runstate acts as the guard variable. Use load-acquire
+	 * memory order here to synchronize with store-release
+	 * in runstate update functions.
+	 */
+	if (__atomic_load_n(&cs->runstate,
+			__ATOMIC_ACQUIRE) != RUNSTATE_STOPPED)
 		return -EBUSY;
 
 	set_lcore_state(lcore, ROLE_RTE);
@@ -674,13 +710,21 @@ rte_service_lcore_start(uint32_t lcore)
 	if (!cs->is_service_core)
 		return -EINVAL;
 
-	if (cs->runstate == RUNSTATE_RUNNING)
+	/* runstate acts as the guard variable. Use load-acquire
+	 * memory order here to synchronize with store-release
+	 * in runstate update functions.
+	 */
+	if (__atomic_load_n(&cs->runstate,
+			__ATOMIC_ACQUIRE) == RUNSTATE_RUNNING)
 		return -EALREADY;
 
 	/* set core to run state first, and then launch otherwise it will
 	 * return immediately as runstate keeps it in the service poll loop
 	 */
-	cs->runstate = RUNSTATE_RUNNING;
+	/* Use store-release memory order here to synchronize with
+	 * load-acquire in runstate read functions.
+	 */
+	__atomic_store_n(&cs->runstate, RUNSTATE_RUNNING, __ATOMIC_RELEASE);
 
 	int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
 	/* returns -EBUSY if the core is already launched, 0 on success */
@@ -693,7 +737,12 @@ rte_service_lcore_stop(uint32_t lcore)
 	if (lcore >= RTE_MAX_LCORE)
 		return -EINVAL;
 
-	if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
+	/* runstate acts as the guard variable. Use load-acquire
+	 * memory order here to synchronize with store-release
+	 * in runstate update functions.
+	 */
+	if (__atomic_load_n(&lcore_states[lcore].runstate,
+			__ATOMIC_ACQUIRE) == RUNSTATE_STOPPED)
 		return -EALREADY;
 
 	uint32_t i;
@@ -713,7 +762,11 @@ rte_service_lcore_stop(uint32_t lcore)
 			return -EBUSY;
 	}
 
-	lcore_states[lcore].runstate = RUNSTATE_STOPPED;
+	/* Use store-release memory order here to synchronize with
+	 * load-acquire in runstate read functions.
+	 */
+	__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
+		__ATOMIC_RELEASE);
 
 	return 0;
 }
-- 
2.7.4