Message-Id: <20170308202253.806267416@goodmis.org>
User-Agent: quilt/0.63-1
Date: Wed, 08 Mar 2017 15:22:28 -0500
From: Steven Rostedt
To: linux-kernel@vger.kernel.org, linux-rt-users
Cc: Thomas Gleixner, Carsten Emde, Sebastian Andrzej Siewior, John Kacur,
    Paul Gortmaker, Julia Cartwright, stable-rt@vger.kernel.org
Subject: [PATCH RT 1/7] radix-tree: use local locks
References: <20170308202227.841639815@goodmis.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=ISO-8859-15
Content-Disposition: inline; filename=0001-radix-tree-use-local-locks.patch

3.12.70-rt95-rc1 stable review patch.
If anyone has any objections, please let me know.

------------------

From: Sebastian Andrzej Siewior

The preload functionality uses per-CPU variables and disables preemption
to ensure that it does not switch CPUs during its usage. This patch uses
local_locks() instead of preempt_disable() for the same purpose and to
remain preemptible on -RT.

Cc: stable-rt@vger.kernel.org
Reported-and-debugged-by: Mike Galbraith
Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Steven Rostedt (VMware)
---
 include/linux/radix-tree.h | 12 ++----------
 lib/radix-tree.c           | 23 ++++++++++++++---------
 2 files changed, 16 insertions(+), 19 deletions(-)

diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 148ae0fb87ad..c0df4bc0d297 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -227,13 +227,10 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
 			void ***results, unsigned long *indices,
 			unsigned long first_index, unsigned int max_items);
-#ifndef CONFIG_PREEMPT_RT_FULL
 int radix_tree_preload(gfp_t gfp_mask);
 int radix_tree_maybe_preload(gfp_t gfp_mask);
-#else
-static inline int radix_tree_preload(gfp_t gm) { return 0; }
-static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; }
-#endif
+void radix_tree_preload_end(void);
+
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
 			unsigned long index, unsigned int tag);
@@ -256,11 +253,6 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
 int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
 unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
 
-static inline void radix_tree_preload_end(void)
-{
-	preempt_enable_nort();
-}
-
 /**
  * struct radix_tree_iter - radix tree iterator state
  *
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 980e869e19a8..1236857dfa1f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -33,7 +33,7 @@
 #include <linux/bitops.h>
 #include <linux/rcupdate.h>
 #include <linux/hardirq.h>		/* in_interrupt() */
-
+#include <linux/locallock.h>
 
 #ifdef __KERNEL__
 #define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
@@ -94,6 +94,7 @@ struct radix_tree_preload {
 	struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);
 
 static inline void *ptr_to_indirect(void *ptr)
 {
@@ -221,13 +222,13 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		 * succeed in getting a node here (and never reach
 		 * kmem_cache_alloc)
 		 */
-		rtp = &get_cpu_var(radix_tree_preloads);
+		rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
 		if (rtp->nr) {
 			ret = rtp->nodes[rtp->nr - 1];
 			rtp->nodes[rtp->nr - 1] = NULL;
 			rtp->nr--;
 		}
-		put_cpu_var(radix_tree_preloads);
+		put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
 	}
 	if (ret == NULL)
 		ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
@@ -262,7 +263,6 @@ radix_tree_node_free(struct radix_tree_node *node)
 	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
 }
 
-#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Load up this CPU's radix_tree_node buffer with sufficient objects to
  * ensure that the addition of a single element in the tree cannot fail.  On
@@ -278,14 +278,14 @@ static int __radix_tree_preload(gfp_t gfp_mask)
 	struct radix_tree_node *node;
 	int ret = -ENOMEM;
 
-	preempt_disable();
+	local_lock(radix_tree_preloads_lock);
 	rtp = &__get_cpu_var(radix_tree_preloads);
 	while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
-		preempt_enable();
+		local_unlock(radix_tree_preloads_lock);
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
-		preempt_disable();
+		local_lock(radix_tree_preloads_lock);
 		rtp = &__get_cpu_var(radix_tree_preloads);
 		if (rtp->nr < ARRAY_SIZE(rtp->nodes))
 			rtp->nodes[rtp->nr++] = node;
@@ -324,11 +324,16 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
 	if (gfp_mask & __GFP_WAIT)
 		return __radix_tree_preload(gfp_mask);
 	/* Preloading doesn't help anything with this gfp mask, skip it */
-	preempt_disable();
+	local_lock(radix_tree_preloads_lock);
 	return 0;
 }
 EXPORT_SYMBOL(radix_tree_maybe_preload);
-#endif
+
+void radix_tree_preload_end(void)
+{
+	local_unlock(radix_tree_preloads_lock);
+}
+EXPORT_SYMBOL(radix_tree_preload_end);
 
 /*
  * Return the maximum key which can be store into a
-- 
2.10.2
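
P.S. For anyone following along who has not seen the -RT local-lock
conversions before, below is a minimal sketch of the pattern this patch
applies, reduced to a toy per-CPU object cache. It is illustrative only:
the my_cache* names are invented for this note, and it builds only against
an -RT tree that provides <linux/locallock.h> with DEFINE_LOCAL_IRQ_LOCK(),
local_lock() and local_unlock(), the same primitives used in the diff above.

/*
 * Hypothetical example, not part of the patch: a per-CPU object cache
 * whose critical sections were previously bracketed by preempt_disable()/
 * preempt_enable() and now take a local lock instead.
 */
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/locallock.h>

struct my_cache {
	int nr;
	void *objs[4];
};
static DEFINE_PER_CPU(struct my_cache, my_caches);
static DEFINE_LOCAL_IRQ_LOCK(my_cache_lock);

/*
 * Fill this CPU's cache.  The allocation may sleep, so the lock is
 * dropped around it and the per-CPU pointer reloaded afterwards, just
 * as __radix_tree_preload() does above.  On success we return with the
 * local lock held; the caller ends the critical section with
 * my_cache_preload_end(), mirroring radix_tree_preload_end().
 */
static int my_cache_preload(gfp_t gfp_mask)
{
	struct my_cache *c;
	void *obj;

	local_lock(my_cache_lock);		/* was: preempt_disable() */
	c = &__get_cpu_var(my_caches);
	while (c->nr < ARRAY_SIZE(c->objs)) {
		local_unlock(my_cache_lock);	/* was: preempt_enable() */
		obj = kmalloc(64, gfp_mask);
		if (obj == NULL)
			return -ENOMEM;		/* lock already dropped */
		local_lock(my_cache_lock);
		c = &__get_cpu_var(my_caches);	/* may have migrated CPUs */
		if (c->nr < ARRAY_SIZE(c->objs))
			c->objs[c->nr++] = obj;
		else
			kfree(obj);
	}
	return 0;
}

static void my_cache_preload_end(void)
{
	local_unlock(my_cache_lock);		/* was: preempt_enable() */
}

On a non-RT configuration the local-lock operations fold back into
preempt_disable()/preempt_enable(), so behavior there is unchanged; on
-RT they take a per-CPU sleeping lock, which still keeps other users on
the same CPU out of the critical section while leaving it preemptible.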