From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1756056AbcFGTgS (ORCPT );
	Tue, 7 Jun 2016 15:36:18 -0400
Received: from g9t1613g.houston.hpe.com ([15.241.32.99]:32300 "EHLO
	g9t1613g.houston.hpe.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1755902AbcFGTgP (ORCPT );
	Tue, 7 Jun 2016 15:36:15 -0400
From: Waiman Long
To: Alexander Viro, Jan Kara, Jeff Layton, "J. Bruce Fields", Tejun Heo,
	Christoph Lameter
Cc: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	Ingo Molnar, Peter Zijlstra, Andi Kleen, Dave Chinner, Boqun Feng,
	Scott J Norton, Douglas Hatch, Waiman Long
Subject: [RESEND PATCH 2/5] lib/dlock-list: Add __percpu modifier for parameters
Date: Tue, 7 Jun 2016 15:35:52 -0400
Message-Id: <1465328155-56754-3-git-send-email-Waiman.Long@hpe.com>
X-Mailer: git-send-email 1.7.1
In-Reply-To: <1465328155-56754-1-git-send-email-Waiman.Long@hpe.com>
References: <1465328155-56754-1-git-send-email-Waiman.Long@hpe.com>
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

From: Boqun Feng

Add the __percpu modifier to the appropriate parameters to:

1. Distinguish pointers to per-cpu structures from pointers to
   ordinary structures, which improves readability.

2. Prevent sparse from complaining about "different address spaces".

Signed-off-by: Boqun Feng
Signed-off-by: Waiman Long
---
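The hunks below only add annotations. For reviewers less familiar with
sparse, here is a minimal, self-contained sketch (not part of this
patch; every name in it is made up for illustration) of what __percpu
buys us: alloc_percpu() hands back an annotated pointer that must go
through an accessor such as per_cpu_ptr() before it is dereferenced,
and sparse flags any direct dereference as an address-space violation.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct foo {
	int count;
};

/* __percpu marks this as a pointer into the per-cpu address space */
static struct foo __percpu *foo_ptr;

static int foo_setup(void)
{
	int cpu;

	foo_ptr = alloc_percpu(struct foo);	/* returns a __percpu pointer */
	if (!foo_ptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		/*
		 * per_cpu_ptr() converts the __percpu pointer into a
		 * plain pointer to this CPU's instance. Writing
		 * foo_ptr->count directly here instead would make
		 * sparse warn about "different address spaces".
		 */
		struct foo *f = per_cpu_ptr(foo_ptr, cpu);

		f->count = 0;
	}
	return 0;
}

The same reasoning applies to the dlock-list parameters annotated below.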
 include/linux/dlock-list.h | 18 ++++++++++--------
 lib/dlock-list.c           |  5 +++--
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/include/linux/dlock-list.h b/include/linux/dlock-list.h
index 43355f8..a8e1fd2 100644
--- a/include/linux/dlock-list.h
+++ b/include/linux/dlock-list.h
@@ -108,7 +108,8 @@ static inline void init_dlock_list_node(struct dlock_list_node *node)
 	node->lockptr = NULL;
 }
 
-static inline void free_dlock_list_head(struct dlock_list_head **pdlock_head)
+static inline void
+free_dlock_list_head(struct dlock_list_head __percpu **pdlock_head)
 {
 	free_percpu(*pdlock_head);
 	*pdlock_head = NULL;
@@ -117,7 +118,7 @@ static inline void free_dlock_list_head(struct dlock_list_head **pdlock_head)
 /*
  * Check if all the per-cpu lists are empty
  */
-static inline bool dlock_list_empty(struct dlock_list_head *dlock_head)
+static inline bool dlock_list_empty(struct dlock_list_head __percpu *dlock_head)
 {
 	int cpu;
 
@@ -134,7 +135,7 @@ static inline bool dlock_list_empty(struct dlock_list_head *dlock_head)
  * Return: true if the entry is found, false if all the lists exhausted
  */
 static __always_inline bool
-__dlock_list_next_cpu(struct dlock_list_head *head,
+__dlock_list_next_cpu(struct dlock_list_head __percpu *head,
 		      struct dlock_list_state *state)
 {
 	if (state->lock)
@@ -172,7 +173,7 @@ next_cpu:
  *
  * Return: true if the next entry is found, false if all the entries iterated
  */
-static inline bool dlock_list_iterate(struct dlock_list_head *head,
+static inline bool dlock_list_iterate(struct dlock_list_head __percpu *head,
 				      struct dlock_list_state *state)
 {
 	/*
@@ -200,8 +201,9 @@ static inline bool dlock_list_iterate(struct dlock_list_head *head,
  *
  * Return: true if the next entry is found, false if all the entries iterated
  */
-static inline bool dlock_list_iterate_safe(struct dlock_list_head *head,
-					   struct dlock_list_state *state)
+static inline bool
+dlock_list_iterate_safe(struct dlock_list_head __percpu *head,
+			struct dlock_list_state *state)
 {
 	/*
 	 * Find next entry
@@ -226,8 +228,8 @@ static inline bool dlock_list_iterate_safe(struct dlock_list_head *head,
 }
 
 extern void dlock_list_add(struct dlock_list_node *node,
-			   struct dlock_list_head *head);
+			   struct dlock_list_head __percpu *head);
 extern void dlock_list_del(struct dlock_list_node *node);
-extern int init_dlock_list_head(struct dlock_list_head **pdlock_head);
+extern int init_dlock_list_head(struct dlock_list_head __percpu **pdlock_head);
 
 #endif /* __LINUX_DLOCK_LIST_H */
diff --git a/lib/dlock-list.c b/lib/dlock-list.c
index 84d4623..e1a1930 100644
--- a/lib/dlock-list.c
+++ b/lib/dlock-list.c
@@ -27,7 +27,7 @@ static struct lock_class_key dlock_list_key;
 /*
  * Initialize the per-cpu list head
  */
-int init_dlock_list_head(struct dlock_list_head **pdlock_head)
+int init_dlock_list_head(struct dlock_list_head __percpu **pdlock_head)
 {
 	struct dlock_list_head *dlock_head;
 	int cpu;
@@ -53,7 +53,8 @@ int init_dlock_list_head(struct dlock_list_head **pdlock_head)
 * function is called. However, deletion may be done by a different CPU.
 * So we still need to use a lock to protect the content of the list.
 */
-void dlock_list_add(struct dlock_list_node *node, struct dlock_list_head *head)
+void dlock_list_add(struct dlock_list_node *node,
+		    struct dlock_list_head __percpu *head)
 {
 	struct dlock_list_head *myhead;
-- 
1.7.1