[2/5] rhashtable: reorder some inline functions and macros.

Message ID 155503392797.17793.15780367123758287135.stgit@noble.brown
State Accepted
Commit c5783311a1248c437614d438b69c5f31fe483ecb
Series
  • Fix rhashtable bit-locking for m68k

Commit Message

NeilBrown April 12, 2019, 1:52 a.m. UTC
This patch only moves some code around; it doesn't
change the code at all.
A subsequent patch will benefit from this, as it needs
to add calls to functions which are now defined before
the call-site but weren't before.

Signed-off-by: NeilBrown <neilb@suse.com>
---
 include/linux/rhashtable.h |  142 ++++++++++++++++++++++----------------------
 1 file changed, 71 insertions(+), 71 deletions(-)
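
The reordering matters because these helpers are static inline
functions in a header: since C99 a function must be declared before
it is called, so new call-sites placed earlier in the file would
otherwise need separate forward declarations.  A minimal sketch of
the constraint (illustrative names, not from the patch):

    /* Before (fails): helper() is not yet declared at the call-site. */
    static inline int user(void) { return helper(); }
    static inline int helper(void) { return 0; }

    /* After (works): the definition precedes the call-site, which is
     * what this patch does for rht_lock() and friends. */
    static inline int helper(void) { return 0; }
    static inline int user(void) { return helper(); }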

Comments

Jakub Kicinski May 14, 2019, 7:25 p.m. UTC | #1
On Fri, 12 Apr 2019 11:52:08 +1000, NeilBrown wrote:
> This patch only moves some code around; it doesn't
> change the code at all.
> A subsequent patch will benefit from this, as it needs
> to add calls to functions which are now defined before
> the call-site but weren't before.
> 
> Signed-off-by: NeilBrown <neilb@suse.com>

Hi Neil, I think this patch introduces a slew of sparse warnings:

include/linux/rhashtable.h:724:23: warning: incorrect type in argument 2 (different address spaces)
include/linux/rhashtable.h:724:23:    expected struct rhash_lock_head **bkt
include/linux/rhashtable.h:724:23:    got struct rhash_lock_head [noderef] <asn:4>**[assigned] bkt
include/linux/rhashtable.h:728:33: warning: incorrect type in argument 2 (different address spaces)
include/linux/rhashtable.h:728:33:    expected struct rhash_lock_head **bkt
include/linux/rhashtable.h:728:33:    got struct rhash_lock_head [noderef] <asn:4>**[assigned] bkt
include/linux/rhashtable.h:760:41: warning: incorrect type in argument 2 (different address spaces)
include/linux/rhashtable.h:760:41:    expected struct rhash_lock_head **bkt
include/linux/rhashtable.h:760:41:    got struct rhash_lock_head [noderef] <asn:4>**[assigned] bkt
include/linux/rhashtable.h:801:25: warning: incorrect type in argument 2 (different address spaces)
include/linux/rhashtable.h:801:25:    expected struct rhash_lock_head **bkt
include/linux/rhashtable.h:801:25:    got struct rhash_lock_head [noderef] <asn:4>**[assigned] bkt
include/linux/rhashtable.h:1003:23: warning: incorrect type in argument 2 (different address spaces)
include/linux/rhashtable.h:1003:23:    expected struct rhash_lock_head **bkt
include/linux/rhashtable.h:1003:23:    got struct rhash_lock_head [noderef] <asn:4>**[assigned] bkt
include/linux/rhashtable.h:1047:41: warning: incorrect type in argument 2 (different address spaces)
include/linux/rhashtable.h:1047:41:    expected struct rhash_lock_head **bkt
include/linux/rhashtable.h:1047:41:    got struct rhash_lock_head [noderef] <asn:4>**[assigned] bkt
include/linux/rhashtable.h:1054:25: warning: incorrect type in argument 2 (different address spaces)
include/linux/rhashtable.h:1054:25:    expected struct rhash_lock_head **bkt
include/linux/rhashtable.h:1054:25:    got struct rhash_lock_head [noderef] <asn:4>**[assigned] bkt

Is there any chance of getting those fixed?  Presumably a __force
would be appropriate?  Maybe like this:

diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index f7714d3b46bd..997017a85032 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -325,27 +325,28 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  */
 
 static inline void rht_lock(struct bucket_table *tbl,
-                           struct rhash_lock_head **bkt)
+                           struct rhash_lock_head __rcu **bkt)
 {
+
        local_bh_disable();
-       bit_spin_lock(0, (unsigned long *)bkt);
+       bit_spin_lock(0, (unsigned long __force *)bkt);
        lock_map_acquire(&tbl->dep_map);
 }
 
 static inline void rht_lock_nested(struct bucket_table *tbl,
-                                  struct rhash_lock_head **bucket,
+                                  struct rhash_lock_head __rcu **bkt,
                                   unsigned int subclass)
 {
        local_bh_disable();
-       bit_spin_lock(0, (unsigned long *)bucket);
+       bit_spin_lock(0, (unsigned long __force *)bkt);
        lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
 }
 
 static inline void rht_unlock(struct bucket_table *tbl,
-                             struct rhash_lock_head **bkt)
+                             struct rhash_lock_head __rcu **bkt)
 {
        lock_map_release(&tbl->dep_map);
-       bit_spin_unlock(0, (unsigned long *)bkt);
+       bit_spin_unlock(0, (unsigned long __force *)bkt);
        local_bh_enable();
 }

Patch

diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 2711cbf01b64..c504cd820736 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -87,77 +87,6 @@  struct bucket_table {
 	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
-/*
- * We lock a bucket by setting BIT(1) in the pointer - this is always
- * zero in real pointers and in the nulls marker.
- * bit_spin_locks do not handle contention well, but the whole point
- * of the hashtable design is to achieve minimum per-bucket contention.
- * A nested hash table might not have a bucket pointer.  In that case
- * we cannot get a lock.  For remove and replace the bucket cannot be
- * interesting and doesn't need locking.
- * For insert we allocate the bucket if this is the last bucket_table,
- * and then take the lock.
- * Sometimes we unlock a bucket by writing a new pointer there.  In that
- * case we don't need to unlock, but we do need to reset state such as
- * local_bh. For that we have rht_assign_unlock().  As rcu_assign_pointer()
- * provides the same release semantics that bit_spin_unlock() provides,
- * this is safe.
- */
-
-static inline void rht_lock(struct bucket_table *tbl,
-			    struct rhash_lock_head **bkt)
-{
-	local_bh_disable();
-	bit_spin_lock(1, (unsigned long *)bkt);
-	lock_map_acquire(&tbl->dep_map);
-}
-
-static inline void rht_lock_nested(struct bucket_table *tbl,
-				   struct rhash_lock_head **bucket,
-				   unsigned int subclass)
-{
-	local_bh_disable();
-	bit_spin_lock(1, (unsigned long *)bucket);
-	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
-}
-
-static inline void rht_unlock(struct bucket_table *tbl,
-			      struct rhash_lock_head **bkt)
-{
-	lock_map_release(&tbl->dep_map);
-	bit_spin_unlock(1, (unsigned long *)bkt);
-	local_bh_enable();
-}
-
-static inline void rht_assign_unlock(struct bucket_table *tbl,
-				     struct rhash_lock_head __rcu **bkt,
-				     struct rhash_head *obj)
-{
-	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
-	lock_map_release(&tbl->dep_map);
-	rcu_assign_pointer(*p, obj);
-	preempt_enable();
-	__release(bitlock);
-	local_bh_enable();
-}
-
-/*
- * If 'p' is a bucket head and might be locked:
- *   rht_ptr() returns the address without the lock bit.
- *   rht_ptr_locked() returns the address WITH the lock bit.
- */
-static inline struct rhash_head __rcu *rht_ptr(const struct rhash_lock_head *p)
-{
-	return (void *)(((unsigned long)p) & ~BIT(1));
-}
-
-static inline struct rhash_lock_head __rcu *rht_ptr_locked(const
-							   struct rhash_head *p)
-{
-	return (void *)(((unsigned long)p) | BIT(1));
-}
-
 /*
  * NULLS_MARKER() expects a hash value with the low
  * bits mostly likely to be significant, and it discards
@@ -372,6 +301,77 @@  static inline struct rhash_lock_head __rcu **rht_bucket_insert(
 				     &tbl->buckets[hash];
 }
 
+/*
+ * We lock a bucket by setting BIT(1) in the pointer - this is always
+ * zero in real pointers and in the nulls marker.
+ * bit_spin_locks do not handle contention well, but the whole point
+ * of the hashtable design is to achieve minimum per-bucket contention.
+ * A nested hash table might not have a bucket pointer.  In that case
+ * we cannot get a lock.  For remove and replace the bucket cannot be
+ * interesting and doesn't need locking.
+ * For insert we allocate the bucket if this is the last bucket_table,
+ * and then take the lock.
+ * Sometimes we unlock a bucket by writing a new pointer there.  In that
+ * case we don't need to unlock, but we do need to reset state such as
+ * local_bh. For that we have rht_assign_unlock().  As rcu_assign_pointer()
+ * provides the same release semantics that bit_spin_unlock() provides,
+ * this is safe.
+ */
+
+static inline void rht_lock(struct bucket_table *tbl,
+			    struct rhash_lock_head **bkt)
+{
+	local_bh_disable();
+	bit_spin_lock(1, (unsigned long *)bkt);
+	lock_map_acquire(&tbl->dep_map);
+}
+
+static inline void rht_lock_nested(struct bucket_table *tbl,
+				   struct rhash_lock_head **bucket,
+				   unsigned int subclass)
+{
+	local_bh_disable();
+	bit_spin_lock(1, (unsigned long *)bucket);
+	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+}
+
+static inline void rht_unlock(struct bucket_table *tbl,
+			      struct rhash_lock_head **bkt)
+{
+	lock_map_release(&tbl->dep_map);
+	bit_spin_unlock(1, (unsigned long *)bkt);
+	local_bh_enable();
+}
+
+/*
+ * If 'p' is a bucket head and might be locked:
+ *   rht_ptr() returns the address without the lock bit.
+ *   rht_ptr_locked() returns the address WITH the lock bit.
+ */
+static inline struct rhash_head __rcu *rht_ptr(const struct rhash_lock_head *p)
+{
+	return (void *)(((unsigned long)p) & ~BIT(1));
+}
+
+static inline struct rhash_lock_head __rcu *rht_ptr_locked(const
+							   struct rhash_head *p)
+{
+	return (void *)(((unsigned long)p) | BIT(1));
+}
+
+static inline void rht_assign_unlock(struct bucket_table *tbl,
+				     struct rhash_lock_head __rcu **bkt,
+				     struct rhash_head *obj)
+{
+	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
+
+	lock_map_release(&tbl->dep_map);
+	rcu_assign_pointer(*p, obj);
+	preempt_enable();
+	__release(bitlock);
+	local_bh_enable();
+}
+
 /**
  * rht_for_each_from - iterate over hash chain from given head
  * @pos:	the &struct rhash_head to use as a loop cursor.