linux-kernel.vger.kernel.org archive mirror
* [RFC v2 0/7] generic hashtable implementation
@ 2012-08-03 14:23 Sasha Levin
  2012-08-03 14:23 ` [RFC v2 1/7] hashtable: introduce a small and naive hashtable Sasha Levin
                   ` (6 more replies)
  0 siblings, 7 replies; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 14:23 UTC (permalink / raw)
  To: torvalds
  Cc: tj, akpm, linux-kernel, linux-mm, paul.gortmaker, davem, rostedt,
	mingo, ebiederm, aarcange, ericvh, netdev, Sasha Levin

There are quite a few places in the kernel that implement a hashtable
in a very similar way. Instead of duplicating that implementation all
over the kernel, we can re-use the code.

This patch series introduces a very simple hashtable implementation, and
modifies six (random) modules to use it. I've limited it to six only
so that it would be easy to review and modify, and to show that even
at this number we already eliminate a significant amount of duplicated code.
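
To give a feel for the API before diving into the patches, here is a
small usage sketch (struct object, obj_hash and OBJ_CMP are made up for
this example; the actual interface is introduced in patch 1/7):

	struct object {
		int id;
		struct hlist_node node;
	};

	/* cmp_fn(obj, key): does this object match the key? */
	#define OBJ_CMP(obj, key)	((obj)->id == (key))

	/* 2^4 == 16 buckets, statically initialized */
	DEFINE_STATIC_HASHTABLE(obj_hash, 4);

	void object_example(struct object *new)
	{
		struct object *obj;

		/* the key is hashed with hash_long() internally */
		hash_add(&obj_hash, &new->node, new->id);

		/* walks a single bucket, returns NULL when not found */
		obj = hash_get(&obj_hash, new->id, struct object,
				node, OBJ_CMP);
		if (obj)
			hash_del(&obj->node);
	}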

If this basic hashtable looks ok, future code will include:

 - RCU support (rough sketch below)
 - Self locking (list_bl?)
 - Converting more code to use the hashtable
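
For the RCU bit, the rough idea is something along these lines (hand
waving only at this point - hash_add_rcu() below is not part of this
series, just a sketch of where it could go):

	static inline void hash_add_rcu(struct hash_table *ht,
					struct hlist_node *node, long key)
	{
		hlist_add_head_rcu(node,
			&ht->buckets[hash_long((unsigned long)key,
						HASH_BITS(ht))]);
	}

with lookups done under rcu_read_lock() using hlist_for_each_entry_rcu().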


Changes in V2:

 - Address review comments by Tejun Heo, Josh Triplett and Eric Biederman (Thanks all!).
 - Rebase on top of latest master.
 - Convert more places to use the hashtable. Hopefully it will trigger more reviews by
 touching more subsystems.

Sasha Levin (7):
  hashtable: introduce a small and naive hashtable
  user_ns: use new hashtable implementation
  mm,ksm: use new hashtable implementation
  workqueue: use new hashtable implementation
  mm/huge_memory: use new hashtable implementation
  tracepoint: use new hashtable implementation
  net,9p: use new hashtable implementation

 include/linux/hashtable.h |   75 ++++++++++++++++++++++++++++++++++++
 kernel/tracepoint.c       |   26 ++++--------
 kernel/user.c             |   53 +++++++++-----------------
 kernel/workqueue.c        |   93 +++++++--------------------------------------
 mm/huge_memory.c          |   56 +++++----------------------
 mm/ksm.c                  |   29 ++++----------
 net/9p/error.c            |   17 ++++----
 7 files changed, 144 insertions(+), 205 deletions(-)
 create mode 100644 include/linux/hashtable.h

-- 
1.7.8.6



* [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 14:23 [RFC v2 0/7] generic hashtable implementation Sasha Levin
@ 2012-08-03 14:23 ` Sasha Levin
  2012-08-03 17:15   ` Tejun Heo
  2012-08-03 17:39   ` Eric Dumazet
  2012-08-03 14:23 ` [RFC v2 2/7] user_ns: use new hashtable implementation Sasha Levin
                   ` (5 subsequent siblings)
  6 siblings, 2 replies; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 14:23 UTC (permalink / raw)
  To: torvalds
  Cc: tj, akpm, linux-kernel, linux-mm, paul.gortmaker, davem, rostedt,
	mingo, ebiederm, aarcange, ericvh, netdev, Sasha Levin

This hashtable implementation uses hlist buckets to provide a simple
hashtable, so that it doesn't have to be reimplemented all over the kernel.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 include/linux/hashtable.h |   75 +++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 75 insertions(+), 0 deletions(-)
 create mode 100644 include/linux/hashtable.h

diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
new file mode 100644
index 0000000..b004cf7
--- /dev/null
+++ b/include/linux/hashtable.h
@@ -0,0 +1,75 @@
+#ifndef _LINUX_HASHTABLE_H
+#define _LINUX_HASHTABLE_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/hash.h>
+
+struct hash_table {
+	size_t bits;
+	struct hlist_head buckets[];
+};
+
+#define DEFINE_STATIC_HASHTABLE(n, b)					\
+	static struct hash_table n = { .bits = (b),			\
+		.buckets = { [0 ... ((1 << (b)) - 1)] = HLIST_HEAD_INIT } }
+
+#define DEFINE_HASHTABLE(n, b)						\
+	union {								\
+		struct hash_table n;					\
+		struct {						\
+			size_t bits;					\
+			struct hlist_head buckets[1 << (b)];		\
+		} __##n ;						\
+	};
+
+#define HASH_BITS(name) ((name)->bits)
+#define HASH_SIZE(name) (1 << (HASH_BITS(name)))
+
+__attribute__ ((unused))
+static void hash_init(struct hash_table *ht, size_t bits)
+{
+	size_t i;
+
+	ht->bits = bits;
+	for (i = 0; i < (1 << bits); i++)
+		INIT_HLIST_HEAD(&ht->buckets[i]);
+}
+
+static void hash_add(struct hash_table *ht, struct hlist_node *node, long key)
+{
+	hlist_add_head(node,
+		&ht->buckets[hash_long((unsigned long)key, HASH_BITS(ht))]);
+}
+
+
+#define hash_get(name, key, type, member, cmp_fn)			\
+({									\
+	struct hlist_node *__node;					\
+	typeof(key) __key = key;					\
+	type *__obj = NULL;						\
+	hlist_for_each_entry(__obj, __node, &(name)->buckets[		\
+			hash_long((unsigned long) __key,		\
+			HASH_BITS(name))], member)			\
+		if (cmp_fn(__obj, __key))				\
+			break;						\
+	__node ? __obj : NULL;					\
+})
+
+__attribute__ ((unused))
+static void hash_del(struct hlist_node *node)
+{
+	hlist_del_init(node);
+}
+
+#define hash_for_each(bkt, node, name, obj, member)			\
+	for (bkt = 0; bkt < HASH_SIZE(name); bkt++)			\
+		hlist_for_each_entry(obj, node, &(name)->buckets[bkt], member)
+
+#define hash_for_each_possible(name, node, obj, member, key)		\
+	hlist_for_each_entry(obj, node,					\
+		&(name)->buckets[hash_long((unsigned long) key,		\
+			HASH_BITS(name))], member)
+
+#endif
-- 
1.7.8.6



* [RFC v2 2/7] user_ns: use new hashtable implementation
  2012-08-03 14:23 [RFC v2 0/7] generic hashtable implementation Sasha Levin
  2012-08-03 14:23 ` [RFC v2 1/7] hashtable: introduce a small and naive hashtable Sasha Levin
@ 2012-08-03 14:23 ` Sasha Levin
  2012-08-05  0:58   ` Eric W. Biederman
  2012-08-03 14:23 ` [RFC v2 3/7] mm,ksm: " Sasha Levin
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 14:23 UTC (permalink / raw)
  To: torvalds
  Cc: tj, akpm, linux-kernel, linux-mm, paul.gortmaker, davem, rostedt,
	mingo, ebiederm, aarcange, ericvh, netdev, Sasha Levin

Switch user_ns to use the new hashtable implementation. This reduces the amount of
generic unrelated code in user_ns.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 kernel/user.c |   53 ++++++++++++++++++-----------------------------------
 1 files changed, 18 insertions(+), 35 deletions(-)

diff --git a/kernel/user.c b/kernel/user.c
index b815fef..555c71a 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/export.h>
 #include <linux/user_namespace.h>
+#include <linux/hashtable.h>
 
 /*
  * userns count is 1 for root user, 1 for init_uts_ns,
@@ -50,15 +51,14 @@ EXPORT_SYMBOL_GPL(init_user_ns);
  * UID task count cache, to get fast user lookup in "alloc_uid"
  * when changing user ID's (ie setuid() and friends).
  */
-
-#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
-#define UIDHASH_SZ	(1 << UIDHASH_BITS)
-#define UIDHASH_MASK		(UIDHASH_SZ - 1)
-#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
-#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
+#define UIDHASH_BITS		(CONFIG_BASE_SMALL ? 3 : 7)
+#define UIDHASH_CMP(obj, key)	(__kuid_val((obj)->uid) == (key))
+#define uidhash_entry(key)	(hash_get(&uidhash_table, __kuid_val(key), \
+				struct user_struct, uidhash_node,	\
+				UIDHASH_CMP))
 
 static struct kmem_cache *uid_cachep;
-struct hlist_head uidhash_table[UIDHASH_SZ];
+DEFINE_STATIC_HASHTABLE(uidhash_table, UIDHASH_BITS);
 
 /*
  * The uidhash_lock is mostly taken from process context, but it is
@@ -84,29 +84,14 @@ struct user_struct root_user = {
 /*
  * These routines must be called with the uidhash spinlock held!
  */
-static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
+static void uid_hash_insert(struct user_struct *up)
 {
-	hlist_add_head(&up->uidhash_node, hashent);
+	hash_add(&uidhash_table, &up->uidhash_node, __kuid_val(up->uid));
 }
 
 static void uid_hash_remove(struct user_struct *up)
 {
-	hlist_del_init(&up->uidhash_node);
-}
-
-static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
-{
-	struct user_struct *user;
-	struct hlist_node *h;
-
-	hlist_for_each_entry(user, h, hashent, uidhash_node) {
-		if (uid_eq(user->uid, uid)) {
-			atomic_inc(&user->__count);
-			return user;
-		}
-	}
-
-	return NULL;
+	hash_del(&up->uidhash_node);
 }
 
 /* IRQs are disabled and uidhash_lock is held upon function entry.
@@ -135,7 +120,9 @@ struct user_struct *find_user(kuid_t uid)
 	unsigned long flags;
 
 	spin_lock_irqsave(&uidhash_lock, flags);
-	ret = uid_hash_find(uid, uidhashentry(uid));
+	ret = uidhash_entry(uid);
+	if (ret)
+		atomic_inc(&ret->__count);
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
@@ -156,11 +143,10 @@ void free_uid(struct user_struct *up)
 
 struct user_struct *alloc_uid(kuid_t uid)
 {
-	struct hlist_head *hashent = uidhashentry(uid);
 	struct user_struct *up, *new;
 
 	spin_lock_irq(&uidhash_lock);
-	up = uid_hash_find(uid, hashent);
+	up = uidhash_entry(uid);
 	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
@@ -176,13 +162,13 @@ struct user_struct *alloc_uid(kuid_t uid)
 		 * on adding the same user already..
 		 */
 		spin_lock_irq(&uidhash_lock);
-		up = uid_hash_find(uid, hashent);
+		up = uidhash_entry(uid);
 		if (up) {
 			key_put(new->uid_keyring);
 			key_put(new->session_keyring);
 			kmem_cache_free(uid_cachep, new);
 		} else {
-			uid_hash_insert(new, hashent);
+			uid_hash_insert(new);
 			up = new;
 		}
 		spin_unlock_irq(&uidhash_lock);
@@ -196,17 +182,14 @@ out_unlock:
 
 static int __init uid_cache_init(void)
 {
-	int n;
-
 	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
-	for(n = 0; n < UIDHASH_SZ; ++n)
-		INIT_HLIST_HEAD(uidhash_table + n);
+	hash_init(&uidhash_table, UIDHASH_BITS);
 
 	/* Insert the root user immediately (init already runs as root) */
 	spin_lock_irq(&uidhash_lock);
-	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
+	uid_hash_insert(&root_user);
 	spin_unlock_irq(&uidhash_lock);
 
 	return 0;
-- 
1.7.8.6



* [RFC v2 3/7] mm,ksm: use new hashtable implementation
  2012-08-03 14:23 [RFC v2 0/7] generic hashtable implementation Sasha Levin
  2012-08-03 14:23 ` [RFC v2 1/7] hashtable: introduce a small and naive hashtable Sasha Levin
  2012-08-03 14:23 ` [RFC v2 2/7] user_ns: use new hashtable implementation Sasha Levin
@ 2012-08-03 14:23 ` Sasha Levin
  2012-08-03 14:23 ` [RFC v2 4/7] workqueue: " Sasha Levin
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 14:23 UTC (permalink / raw)
  To: torvalds
  Cc: tj, akpm, linux-kernel, linux-mm, paul.gortmaker, davem, rostedt,
	mingo, ebiederm, aarcange, ericvh, netdev, Sasha Levin

Switch ksm to use the new hashtable implementation. This reduces the amount of
generic unrelated code in the ksm module.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 mm/ksm.c |   29 +++++++++--------------------
 1 files changed, 9 insertions(+), 20 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 47c8853..72d062a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -33,7 +33,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/swap.h>
 #include <linux/ksm.h>
-#include <linux/hash.h>
+#include <linux/hashtable.h>
 #include <linux/freezer.h>
 #include <linux/oom.h>
 
@@ -157,8 +157,8 @@ static struct rb_root root_stable_tree = RB_ROOT;
 static struct rb_root root_unstable_tree = RB_ROOT;
 
 #define MM_SLOTS_HASH_SHIFT 10
-#define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT)
-static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS];
+#define mm_hash_cmp(slot, key) ((slot)->mm == (key))
+DEFINE_STATIC_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_SHIFT);
 
 static struct mm_slot ksm_mm_head = {
 	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
@@ -275,26 +275,15 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
 
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
-	struct mm_slot *mm_slot;
-	struct hlist_head *bucket;
-	struct hlist_node *node;
-
-	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
-	hlist_for_each_entry(mm_slot, node, bucket, link) {
-		if (mm == mm_slot->mm)
-			return mm_slot;
-	}
-	return NULL;
+	return hash_get(&mm_slots_hash, mm, struct mm_slot,
+				link, mm_hash_cmp);
 }
 
 static void insert_to_mm_slots_hash(struct mm_struct *mm,
 				    struct mm_slot *mm_slot)
 {
-	struct hlist_head *bucket;
-
-	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
 	mm_slot->mm = mm;
-	hlist_add_head(&mm_slot->link, bucket);
+	hash_add(&mm_slots_hash, &mm_slot->link, (long)mm);
 }
 
 static inline int in_stable_tree(struct rmap_item *rmap_item)
@@ -647,7 +636,7 @@ static int unmerge_and_remove_all_rmap_items(void)
 		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
 						struct mm_slot, mm_list);
 		if (ksm_test_exit(mm)) {
-			hlist_del(&mm_slot->link);
+			hash_del(&mm_slot->link);
 			list_del(&mm_slot->mm_list);
 			spin_unlock(&ksm_mmlist_lock);
 
@@ -1385,7 +1374,7 @@ next_mm:
 		 * or when all VM_MERGEABLE areas have been unmapped (and
 		 * mmap_sem then protects against race with MADV_MERGEABLE).
 		 */
-		hlist_del(&slot->link);
+		hash_del(&slot->link);
 		list_del(&slot->mm_list);
 		spin_unlock(&ksm_mmlist_lock);
 
@@ -1548,7 +1537,7 @@ void __ksm_exit(struct mm_struct *mm)
 	mm_slot = get_mm_slot(mm);
 	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
 		if (!mm_slot->rmap_list) {
-			hlist_del(&mm_slot->link);
+			hash_del(&mm_slot->link);
 			list_del(&mm_slot->mm_list);
 			easy_to_free = 1;
 		} else {
-- 
1.7.8.6



* [RFC v2 4/7] workqueue: use new hashtable implementation
  2012-08-03 14:23 [RFC v2 0/7] generic hashtable implementation Sasha Levin
                   ` (2 preceding siblings ...)
  2012-08-03 14:23 ` [RFC v2 3/7] mm,ksm: " Sasha Levin
@ 2012-08-03 14:23 ` Sasha Levin
  2012-08-03 14:23 ` [RFC v2 5/7] mm/huge_memory: " Sasha Levin
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 14:23 UTC (permalink / raw)
  To: torvalds
  Cc: tj, akpm, linux-kernel, linux-mm, paul.gortmaker, davem, rostedt,
	mingo, ebiederm, aarcange, ericvh, netdev, Sasha Levin

Switch workqueues to use the new hashtable implementation. This reduces the amount of
generic unrelated code in the workqueues.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 kernel/workqueue.c |   93 ++++++++--------------------------------------------
 1 files changed, 14 insertions(+), 79 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 692d976..480975d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
+#include <linux/hashtable.h>
 
 #include "workqueue_sched.h"
 
@@ -82,8 +83,6 @@ enum {
 	NR_WORKER_POOLS		= 2,		/* # worker pools per gcwq */
 
 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
-	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
-	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
 
 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
@@ -102,6 +101,8 @@ enum {
 	HIGHPRI_NICE_LEVEL	= -20,
 };
 
+#define worker_hash_cmp(obj, key) ((obj)->current_work == (key))
+
 /*
  * Structure fields follow one of the following exclusion rules.
  *
@@ -180,7 +181,7 @@ struct global_cwq {
 	unsigned int		flags;		/* L: GCWQ_* flags */
 
 	/* workers are chained either in busy_hash or pool idle_list */
-	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
+	DEFINE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 						/* L: hash of busy workers */
 
 	struct worker_pool	pools[2];	/* normal and highpri pools */
@@ -287,10 +288,6 @@ EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
 	for ((pool) = &(gcwq)->pools[0];				\
 	     (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
 
-#define for_each_busy_worker(worker, i, pos, gcwq)			\
-	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
-		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
-
 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
 				  unsigned int sw)
 {
@@ -822,70 +819,11 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 }
 
 /**
- * busy_worker_head - return the busy hash head for a work
- * @gcwq: gcwq of interest
- * @work: work to be hashed
- *
- * Return hash head of @gcwq for @work.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to the hash head.
- */
-static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
-					   struct work_struct *work)
-{
-	const int base_shift = ilog2(sizeof(struct work_struct));
-	unsigned long v = (unsigned long)work;
-
-	/* simple shift and fold hash, do we need something better? */
-	v >>= base_shift;
-	v += v >> BUSY_WORKER_HASH_ORDER;
-	v &= BUSY_WORKER_HASH_MASK;
-
-	return &gcwq->busy_hash[v];
-}
-
-/**
- * __find_worker_executing_work - find worker which is executing a work
- * @gcwq: gcwq of interest
- * @bwh: hash head as returned by busy_worker_head()
- * @work: work to find worker for
- *
- * Find a worker which is executing @work on @gcwq.  @bwh should be
- * the hash head obtained by calling busy_worker_head() with the same
- * work.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
- * otherwise.
- */
-static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
-						   struct hlist_head *bwh,
-						   struct work_struct *work)
-{
-	struct worker *worker;
-	struct hlist_node *tmp;
-
-	hlist_for_each_entry(worker, tmp, bwh, hentry)
-		if (worker->current_work == work)
-			return worker;
-	return NULL;
-}
-
-/**
  * find_worker_executing_work - find worker which is executing a work
  * @gcwq: gcwq of interest
  * @work: work to find worker for
  *
- * Find a worker which is executing @work on @gcwq.  This function is
- * identical to __find_worker_executing_work() except that this
- * function calculates @bwh itself.
+ * Find a worker which is executing @work on @gcwq.
  *
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
@@ -897,8 +835,8 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 						 struct work_struct *work)
 {
-	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
-					    work);
+	return hash_get(&gcwq->busy_hash, work, struct worker,
+		hentry, worker_hash_cmp);
 }
 
 /**
@@ -959,7 +897,7 @@ static bool is_chained_work(struct workqueue_struct *wq)
 		int i;
 
 		spin_lock_irqsave(&gcwq->lock, flags);
-		for_each_busy_worker(worker, i, pos, gcwq) {
+		hash_for_each(i, pos, &gcwq->busy_hash, worker, hentry) {
 			if (worker->task != current)
 				continue;
 			spin_unlock_irqrestore(&gcwq->lock, flags);
@@ -1432,7 +1370,7 @@ retry:
 	wake_up_all(&gcwq->rebind_hold);
 
 	/* rebind busy workers */
-	for_each_busy_worker(worker, i, pos, gcwq) {
+	hash_for_each(i, pos, &gcwq->busy_hash, worker, hentry) {
 		struct work_struct *rebind_work = &worker->rebind_work;
 
 		/* morph UNBOUND to REBIND */
@@ -1932,7 +1870,6 @@ __acquires(&gcwq->lock)
 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
 	struct worker_pool *pool = worker->pool;
 	struct global_cwq *gcwq = pool->gcwq;
-	struct hlist_head *bwh = busy_worker_head(gcwq, work);
 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
 	work_func_t f = work->func;
 	int work_color;
@@ -1964,7 +1901,7 @@ __acquires(&gcwq->lock)
 	 * already processing the work.  If so, defer the work to the
 	 * currently executing one.
 	 */
-	collision = __find_worker_executing_work(gcwq, bwh, work);
+	collision = find_worker_executing_work(gcwq, work);
 	if (unlikely(collision)) {
 		move_linked_works(work, &collision->scheduled, NULL);
 		return;
@@ -1972,7 +1909,7 @@ __acquires(&gcwq->lock)
 
 	/* claim and process */
 	debug_work_deactivate(work);
-	hlist_add_head(&worker->hentry, bwh);
+	hash_add(&gcwq->busy_hash, &worker->hentry, (long)work);
 	worker->current_work = work;
 	worker->current_cwq = cwq;
 	work_color = get_work_color(work);
@@ -2027,7 +1964,7 @@ __acquires(&gcwq->lock)
 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
 	/* we're done with it, release */
-	hlist_del_init(&worker->hentry);
+	hash_del(&worker->hentry);
 	worker->current_work = NULL;
 	worker->current_cwq = NULL;
 	cwq_dec_nr_in_flight(cwq, work_color, false);
@@ -3405,7 +3342,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 		list_for_each_entry(worker, &pool->idle_list, entry)
 			worker->flags |= WORKER_UNBOUND;
 
-	for_each_busy_worker(worker, i, pos, gcwq)
+	hash_for_each(i, pos, &gcwq->busy_hash, worker, hentry)
 		worker->flags |= WORKER_UNBOUND;
 
 	gcwq->flags |= GCWQ_DISASSOCIATED;
@@ -3690,7 +3627,6 @@ out_unlock:
 static int __init init_workqueues(void)
 {
 	unsigned int cpu;
-	int i;
 
 	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
 	cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
@@ -3704,8 +3640,7 @@ static int __init init_workqueues(void)
 		gcwq->cpu = cpu;
 		gcwq->flags |= GCWQ_DISASSOCIATED;
 
-		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
-			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
+		hash_init(&gcwq->busy_hash, BUSY_WORKER_HASH_ORDER);
 
 		for_each_worker_pool(pool, gcwq) {
 			pool->gcwq = gcwq;
-- 
1.7.8.6



* [RFC v2 5/7] mm/huge_memory: use new hashtable implementation
  2012-08-03 14:23 [RFC v2 0/7] generic hashtable implementation Sasha Levin
                   ` (3 preceding siblings ...)
  2012-08-03 14:23 ` [RFC v2 4/7] workqueue: " Sasha Levin
@ 2012-08-03 14:23 ` Sasha Levin
  2012-08-03 14:23 ` [RFC v2 6/7] tracepoint: " Sasha Levin
  2012-08-03 14:23 ` [RFC v2 7/7] net,9p: " Sasha Levin
  6 siblings, 0 replies; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 14:23 UTC (permalink / raw)
  To: torvalds
  Cc: tj, akpm, linux-kernel, linux-mm, paul.gortmaker, davem, rostedt,
	mingo, ebiederm, aarcange, ericvh, netdev, Sasha Levin

Switch huge_memory to use the new hashtable implementation. This reduces the amount of
generic unrelated code in huge_memory.

This also removes the dynamic allocation of the hash table. The size of the table is
constant, so there's no point in paying the price of an extra dereference when accessing
it.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 mm/huge_memory.c |   56 ++++++++++-------------------------------------------
 1 files changed, 11 insertions(+), 45 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 57c4b93..7b2cad5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -17,6 +17,7 @@
 #include <linux/khugepaged.h>
 #include <linux/freezer.h>
 #include <linux/mman.h>
+#include <linux/hashtable.h>
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
 #include "internal.h"
@@ -57,12 +58,13 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
 
 static int khugepaged(void *none);
-static int mm_slots_hash_init(void);
 static int khugepaged_slab_init(void);
 static void khugepaged_slab_free(void);
 
-#define MM_SLOTS_HASH_HEADS 1024
-static struct hlist_head *mm_slots_hash __read_mostly;
+#define MM_SLOTS_HASH_BITS 10
+#define MM_SLOTS_HASH_CMP(mm_slot, obj) ((mm_slot)->mm == (obj))
+DEFINE_STATIC_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
+
 static struct kmem_cache *mm_slot_cache __read_mostly;
 
 /**
@@ -140,7 +142,7 @@ static int start_khugepaged(void)
 	int err = 0;
 	if (khugepaged_enabled()) {
 		int wakeup;
-		if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
+		if (unlikely(!mm_slot_cache)) {
 			err = -ENOMEM;
 			goto out;
 		}
@@ -554,12 +556,6 @@ static int __init hugepage_init(void)
 	if (err)
 		goto out;
 
-	err = mm_slots_hash_init();
-	if (err) {
-		khugepaged_slab_free();
-		goto out;
-	}
-
 	/*
 	 * By default disable transparent hugepages on smaller systems,
 	 * where the extra memory used could hurt more than TLB overhead
@@ -1562,47 +1558,17 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
 	kmem_cache_free(mm_slot_cache, mm_slot);
 }
 
-static int __init mm_slots_hash_init(void)
-{
-	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
-				GFP_KERNEL);
-	if (!mm_slots_hash)
-		return -ENOMEM;
-	return 0;
-}
-
-#if 0
-static void __init mm_slots_hash_free(void)
-{
-	kfree(mm_slots_hash);
-	mm_slots_hash = NULL;
-}
-#endif
-
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
-	struct mm_slot *mm_slot;
-	struct hlist_head *bucket;
-	struct hlist_node *node;
-
-	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-				% MM_SLOTS_HASH_HEADS];
-	hlist_for_each_entry(mm_slot, node, bucket, hash) {
-		if (mm == mm_slot->mm)
-			return mm_slot;
-	}
-	return NULL;
+	return hash_get(&mm_slots_hash, mm, struct mm_slot,
+		hash, MM_SLOTS_HASH_CMP);
 }
 
 static void insert_to_mm_slots_hash(struct mm_struct *mm,
 				    struct mm_slot *mm_slot)
 {
-	struct hlist_head *bucket;
-
-	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-				% MM_SLOTS_HASH_HEADS];
 	mm_slot->mm = mm;
-	hlist_add_head(&mm_slot->hash, bucket);
+	hash_add(&mm_slots_hash, &mm_slot->hash, (long)mm);
 }
 
 static inline int khugepaged_test_exit(struct mm_struct *mm)
@@ -1675,7 +1641,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 	spin_lock(&khugepaged_mm_lock);
 	mm_slot = get_mm_slot(mm);
 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
-		hlist_del(&mm_slot->hash);
+		hash_del(&mm_slot->hash);
 		list_del(&mm_slot->mm_node);
 		free = 1;
 	}
@@ -2089,7 +2055,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 
 	if (khugepaged_test_exit(mm)) {
 		/* free mm_slot */
-		hlist_del(&mm_slot->hash);
+		hash_del(&mm_slot->hash);
 		list_del(&mm_slot->mm_node);
 
 		/*
-- 
1.7.8.6



* [RFC v2 6/7] tracepoint: use new hashtable implementation
  2012-08-03 14:23 [RFC v2 0/7] generic hashtable implementation Sasha Levin
                   ` (4 preceding siblings ...)
  2012-08-03 14:23 ` [RFC v2 5/7] mm/huge_memory: " Sasha Levin
@ 2012-08-03 14:23 ` Sasha Levin
  2012-08-05  0:36   ` Steven Rostedt
  2012-08-03 14:23 ` [RFC v2 7/7] net,9p: " Sasha Levin
  6 siblings, 1 reply; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 14:23 UTC (permalink / raw)
  To: torvalds
  Cc: tj, akpm, linux-kernel, linux-mm, paul.gortmaker, davem, rostedt,
	mingo, ebiederm, aarcange, ericvh, netdev, Sasha Levin

Switch tracepoints to use the new hashtable implementation. This reduces the amount of
generic unrelated code in the tracepoints.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 kernel/tracepoint.c |   26 +++++++++-----------------
 1 files changed, 9 insertions(+), 17 deletions(-)

diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d96ba22..b5a2650 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/static_key.h>
+#include <linux/hashtable.h>
 
 extern struct tracepoint * const __start___tracepoints_ptrs[];
 extern struct tracepoint * const __stop___tracepoints_ptrs[];
@@ -49,8 +50,7 @@ static LIST_HEAD(tracepoint_module_list);
  * Protected by tracepoints_mutex.
  */
 #define TRACEPOINT_HASH_BITS 6
-#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
-static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
+DEFINE_STATIC_HASHTABLE(tracepoint_table, TRACEPOINT_HASH_BITS);
 
 /*
  * Note about RCU :
@@ -191,16 +191,14 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
  */
 static struct tracepoint_entry *get_tracepoint(const char *name)
 {
-	struct hlist_head *head;
 	struct hlist_node *node;
 	struct tracepoint_entry *e;
 	u32 hash = jhash(name, strlen(name), 0);
 
-	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-	hlist_for_each_entry(e, node, head, hlist) {
+	hash_for_each_possible(&tracepoint_table, node, e, hlist, hash)
 		if (!strcmp(name, e->name))
 			return e;
-	}
+
 	return NULL;
 }
 
@@ -210,19 +208,13 @@ static struct tracepoint_entry *get_tracepoint(const char *name)
  */
 static struct tracepoint_entry *add_tracepoint(const char *name)
 {
-	struct hlist_head *head;
-	struct hlist_node *node;
 	struct tracepoint_entry *e;
 	size_t name_len = strlen(name) + 1;
 	u32 hash = jhash(name, name_len-1, 0);
 
-	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-	hlist_for_each_entry(e, node, head, hlist) {
-		if (!strcmp(name, e->name)) {
-			printk(KERN_NOTICE
-				"tracepoint %s busy\n", name);
-			return ERR_PTR(-EEXIST);	/* Already there */
-		}
+	if (get_tracepoint(name)) {
+		printk(KERN_NOTICE "tracepoint %s busy\n", name);
+		return ERR_PTR(-EEXIST);	/* Already there */
 	}
 	/*
 	 * Using kmalloc here to allocate a variable length element. Could
@@ -234,7 +226,7 @@ static struct tracepoint_entry *add_tracepoint(const char *name)
 	memcpy(&e->name[0], name, name_len);
 	e->funcs = NULL;
 	e->refcount = 0;
-	hlist_add_head(&e->hlist, head);
+	hash_add(&tracepoint_table, &e->hlist, hash);
 	return e;
 }
 
@@ -244,7 +236,7 @@ static struct tracepoint_entry *add_tracepoint(const char *name)
  */
 static inline void remove_tracepoint(struct tracepoint_entry *e)
 {
-	hlist_del(&e->hlist);
+	hash_del(&e->hlist);
 	kfree(e);
 }
 
-- 
1.7.8.6



* [RFC v2 7/7] net,9p: use new hashtable implementation
  2012-08-03 14:23 [RFC v2 0/7] generic hashtable implementation Sasha Levin
                   ` (5 preceding siblings ...)
  2012-08-03 14:23 ` [RFC v2 6/7] tracepoint: " Sasha Levin
@ 2012-08-03 14:23 ` Sasha Levin
  2012-08-03 18:00   ` Eric Dumazet
  6 siblings, 1 reply; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 14:23 UTC (permalink / raw)
  To: torvalds
  Cc: tj, akpm, linux-kernel, linux-mm, paul.gortmaker, davem, rostedt,
	mingo, ebiederm, aarcange, ericvh, netdev, Sasha Levin

Switch 9p error table to use the new hashtable implementation. This reduces the amount of
generic unrelated code in 9p.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 net/9p/error.c |   17 ++++++++---------
 1 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/net/9p/error.c b/net/9p/error.c
index 2ab2de7..f1037db 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -34,7 +34,7 @@
 #include <linux/jhash.h>
 #include <linux/errno.h>
 #include <net/9p/9p.h>
-
+#include <linux/hashtable.h>
 /**
  * struct errormap - map string errors from Plan 9 to Linux numeric ids
  * @name: string sent over 9P
@@ -50,8 +50,8 @@ struct errormap {
 	struct hlist_node list;
 };
 
-#define ERRHASHSZ		32
-static struct hlist_head hash_errmap[ERRHASHSZ];
+#define ERRHASHSZ 5
+DEFINE_STATIC_HASHTABLE(hash_errmap, ERRHASHSZ);
 
 /* FixMe - reduce to a reasonable size */
 static struct errormap errmap[] = {
@@ -196,15 +196,14 @@ int p9_error_init(void)
 	int bucket;
 
 	/* initialize hash table */
-	for (bucket = 0; bucket < ERRHASHSZ; bucket++)
-		INIT_HLIST_HEAD(&hash_errmap[bucket]);
+	hash_init(&hash_errmap, ERRHASHSZ);
 
 	/* load initial error map into hash table */
 	for (c = errmap; c->name != NULL; c++) {
 		c->namelen = strlen(c->name);
-		bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;
+		bucket = jhash(c->name, c->namelen, 0);
 		INIT_HLIST_NODE(&c->list);
-		hlist_add_head(&c->list, &hash_errmap[bucket]);
+		hash_add(&hash_errmap, &c->list, bucket);
 	}
 
 	return 1;
@@ -228,8 +227,8 @@ int p9_errstr2errno(char *errstr, int len)
 	errno = 0;
 	p = NULL;
 	c = NULL;
-	bucket = jhash(errstr, len, 0) % ERRHASHSZ;
-	hlist_for_each_entry(c, p, &hash_errmap[bucket], list) {
+	bucket = jhash(errstr, len, 0);
+	hash_for_each_possible(&hash_errmap, p, c, list, bucket) {
 		if (c->namelen == len && !memcmp(c->name, errstr, len)) {
 			errno = c->val;
 			break;
-- 
1.7.8.6



* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 14:23 ` [RFC v2 1/7] hashtable: introduce a small and naive hashtable Sasha Levin
@ 2012-08-03 17:15   ` Tejun Heo
  2012-08-03 17:16     ` Tejun Heo
  2012-08-03 21:19     ` Sasha Levin
  2012-08-03 17:39   ` Eric Dumazet
  1 sibling, 2 replies; 34+ messages in thread
From: Tejun Heo @ 2012-08-03 17:15 UTC (permalink / raw)
  To: Sasha Levin
  Cc: torvalds, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

Hello, Sasha.

On Fri, Aug 03, 2012 at 04:23:02PM +0200, Sasha Levin wrote:
> +#define DEFINE_STATIC_HASHTABLE(n, b)					\
> +	static struct hash_table n = { .bits = (b),			\
> +		.buckets = { [0 ... ((1 << (b)) - 1)] = HLIST_HEAD_INIT } }

What does this "static" mean?

> +#define DEFINE_HASHTABLE(n, b)						\
> +	union {								\
> +		struct hash_table n;					\
> +		struct {						\
> +			size_t bits;					\
> +			struct hlist_head buckets[1 << (b)];		\
> +		} __##n ;						\
> +	};

Is this supposed to be embedded in struct definition?  If so, the name
is rather misleading as DEFINE_* is supposed to define and initialize
stand-alone constructs.  Also, for struct members, simply putting hash
entries after struct hash_table should work.

Wouldn't using DEFINE_HASHTABLE() for the first macro and
DEFINE_HASHTABLE_MEMBER() for the latter be better?

> +#define HASH_BITS(name) ((name)->bits)
> +#define HASH_SIZE(name) (1 << (HASH_BITS(name)))
> +
> +__attribute__ ((unused))

Are we using __attribute__((unused)) for functions defined in headers
instead of static inline now?  If so, why? 

> +static void hash_init(struct hash_table *ht, size_t bits)
> +{
> +	size_t i;

I would prefer int here but no biggie.

> +	ht->bits = bits;
> +	for (i = 0; i < (1 << bits); i++)
> +		INIT_HLIST_HEAD(&ht->buckets[i]);
> +}
> +
> +static void hash_add(struct hash_table *ht, struct hlist_node *node, long key)
> +{
> +	hlist_add_head(node,
> +		&ht->buckets[hash_long((unsigned long)key, HASH_BITS(ht))]);
> +}
> +
> +
> +#define hash_get(name, key, type, member, cmp_fn)			\
> +({									\
> +	struct hlist_node *__node;					\
> +	typeof(key) __key = key;					\
> +	type *__obj = NULL;						\
> +	hlist_for_each_entry(__obj, __node, &(name)->buckets[		\
> +			hash_long((unsigned long) __key,		\
> +			HASH_BITS(name))], member)			\
> +		if (cmp_fn(__obj, __key))				\
> +			break;						\
> +	__node ? __obj : NULL;					\
> +})

As opposed to using hash_for_each_possible(), how much difference does
this make?  Is it really worthwhile?

Thanks.

-- 
tejun


* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 17:15   ` Tejun Heo
@ 2012-08-03 17:16     ` Tejun Heo
  2012-08-03 21:19     ` Sasha Levin
  1 sibling, 0 replies; 34+ messages in thread
From: Tejun Heo @ 2012-08-03 17:16 UTC (permalink / raw)
  To: Sasha Levin
  Cc: torvalds, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

Ooh, one more thing.  Comments please.

-- 
tejun


* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 14:23 ` [RFC v2 1/7] hashtable: introduce a small and naive hashtable Sasha Levin
  2012-08-03 17:15   ` Tejun Heo
@ 2012-08-03 17:39   ` Eric Dumazet
  1 sibling, 0 replies; 34+ messages in thread
From: Eric Dumazet @ 2012-08-03 17:39 UTC (permalink / raw)
  To: Sasha Levin
  Cc: torvalds, tj, akpm, linux-kernel, linux-mm, paul.gortmaker,
	davem, rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On Fri, 2012-08-03 at 16:23 +0200, Sasha Levin wrote:
> This hashtable implementation uses hlist buckets to provide a simple
> hashtable, so that it doesn't have to be reimplemented all over the kernel.
> 

> +static void hash_add(struct hash_table *ht, struct hlist_node *node, long key)
> +{
> +	hlist_add_head(node,
> +		&ht->buckets[hash_long((unsigned long)key, HASH_BITS(ht))]);
> +}
> +

Why is key a long, cast later to "unsigned long"?

hash_long() is expensive on 64-bit arches, and not really needed
if the key is a u32 from the beginning (I am referring to your
patches 6 & 7 using jhash()).

Maybe you could use a macro, so that we can automatically select
hash_32() if the key is a u32, and hash_long() for other types.
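
Something like this, maybe (completely untested; hash_key() is just a
made-up name, and it assumes an integer key):

	#define hash_key(key, bits)					\
		(sizeof(key) <= 4 ? hash_32(key, bits) :		\
				    hash_long(key, bits))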





* Re: [RFC v2 7/7] net,9p: use new hashtable implementation
  2012-08-03 14:23 ` [RFC v2 7/7] net,9p: " Sasha Levin
@ 2012-08-03 18:00   ` Eric Dumazet
  2012-08-03 21:14     ` Sasha Levin
  0 siblings, 1 reply; 34+ messages in thread
From: Eric Dumazet @ 2012-08-03 18:00 UTC (permalink / raw)
  To: Sasha Levin
  Cc: torvalds, tj, akpm, linux-kernel, linux-mm, paul.gortmaker,
	davem, rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On Fri, 2012-08-03 at 16:23 +0200, Sasha Levin wrote:
> Switch 9p error table to use the new hashtable implementation. This reduces the amount of
> generic unrelated code in 9p.
> 
> Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
> ---
>  net/9p/error.c |   17 ++++++++---------
>  1 files changed, 8 insertions(+), 9 deletions(-)
> 
> diff --git a/net/9p/error.c b/net/9p/error.c
> index 2ab2de7..f1037db 100644
> --- a/net/9p/error.c
> +++ b/net/9p/error.c
> @@ -34,7 +34,7 @@
>  #include <linux/jhash.h>
>  #include <linux/errno.h>
>  #include <net/9p/9p.h>
> -
> +#include <linux/hashtable.h>
>  /**
>   * struct errormap - map string errors from Plan 9 to Linux numeric ids
>   * @name: string sent over 9P
> @@ -50,8 +50,8 @@ struct errormap {
>  	struct hlist_node list;
>  };
>  
> -#define ERRHASHSZ		32
> -static struct hlist_head hash_errmap[ERRHASHSZ];


> +#define ERRHASHSZ 5

This name is confusing; it should mention SHIFT or BITS, maybe...


> +DEFINE_STATIC_HASHTABLE(hash_errmap, ERRHASHSZ);
>  
>  /* FixMe - reduce to a reasonable size */
>  static struct errormap errmap[] = {
> @@ -196,15 +196,14 @@ int p9_error_init(void)
>  	int bucket;

remove "int bucket" and use :

	u32 hash;

>  
>  	/* initialize hash table */
> -	for (bucket = 0; bucket < ERRHASHSZ; bucket++)
> -		INIT_HLIST_HEAD(&hash_errmap[bucket]);
> +	hash_init(&hash_errmap, ERRHASHSZ);

Why is hash_init() even needed?

If hash is "DEFINE_STATIC_HASHTABLE(...)", it's already ready for use!

>  
>  	/* load initial error map into hash table */
>  	for (c = errmap; c->name != NULL; c++) {
>  		c->namelen = strlen(c->name);
> -		bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;
> +		bucket = jhash(c->name, c->namelen, 0);

bucket is a wrong name here, its more like "key" or "hash"

>  		INIT_HLIST_NODE(&c->list);
> -		hlist_add_head(&c->list, &hash_errmap[bucket]);
> +		hash_add(&hash_errmap, &c->list, bucket);
>  	}
>  
>  	return 1;
> @@ -228,8 +227,8 @@ int p9_errstr2errno(char *errstr, int len)
>  	errno = 0;
>  	p = NULL;
>  	c = NULL;
> -	bucket = jhash(errstr, len, 0) % ERRHASHSZ;
> -	hlist_for_each_entry(c, p, &hash_errmap[bucket], list) {
> +	bucket = jhash(errstr, len, 0);

	hash = jhash(errstr, len, 0);

> +	hash_for_each_possible(&hash_errmap, p, c, list, bucket) {
>  		if (c->namelen == len && !memcmp(c->name, errstr, len)) {
>  			errno = c->val;
>  			break;




* Re: [RFC v2 7/7] net,9p: use new hashtable implementation
  2012-08-03 18:00   ` Eric Dumazet
@ 2012-08-03 21:14     ` Sasha Levin
  0 siblings, 0 replies; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 21:14 UTC (permalink / raw)
  To: Eric Dumazet
  Cc: torvalds, tj, akpm, linux-kernel, linux-mm, paul.gortmaker,
	davem, rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On 08/03/2012 08:00 PM, Eric Dumazet wrote:
> On Fri, 2012-08-03 at 16:23 +0200, Sasha Levin wrote:
>>  	/* initialize hash table */
>> -	for (bucket = 0; bucket < ERRHASHSZ; bucket++)
>> -		INIT_HLIST_HEAD(&hash_errmap[bucket]);
>> +	hash_init(&hash_errmap, ERRHASHSZ);
> 
> Why is hash_init() even needed?
> 
> If hash is "DEFINE_STATIC_HASHTABLE(...)", it's already ready for use!

Indeed it is.

I've removed it, and then decided to put it back since the definition of the hashtable isn't fully cooked yet, and I didn't want to lose this initialization point if it turns out we need to initialize that hashtable after all.

I will remove it once the hashtable definitions are clear.

The rest of the review comments will be addressed.

Thanks!


* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 17:15   ` Tejun Heo
  2012-08-03 17:16     ` Tejun Heo
@ 2012-08-03 21:19     ` Sasha Levin
  2012-08-03 21:30       ` Tejun Heo
  1 sibling, 1 reply; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 21:19 UTC (permalink / raw)
  To: Tejun Heo
  Cc: torvalds, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On 08/03/2012 07:15 PM, Tejun Heo wrote:
> Hello, Sasha.
> 
> On Fri, Aug 03, 2012 at 04:23:02PM +0200, Sasha Levin wrote:
>> +#define DEFINE_STATIC_HASHTABLE(n, b)					\
>> +	static struct hash_table n = { .bits = (b),			\
>> +		.buckets = { [0 ... ((1 << (b)) - 1)] = HLIST_HEAD_INIT } }
> 
> What does this "static" mean?
> 
>> +#define DEFINE_HASHTABLE(n, b)						\
>> +	union {								\
>> +		struct hash_table n;					\
>> +		struct {						\
>> +			size_t bits;					\
>> +			struct hlist_head buckets[1 << (b)];		\
>> +		} __##n ;						\
>> +	};
> 
> Is this supposed to be embedded in struct definition?  If so, the name
> is rather misleading as DEFINE_* is supposed to define and initialize
> stand-alone constructs.  Also, for struct members, simply putting hash
> entries after struct hash_table should work.

It would work, but I didn't want to just put them in the union since I feel it's safer to keep them in a separate struct so they won't be used by mistake.

> Wouldn't using DEFINE_HASHTABLE() for the first macro and
> DEFINE_HASHTABLE_MEMBER() for the latter be better?

Indeed that sounds better, will fix.

>> +#define HASH_BITS(name) ((name)->bits)
>> +#define HASH_SIZE(name) (1 << (HASH_BITS(name)))
>> +
>> +__attribute__ ((unused))
> 
> Are we using __attribute__((unused)) for functions defined in headers
> instead of static inline now?  If so, why? 
> 
>> +static void hash_init(struct hash_table *ht, size_t bits)
>> +{
>> +	size_t i;
> 
> I would prefer int here but no biggie.

Just wondering, is there a particular reason behind it?

>> +	ht->bits = bits;
>> +	for (i = 0; i < (1 << bits); i++)
>> +		INIT_HLIST_HEAD(&ht->buckets[i]);
>> +}
>> +
>> +static void hash_add(struct hash_table *ht, struct hlist_node *node, long key)
>> +{
>> +	hlist_add_head(node,
>> +		&ht->buckets[hash_long((unsigned long)key, HASH_BITS(ht))]);
>> +}
>> +
>> +
>> +#define hash_get(name, key, type, member, cmp_fn)			\
>> +({									\
>> +	struct hlist_node *__node;					\
>> +	typeof(key) __key = key;					\
>> +	type *__obj = NULL;						\
>> +	hlist_for_each_entry(__obj, __node, &(name)->buckets[		\
>> +			hash_long((unsigned long) __key,		\
>> +			HASH_BITS(name))], member)			\
>> +		if (cmp_fn(__obj, __key))				\
>> +			break;						\
>> +	__node ? __obj : NULL;					\
>> +})
> 
> As opposed to using hash_for_each_possible(), how much difference does
> this make?  Is it really worthwhile?

Most of the places I've switched to using this hashtable so far (4 out of 6) are using hash_get(). I think that the code looks cleaner when you can just provide a comparison function instead of implementing the iteration itself.

I think hash_for_each_possible() is useful if the comparison condition is more complex than a simple comparison of one of the object members with the key - there's no need to force it on all the users.
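
To give an example of the latter, the p9_errstr2errno() lookup in patch
7 compares both the length and the contents, which doesn't reduce to a
single member comparison:

	hash_for_each_possible(&hash_errmap, p, c, list, bucket) {
		if (c->namelen == len && !memcmp(c->name, errstr, len)) {
			errno = c->val;
			break;
		}
	}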

> 
> Thanks.
> 



* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 21:19     ` Sasha Levin
@ 2012-08-03 21:30       ` Tejun Heo
  2012-08-03 21:36         ` Sasha Levin
  2012-08-03 21:41         ` Sasha Levin
  0 siblings, 2 replies; 34+ messages in thread
From: Tejun Heo @ 2012-08-03 21:30 UTC (permalink / raw)
  To: Sasha Levin
  Cc: torvalds, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

Hello,

On Fri, Aug 03, 2012 at 11:19:57PM +0200, Sasha Levin wrote:
> > Is this supposed to be embedded in struct definition?  If so, the name
> > is rather misleading as DEFINE_* is supposed to define and initialize
> > stand-alone constructs.  Also, for struct members, simply putting hash
> > entries after struct hash_table should work.
> 
> It would work, but I didn't want to just put them in the union since
> I feel it's safer to keep them in a separate struct so they won't be
> used by mistake.

Just use ugly enough pre/postfixes.  If the user still accesses that,
it's the user's fault.

> >> +static void hash_init(struct hash_table *ht, size_t bits)
> >> +{
> >> +	size_t i;
> > 
> > I would prefer int here but no biggie.
> 
> Just wondering, is there a particular reason behind it?

It isn't a size, and using unsigned where signed suffices usually causes
more headaches than it helps, due to the lack of values to use
for exceptional conditions (usually -errno or -1).

> > As opposed to using hash_for_each_possible(), how much difference does
> > this make?  Is it really worthwhile?
> 
> Most of the places I've switched to using this hashtable so far (4
> out of 6) are using hash_get(). I think that the code looks cleaner
> when you can just provide a comparison function instead of
> implementing the iteration itself.
>
> I think hash_for_each_possible() is useful if the comparison
> condition is more complex than a simple comparison of one of the
> object members with the key - there's no need to force it on all the
> users.

I don't know.  What's the difference?  In terms of LOC, it might not
even save any lines, thanks to the extra function definition, right?  I don't
think it's saving enough complexity to justify a separate rather
unusual interface.

Thanks.

-- 
tejun


* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 21:30       ` Tejun Heo
@ 2012-08-03 21:36         ` Sasha Levin
  2012-08-03 21:44           ` Tejun Heo
  2012-08-03 21:41         ` Sasha Levin
  1 sibling, 1 reply; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 21:36 UTC (permalink / raw)
  To: Tejun Heo
  Cc: torvalds, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On 08/03/2012 11:30 PM, Tejun Heo wrote:
>> I think hash_for_each_possible() is useful if the comparison
>> > condition is more complex than a simple comparison of one of the
>> > object members with the key - there's no need to force it on all the
>> > users.
> I don't know.  What's the difference?  In terms of LOC, it might not
> even save any lines, thanks to the extra function definition, right?  I don't
> think it's saving enough complexity to justify a separate rather
> unusual interface.

The function definition itself is just a macro, for example:

	#define MM_SLOTS_HASH_CMP(mm_slot, obj) ((mm_slot)->mm == (obj))

As an alternative, what do you think about simplifying that to be just a 'cond' instead of a function? Something like:

	hash_get(&mm_slots_hash, mm, struct mm_slot, hash, mm);

In that case, the last param ("mm") will get unrolled to a condition like this:

	if ((obj)->mm == key)

Which will be simple and easy for the user.
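
A rough sketch of what I have in mind (untested, implemented on top of
hash_for_each_possible() from this patch):

	#define hash_get(name, key, type, member, field)		\
	({								\
		struct hlist_node *__node;				\
		typeof(key) __key = key;				\
		type *__obj, *__found = NULL;				\
		hash_for_each_possible(name, __node, __obj,		\
				member, __key)				\
			if ((__obj)->field == __key) {			\
				__found = __obj;			\
				break;					\
			}						\
		__found;						\
	})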


The only reason I want to keep this interface is that most cases I've stumbled on so far were easy short comparisons of a struct member with the key, and I don't want to make them more complex than they need to be. I probably will switch hash_get() to use hash_for_each_possible() as well, which will make hash_get() less of a special case.


* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 21:30       ` Tejun Heo
  2012-08-03 21:36         ` Sasha Levin
@ 2012-08-03 21:41         ` Sasha Levin
  2012-08-03 21:48           ` Tejun Heo
  1 sibling, 1 reply; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 21:41 UTC (permalink / raw)
  To: Tejun Heo
  Cc: torvalds, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On 08/03/2012 11:30 PM, Tejun Heo wrote:
> Hello,
> 
> On Fri, Aug 03, 2012 at 11:19:57PM +0200, Sasha Levin wrote:
>>> Is this supposed to be embedded in struct definition?  If so, the name
>>> is rather misleading as DEFINE_* is supposed to define and initialize
>>> stand-alone constructs.  Also, for struct members, simply putting hash
>>> entries after struct hash_table should work.
>>
>> It would work, but I didn't want to just put them in the union since
>> I feel it's safer to keep them in a separate struct so they won't be
>> used by mistake,
> 
> Just use ugly enough pre/postfixes.  If the user still accesses that,
> it's the user's fault.

I forgot to comment on that one, sorry.

If we put hash entries after struct hash_table we don't take the bits field size into account, or did I miss something?


* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 21:36         ` Sasha Levin
@ 2012-08-03 21:44           ` Tejun Heo
  0 siblings, 0 replies; 34+ messages in thread
From: Tejun Heo @ 2012-08-03 21:44 UTC (permalink / raw)
  To: Sasha Levin
  Cc: torvalds, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

Hello, Sasha.

On Fri, Aug 03, 2012 at 11:36:49PM +0200, Sasha Levin wrote:
> On 08/03/2012 11:30 PM, Tejun Heo wrote:
> The function definition itself is just a macro, for example:
> 
> 	#define MM_SLOTS_HASH_CMP(mm_slot, obj) ((mm_slot)->mm == (obj))

It seems like it would make things more difficult to follow and more
error-prone.  I'd definitely prefer just using functions.

> As an alternative, what do you think about simplifying that to be
> just a 'cond' instead of a function? Something like:
> 
> 	hash_get(&mm_slots_hash, mm, struct mm_slot, hash, mm);
> 
> In that case, the last param ("mm") will get unrolled to a condition like this:
> 
> 	if ((obj)->mm == key)
> 
> Which will be simple and easy for the user.

It seems a bit too magical(tm) to me. ;)

> The only reason I want to keep this interface is that most cases
> I've stumbled on so far were easy short comparisons of a struct member
> with the key, and I don't want to make them more complex than they
> need to be. I probably will switch hash_get() to use
> hash_for_each_possible() as well, which will make hash_get() less of
> a special case.

I can understand that, but I think the benefit we're talking about is a
bit too minuscule to justify having two different interfaces.
What do others think?

Thanks.

-- 
tejun


* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 21:41         ` Sasha Levin
@ 2012-08-03 21:48           ` Tejun Heo
  2012-08-03 22:20             ` Sasha Levin
  0 siblings, 1 reply; 34+ messages in thread
From: Tejun Heo @ 2012-08-03 21:48 UTC (permalink / raw)
  To: Sasha Levin
  Cc: torvalds, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

Hello,

On Fri, Aug 03, 2012 at 11:41:34PM +0200, Sasha Levin wrote:
> I forgot to comment on that one, sorry.
> 
> If we put hash entries after struct hash_table we don't take the
> bits field size into account, or did I miss something?

So, if you do the following,

	struct {
		struct {
			int i;
			long ar[];
		} B;
		long __ar_storage[32];
	} A;

It should always be safe to dereference A.B.ar[31].  I'm not sure
whether this is something guaranteed by C though.  Maybe compilers are
allowed to put members in reverse order, but I think we already depend
on the above.

Thanks.

-- 
tejun


* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 21:48           ` Tejun Heo
@ 2012-08-03 22:20             ` Sasha Levin
  2012-08-03 22:23               ` Tejun Heo
  0 siblings, 1 reply; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 22:20 UTC (permalink / raw)
  To: Tejun Heo
  Cc: torvalds, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On 08/03/2012 11:48 PM, Tejun Heo wrote:
> Hello,
> 
> On Fri, Aug 03, 2012 at 11:41:34PM +0200, Sasha Levin wrote:
>> I forgot to comment on that one, sorry.
>>
>> If we put hash entries after struct hash_table we don't take the
>> bits field size into account, or did I miss something?
> 
> So, if you do the following,
> 
> 	struct {
> 		struct {
> 			int i;
> 			long ar[];
> 		} B;
> 		long __ar_storage[32];
> 	} A;

struct A should have been an union, right?

> It should always be safe to dereference A.B.ar[31].  I'm not sure
> whether this is something guaranteed by C tho.  Maybe compilers are
> allowed to put members in reverse order but I think we already depend
> on the above.

Why is accessing A.B.ar[31] safe?

__ar_storage is only 32*sizeof(long) bytes long, while struct B would need to be 32*sizeof(long) + sizeof(int) bytes long for the A.B.ar[31] access to be safe.


> Thanks.
> 



* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 22:20             ` Sasha Levin
@ 2012-08-03 22:23               ` Tejun Heo
  2012-08-03 22:26                 ` Sasha Levin
  2012-08-03 22:29                 ` Linus Torvalds
  0 siblings, 2 replies; 34+ messages in thread
From: Tejun Heo @ 2012-08-03 22:23 UTC (permalink / raw)
  To: Sasha Levin
  Cc: torvalds, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

Hello,

On Sat, Aug 04, 2012 at 12:20:02AM +0200, Sasha Levin wrote:
> On 08/03/2012 11:48 PM, Tejun Heo wrote:
> > On Fri, Aug 03, 2012 at 11:41:34PM +0200, Sasha Levin wrote:
> >> I forgot to comment on that one, sorry.
> >>
> >> If we put hash entries after struct hash_table we don't take the
> >> bits field size into account, or did I miss something?
> > 
> > So, if you do the following,
> > 
> > 	struct {
> > 		struct {
> > 			int i;
> > 			long ar[];
> > 		} B;
> > 		long __ar_storage[32];
> > 	} A;
> 
> struct A should have been an union, right?

I actually meant an enclosing struct.  When you're defining a struct
member, simply putting the storage after a struct with var array
should be good enough.  If that doesn't work, quite a few things in
the kernel will break.

Thanks.

-- 
tejun


* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 22:23               ` Tejun Heo
@ 2012-08-03 22:26                 ` Sasha Levin
  2012-08-03 22:29                 ` Linus Torvalds
  1 sibling, 0 replies; 34+ messages in thread
From: Sasha Levin @ 2012-08-03 22:26 UTC (permalink / raw)
  To: Tejun Heo
  Cc: torvalds, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On 08/04/2012 12:23 AM, Tejun Heo wrote:
> Hello,
> 
> On Sat, Aug 04, 2012 at 12:20:02AM +0200, Sasha Levin wrote:
>> On 08/03/2012 11:48 PM, Tejun Heo wrote:
>>> On Fri, Aug 03, 2012 at 11:41:34PM +0200, Sasha Levin wrote:
>>>> I forgot to comment on that one, sorry.
>>>>
>>>> If we put hash entries after struct hash_table we don't take the
>>>> bits field size into account, or did I miss something?
>>>
>>> So, if you do the following,
>>>
>>> 	struct {
>>> 		struct {
>>> 			int i;
>>> 			long ar[];
>>> 		} B;
>>> 		long __ar_storage[32];
>>> 	} A;
>>
>> struct A should have been an union, right?
> 
> I actually meant an enclosing struct.  When you're defining a struct
> member, simply putting the storage after a struct with var array
> should be good enough.  If that doesn't work, quite a few things in
> the kernel will break.

Ah, I see, I've missed that part.

Thanks!

> Thanks.
> 


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 22:23               ` Tejun Heo
  2012-08-03 22:26                 ` Sasha Levin
@ 2012-08-03 22:29                 ` Linus Torvalds
  2012-08-03 22:36                   ` Tejun Heo
  1 sibling, 1 reply; 34+ messages in thread
From: Linus Torvalds @ 2012-08-03 22:29 UTC (permalink / raw)
  To: Tejun Heo
  Cc: Sasha Levin, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On Fri, Aug 3, 2012 at 3:23 PM, Tejun Heo <tj@kernel.org> wrote:
>
> I actually meant an enclosing struct.  When you're defining a struct
> member, simply putting the storage after a struct with var array
> should be good enough.  If that doesn't work, quite a few things in
> the kernel will break.

The unsigned member of a struct has to be the last one, so your struct
won't work.

            Linus

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 22:29                 ` Linus Torvalds
@ 2012-08-03 22:36                   ` Tejun Heo
  2012-08-03 23:47                     ` Linus Torvalds
  0 siblings, 1 reply; 34+ messages in thread
From: Tejun Heo @ 2012-08-03 22:36 UTC (permalink / raw)
  To: Linus Torvalds
  Cc: Sasha Levin, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On Fri, Aug 03, 2012 at 03:29:10PM -0700, Linus Torvalds wrote:
> On Fri, Aug 3, 2012 at 3:23 PM, Tejun Heo <tj@kernel.org> wrote:
> >
> > I actually meant an enclosing struct.  When you're defining a struct
> > member, simply putting the storage after a struct with var array
> > should be good enough.  If that doesn't work, quite a few things in
> > the kernel will break.
> 
> The unsigned member of a struct has to be the last one, so your struct
> won't work.

I suppose you mean unsized.  I remember this working.  Maybe I'm
confusing it with zero-sized array.  Hmm... gcc doesn't complain about
the following.  --std=c99 seems happy too.

  #include <stdio.h>

  struct A {
	  int i;
	  long ar[];
  };

  struct B {
	  struct A a;
	  long ar_storage[32];
  };
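
  /*
   * ar[] is a flexible array member, so it adds nothing to sizeof(struct A);
   * within struct B, a.ar[] begins exactly where ar_storage does, so
   * a.ar[0..31] is backed by real memory (hence sizeof(A)=8 and
   * sizeof(B)=8+32*8=264 below).
   */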

  int main(void)
  {
	  printf("sizeof(A)=%zd sizeof(B)=%zd\n", sizeof(struct A), sizeof(struct B));
	  return 0;
  }

$ ./a.out
sizeof(A)=8 sizeof(B)=264

-- 
tejun

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 22:36                   ` Tejun Heo
@ 2012-08-03 23:47                     ` Linus Torvalds
  2012-08-04  0:03                       ` Sasha Levin
  2012-08-04  0:05                       ` Tejun Heo
  0 siblings, 2 replies; 34+ messages in thread
From: Linus Torvalds @ 2012-08-03 23:47 UTC (permalink / raw)
  To: Tejun Heo
  Cc: Sasha Levin, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On Fri, Aug 3, 2012 at 3:36 PM, Tejun Heo <tj@kernel.org> wrote:
>
> I suppose you mean unsized.  I remember this working.  Maybe I'm
> confusing it with zero-sized array.  Hmm... gcc doesn't complain about
> the following.  --std=c99 seems happy too.

Ok, I'm surprised, but maybe it's supposed to work if you do it inside
another struct like that, exactly so that you can preallocate things..

Or maybe it's just a gcc bug. I do think this all is way hackier than
Sasha's original simple code that didn't need these kinds of games,
and didn't need a size member at all.

I really think all the extra complexity and overhead is just *bad*.
The first simple version was much nicer and likely generated better
code too.

               Linus

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 23:47                     ` Linus Torvalds
@ 2012-08-04  0:03                       ` Sasha Levin
  2012-08-04  0:05                         ` Linus Torvalds
  2012-08-04  0:05                       ` Tejun Heo
  1 sibling, 1 reply; 34+ messages in thread
From: Sasha Levin @ 2012-08-04  0:03 UTC (permalink / raw)
  To: Linus Torvalds
  Cc: Tejun Heo, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

Hi Linus,

On 08/04/2012 01:47 AM, Linus Torvalds wrote:
> Or maybe it's just a gcc bug. I do think this all is way hackier than
> Sasha's original simple code that didn't need these kinds of games,
> and didn't need a size member at all.
> 
> I really think all the extra complexity and overhead is just *bad*.
> The first simple version was much nicer and likely generated better
> code too.

The problem with that code is that it doesn't work with dynamically allocated hashtables, or hashtables that grow/shrink.

The alternative to going down this path is going back to the old code and saying that it only covers the simple case; anything more complex should have its own separate implementation.

Does that make sense? We'd keep the simple, common case simple, and let anyone who needs something more complex write it as an extension.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-03 23:47                     ` Linus Torvalds
  2012-08-04  0:03                       ` Sasha Levin
@ 2012-08-04  0:05                       ` Tejun Heo
  1 sibling, 0 replies; 34+ messages in thread
From: Tejun Heo @ 2012-08-04  0:05 UTC (permalink / raw)
  To: Linus Torvalds
  Cc: Sasha Levin, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

Hello,

On Fri, Aug 03, 2012 at 04:47:47PM -0700, Linus Torvalds wrote:
> On Fri, Aug 3, 2012 at 3:36 PM, Tejun Heo <tj@kernel.org> wrote:
> >
> > I suppose you mean unsized.  I remember this working.  Maybe I'm
> > confusing it with zero-sized array.  Hmm... gcc doesn't complain about
> > the following.  --std=c99 seems happy too.
> 
> Ok, I'm surprised, but maybe it's supposed to work if you do it inside
> another struct like that, exactly so that you can preallocate things..

Yeah, I think the rule is var array should be the last member of any
given struct definition.  Once a struct is defined, its alignment and
size are fixed and it behaves like any other struct.

> Or maybe it's just a gcc bug. I do think this all is way hackier than
> Sasha's original simple code that didn't need these kinds of games,
> and didn't need a size member at all.
> 
> I really think all the extra complexity and overhead is just *bad*.
> The first simple version was much nicer and likely generated better
> code too.

The size member could have a performance impact in extreme cases.  If
we're looking for something simple & fast, maybe just pass in @size as
an argument and be done with it?
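
Something like the following, say (hypothetical helper, not what the
patch does):

  #define hash_add_bits(table, bits, node, key) \
	  hlist_add_head(node, &(table)[hash_32(key, bits)])

i.e. the table stays a bare array of hlist_head and nothing per-table
needs to be stored.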

Thanks.

-- 
tejun

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-04  0:03                       ` Sasha Levin
@ 2012-08-04  0:05                         ` Linus Torvalds
  2012-08-04  0:33                           ` Sasha Levin
  0 siblings, 1 reply; 34+ messages in thread
From: Linus Torvalds @ 2012-08-04  0:05 UTC (permalink / raw)
  To: Sasha Levin
  Cc: Tejun Heo, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On Fri, Aug 3, 2012 at 5:03 PM, Sasha Levin <levinsasha928@gmail.com> wrote:
>
> The problem with that code is that it doesn't work with dynamically allocated hashtables, or hashtables that grow/shrink.

Sure. But once you have that kind of complexity, why would you care
about the trivial cases?

             Linus

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [RFC v2 1/7] hashtable: introduce a small and naive hashtable
  2012-08-04  0:05                         ` Linus Torvalds
@ 2012-08-04  0:33                           ` Sasha Levin
  0 siblings, 0 replies; 34+ messages in thread
From: Sasha Levin @ 2012-08-04  0:33 UTC (permalink / raw)
  To: Linus Torvalds
  Cc: Tejun Heo, akpm, linux-kernel, linux-mm, paul.gortmaker, davem,
	rostedt, mingo, ebiederm, aarcange, ericvh, netdev

On 08/04/2012 02:05 AM, Linus Torvalds wrote:
> On Fri, Aug 3, 2012 at 5:03 PM, Sasha Levin <levinsasha928@gmail.com> wrote:
>>
>> The problem with that code is that it doesn't work with dynamically allocated hashtables, or hashtables that grow/shrink.
> 
> Sure. But once you have that kind of complexity, why would you care
> about the trivial cases?

Because there are far more trivial cases than complex ones - I've counted 50+ of these "trivial" cases.

None of them need the complexity we're trying to deal with at the moment.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [RFC v2 6/7] tracepoint: use new hashtable implementation
  2012-08-03 14:23 ` [RFC v2 6/7] tracepoint: " Sasha Levin
@ 2012-08-05  0:36   ` Steven Rostedt
  2012-08-05 16:31     ` Mathieu Desnoyers
  0 siblings, 1 reply; 34+ messages in thread
From: Steven Rostedt @ 2012-08-05  0:36 UTC (permalink / raw)
  To: Sasha Levin
  Cc: torvalds, tj, akpm, linux-kernel, linux-mm, paul.gortmaker,
	davem, mingo, ebiederm, aarcange, ericvh, netdev,
	Mathieu Desnoyers

FYI, Mathieu is the author of this file.

-- Steve


On Fri, 2012-08-03 at 16:23 +0200, Sasha Levin wrote:
> Switch tracepoints to use the new hashtable implementation. This reduces the amount of
> generic unrelated code in the tracepoints.
> 
> Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
> ---
>  kernel/tracepoint.c |   26 +++++++++-----------------
>  1 files changed, 9 insertions(+), 17 deletions(-)
> 
> diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
> index d96ba22..b5a2650 100644
> --- a/kernel/tracepoint.c
> +++ b/kernel/tracepoint.c
> @@ -26,6 +26,7 @@
>  #include <linux/slab.h>
>  #include <linux/sched.h>
>  #include <linux/static_key.h>
> +#include <linux/hashtable.h>
>  
>  extern struct tracepoint * const __start___tracepoints_ptrs[];
>  extern struct tracepoint * const __stop___tracepoints_ptrs[];
> @@ -49,8 +50,7 @@ static LIST_HEAD(tracepoint_module_list);
>   * Protected by tracepoints_mutex.
>   */
>  #define TRACEPOINT_HASH_BITS 6
> -#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
> -static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
> +DEFINE_STATIC_HASHTABLE(tracepoint_table, TRACEPOINT_HASH_BITS);
>  
>  /*
>   * Note about RCU :
> @@ -191,16 +191,14 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
>   */
>  static struct tracepoint_entry *get_tracepoint(const char *name)
>  {
> -	struct hlist_head *head;
>  	struct hlist_node *node;
>  	struct tracepoint_entry *e;
>  	u32 hash = jhash(name, strlen(name), 0);
>  
> -	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
> -	hlist_for_each_entry(e, node, head, hlist) {
> +	hash_for_each_possible(&tracepoint_table, node, e, hlist, hash)
>  		if (!strcmp(name, e->name))
>  			return e;
> -	}
> +
>  	return NULL;
>  }
>  
> @@ -210,19 +208,13 @@ static struct tracepoint_entry *get_tracepoint(const char *name)
>   */
>  static struct tracepoint_entry *add_tracepoint(const char *name)
>  {
> -	struct hlist_head *head;
> -	struct hlist_node *node;
>  	struct tracepoint_entry *e;
>  	size_t name_len = strlen(name) + 1;
>  	u32 hash = jhash(name, name_len-1, 0);
>  
> -	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
> -	hlist_for_each_entry(e, node, head, hlist) {
> -		if (!strcmp(name, e->name)) {
> -			printk(KERN_NOTICE
> -				"tracepoint %s busy\n", name);
> -			return ERR_PTR(-EEXIST);	/* Already there */
> -		}
> +	if (get_tracepoint(name)) {
> +		printk(KERN_NOTICE "tracepoint %s busy\n", name);
> +		return ERR_PTR(-EEXIST);	/* Already there */
>  	}
>  	/*
>  	 * Using kmalloc here to allocate a variable length element. Could
> @@ -234,7 +226,7 @@ static struct tracepoint_entry *add_tracepoint(const char *name)
>  	memcpy(&e->name[0], name, name_len);
>  	e->funcs = NULL;
>  	e->refcount = 0;
> -	hlist_add_head(&e->hlist, head);
> +	hash_add(&tracepoint_table, &e->hlist, hash);
>  	return e;
>  }
>  
> @@ -244,7 +236,7 @@ static struct tracepoint_entry *add_tracepoint(const char *name)
>   */
>  static inline void remove_tracepoint(struct tracepoint_entry *e)
>  {
> -	hlist_del(&e->hlist);
> +	hash_del(&e->hlist);
>  	kfree(e);
>  }
>  



^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [RFC v2 2/7] user_ns: use new hashtable implementation
  2012-08-03 14:23 ` [RFC v2 2/7] user_ns: use new hashtable implementation Sasha Levin
@ 2012-08-05  0:58   ` Eric W. Biederman
  0 siblings, 0 replies; 34+ messages in thread
From: Eric W. Biederman @ 2012-08-05  0:58 UTC (permalink / raw)
  To: Sasha Levin
  Cc: torvalds, tj, akpm, linux-kernel, linux-mm, paul.gortmaker,
	davem, rostedt, mingo, aarcange, ericvh, netdev

Sasha Levin <levinsasha928@gmail.com> writes:

> Switch user_ns to use the new hashtable implementation. This reduces the amount of
> generic unrelated code in user_ns.


Just looking at this ick.

- Your comparison function is broken.
- The naming is awkward.
    hash_get without a reference count being incremented?
- The magic is deep.
   hash_get is named like a function but takes a piece of code to call
   like only a macro can.
- uid_hash_find always bumped the reference count
  but your uidhash_entry doesn't nor do all of the callers of
  uidhash_entry bump the reference count.
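
To be explicit about the first point: the uid_hash_find() being removed
compares keys with uid_eq(), while the new UIDHASH_CMP() uses a raw '=='
on a kuid_t.  Presumably it would at least have to be something like:

	#define UIDHASH_CMP(obj, key)	uid_eq((obj)->uid, (key))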

Nacked-by: "Eric W. Biederman" <ebiederm@xmission.com>

I don't have the time for a new improved better hash table that makes
the code buggier.

Eric


> Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
> ---
>  kernel/user.c |   53 ++++++++++++++++++-----------------------------------
>  1 files changed, 18 insertions(+), 35 deletions(-)
>
> diff --git a/kernel/user.c b/kernel/user.c
> index b815fef..555c71a 100644
> --- a/kernel/user.c
> +++ b/kernel/user.c
> @@ -16,6 +16,7 @@
>  #include <linux/interrupt.h>
>  #include <linux/export.h>
>  #include <linux/user_namespace.h>
> +#include <linux/hashtable.h>
>  
>  /*
>   * userns count is 1 for root user, 1 for init_uts_ns,
> @@ -50,15 +51,14 @@ EXPORT_SYMBOL_GPL(init_user_ns);
>   * UID task count cache, to get fast user lookup in "alloc_uid"
>   * when changing user ID's (ie setuid() and friends).
>   */
> -
> -#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
> -#define UIDHASH_SZ	(1 << UIDHASH_BITS)
> -#define UIDHASH_MASK		(UIDHASH_SZ - 1)
> -#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
> -#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
> +#define UIDHASH_BITS		(CONFIG_BASE_SMALL ? 3 : 7)
> +#define UIDHASH_CMP(obj, key) 	((obj)->uid == (key))
> +#define uidhash_entry(key)	(hash_get(&uidhash_table, key,		\
> +				struct user_struct, uidhash_node,	\
> +				UIDHASH_CMP))
>  
>  static struct kmem_cache *uid_cachep;
> -struct hlist_head uidhash_table[UIDHASH_SZ];
> +DEFINE_STATIC_HASHTABLE(uidhash_table, UIDHASH_BITS);
>  
>  /*
>   * The uidhash_lock is mostly taken from process context, but it is
> @@ -84,29 +84,14 @@ struct user_struct root_user = {
>  /*
>   * These routines must be called with the uidhash spinlock held!
>   */
> -static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
> +static void uid_hash_insert(struct user_struct *up)
>  {
> -	hlist_add_head(&up->uidhash_node, hashent);
> +	hash_add(&uidhash_table, &up->uidhash_node, up->uid);
>  }
>  
>  static void uid_hash_remove(struct user_struct *up)
>  {
> -	hlist_del_init(&up->uidhash_node);
> -}
> -
> -static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
> -{
> -	struct user_struct *user;
> -	struct hlist_node *h;
> -
> -	hlist_for_each_entry(user, h, hashent, uidhash_node) {
> -		if (uid_eq(user->uid, uid)) {
> -			atomic_inc(&user->__count);
> -			return user;
> -		}
> -	}
> -
> -	return NULL;
> +	hash_del(&up->uidhash_node);
>  }
>  
>  /* IRQs are disabled and uidhash_lock is held upon function entry.
> @@ -135,7 +120,9 @@ struct user_struct *find_user(kuid_t uid)
>  	unsigned long flags;
>  
>  	spin_lock_irqsave(&uidhash_lock, flags);
> -	ret = uid_hash_find(uid, uidhashentry(uid));
> +	ret = uidhash_entry(uid);
> +	if (ret)
> +		atomic_inc(&ret->__count);
>  	spin_unlock_irqrestore(&uidhash_lock, flags);
>  	return ret;
>  }
> @@ -156,11 +143,10 @@ void free_uid(struct user_struct *up)
>  
>  struct user_struct *alloc_uid(kuid_t uid)
>  {
> -	struct hlist_head *hashent = uidhashentry(uid);
>  	struct user_struct *up, *new;
>  
>  	spin_lock_irq(&uidhash_lock);
> -	up = uid_hash_find(uid, hashent);
> +	up = uidhash_entry(uid);
>  	spin_unlock_irq(&uidhash_lock);
>  
>  	if (!up) {
> @@ -176,13 +162,13 @@ struct user_struct *alloc_uid(kuid_t uid)
>  		 * on adding the same user already..
>  		 */
>  		spin_lock_irq(&uidhash_lock);
> -		up = uid_hash_find(uid, hashent);
> +		up = uidhash_entry(uid);
>  		if (up) {
>  			key_put(new->uid_keyring);
>  			key_put(new->session_keyring);
>  			kmem_cache_free(uid_cachep, new);
>  		} else {
> -			uid_hash_insert(new, hashent);
> +			uid_hash_insert(new);
>  			up = new;
>  		}
>  		spin_unlock_irq(&uidhash_lock);
> @@ -196,17 +182,14 @@ out_unlock:
>  
>  static int __init uid_cache_init(void)
>  {
> -	int n;
> -
>  	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
>  			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
>  
> -	for(n = 0; n < UIDHASH_SZ; ++n)
> -		INIT_HLIST_HEAD(uidhash_table + n);
> +	hash_init(&uidhash_table, UIDHASH_BITS);
>  
>  	/* Insert the root user immediately (init already runs as root) */
>  	spin_lock_irq(&uidhash_lock);
> -	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
> +	uid_hash_insert(&root_user);
>  	spin_unlock_irq(&uidhash_lock);
>  
>  	return 0;

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [RFC v2 6/7] tracepoint: use new hashtable implementation
  2012-08-05  0:36   ` Steven Rostedt
@ 2012-08-05 16:31     ` Mathieu Desnoyers
  2012-08-05 17:03       ` Sasha Levin
  0 siblings, 1 reply; 34+ messages in thread
From: Mathieu Desnoyers @ 2012-08-05 16:31 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: Sasha Levin, torvalds, tj, akpm, linux-kernel, linux-mm,
	paul.gortmaker, davem, mingo, ebiederm, aarcange, ericvh, netdev

* Steven Rostedt (rostedt@goodmis.org) wrote:
> FYI, Mathieu is the author of this file.
> 
> -- Steve
> 
> 
> On Fri, 2012-08-03 at 16:23 +0200, Sasha Levin wrote:
> > Switch tracepoints to use the new hashtable implementation. This reduces the amount of
> > generic unrelated code in the tracepoints.
> > 
> > Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
> > ---
> >  kernel/tracepoint.c |   26 +++++++++-----------------
> >  1 files changed, 9 insertions(+), 17 deletions(-)
> > 
> > diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
> > index d96ba22..b5a2650 100644
> > --- a/kernel/tracepoint.c
> > +++ b/kernel/tracepoint.c
> > @@ -26,6 +26,7 @@
> >  #include <linux/slab.h>
> >  #include <linux/sched.h>
> >  #include <linux/static_key.h>
> > +#include <linux/hashtable.h>
> >  
> >  extern struct tracepoint * const __start___tracepoints_ptrs[];
> >  extern struct tracepoint * const __stop___tracepoints_ptrs[];
> > @@ -49,8 +50,7 @@ static LIST_HEAD(tracepoint_module_list);
> >   * Protected by tracepoints_mutex.
> >   */
> >  #define TRACEPOINT_HASH_BITS 6
> > -#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
> > -static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
> > +DEFINE_STATIC_HASHTABLE(tracepoint_table, TRACEPOINT_HASH_BITS);

I wonder why the "static" has been embedded within
"DEFINE_STATIC_HASHTABLE" ? I'm used to see something similar to:

static DEFINE_HASHTABLE(tracepoint_table, TRACEPOINT_HASH_BITS);

elsewhere in the kernel (e.g. static DEFINE_PER_CPU(), static
DEFINE_MUTEX(), etc).

> >  
> >  /*
> >   * Note about RCU :
> > @@ -191,16 +191,14 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
> >   */
> >  static struct tracepoint_entry *get_tracepoint(const char *name)
> >  {
> > -	struct hlist_head *head;
> >  	struct hlist_node *node;
> >  	struct tracepoint_entry *e;
> >  	u32 hash = jhash(name, strlen(name), 0);
> >  
> > -	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
> > -	hlist_for_each_entry(e, node, head, hlist) {
> > +	hash_for_each_possible(&tracepoint_table, node, e, hlist, hash)
> >  		if (!strcmp(name, e->name))
> >  			return e;
> > -	}
> > +

Typically, where there are 2 or more nesting levels, I try to keep the
outer brackets, even if the 1st level only contains a single statement
(this is what I did across tracepoint.c). This is especially useful when
nesting multiple if levels, as it ensures the "else" clause matches the
right "if". We might want to keep it that way within the file, to ensure
style consistency.
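
For example (a generic illustration, not code from tracepoint.c):

	if (a)
		if (b)
			do_x();
	else		/* despite the indentation, this binds to "if (b)" */
		do_y();

whereas keeping the outer brackets makes the intended pairing explicit:

	if (a) {
		if (b)
			do_x();
	} else {
		do_y();
	}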

Other than that, it looks good!

Thanks!

Mathieu

> >  	return NULL;
> >  }
> >  
> > @@ -210,19 +208,13 @@ static struct tracepoint_entry *get_tracepoint(const char *name)
> >   */
> >  static struct tracepoint_entry *add_tracepoint(const char *name)
> >  {
> > -	struct hlist_head *head;
> > -	struct hlist_node *node;
> >  	struct tracepoint_entry *e;
> >  	size_t name_len = strlen(name) + 1;
> >  	u32 hash = jhash(name, name_len-1, 0);
> >  
> > -	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
> > -	hlist_for_each_entry(e, node, head, hlist) {
> > -		if (!strcmp(name, e->name)) {
> > -			printk(KERN_NOTICE
> > -				"tracepoint %s busy\n", name);
> > -			return ERR_PTR(-EEXIST);	/* Already there */
> > -		}
> > +	if (get_tracepoint(name)) {
> > +		printk(KERN_NOTICE "tracepoint %s busy\n", name);
> > +		return ERR_PTR(-EEXIST);	/* Already there */
> >  	}
> >  	/*
> >  	 * Using kmalloc here to allocate a variable length element. Could
> > @@ -234,7 +226,7 @@ static struct tracepoint_entry *add_tracepoint(const char *name)
> >  	memcpy(&e->name[0], name, name_len);
> >  	e->funcs = NULL;
> >  	e->refcount = 0;
> > -	hlist_add_head(&e->hlist, head);
> > +	hash_add(&tracepoint_table, &e->hlist, hash);
> >  	return e;
> >  }
> >  
> > @@ -244,7 +236,7 @@ static struct tracepoint_entry *add_tracepoint(const char *name)
> >   */
> >  static inline void remove_tracepoint(struct tracepoint_entry *e)
> >  {
> > -	hlist_del(&e->hlist);
> > +	hash_del(&e->hlist);
> >  	kfree(e);
> >  }
> >  
> 
> 

-- 
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [RFC v2 6/7] tracepoint: use new hashtable implementation
  2012-08-05 16:31     ` Mathieu Desnoyers
@ 2012-08-05 17:03       ` Sasha Levin
  2012-08-05 17:12         ` Mathieu Desnoyers
  0 siblings, 1 reply; 34+ messages in thread
From: Sasha Levin @ 2012-08-05 17:03 UTC (permalink / raw)
  To: Mathieu Desnoyers
  Cc: Steven Rostedt, torvalds, tj, akpm, linux-kernel, linux-mm,
	paul.gortmaker, davem, mingo, ebiederm, aarcange, ericvh, netdev

On 08/05/2012 06:31 PM, Mathieu Desnoyers wrote:
> * Steven Rostedt (rostedt@goodmis.org) wrote:
>> FYI, Mathieu is the author of this file.
>>
>> -- Steve
>>
>>
>> On Fri, 2012-08-03 at 16:23 +0200, Sasha Levin wrote:
>>> Switch tracepoints to use the new hashtable implementation. This reduces the amount of
>>> generic unrelated code in the tracepoints.
>>>
>>> Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
>>> ---
>>>  kernel/tracepoint.c |   26 +++++++++-----------------
>>>  1 files changed, 9 insertions(+), 17 deletions(-)
>>>
>>> diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
>>> index d96ba22..b5a2650 100644
>>> --- a/kernel/tracepoint.c
>>> +++ b/kernel/tracepoint.c
>>> @@ -26,6 +26,7 @@
>>>  #include <linux/slab.h>
>>>  #include <linux/sched.h>
>>>  #include <linux/static_key.h>
>>> +#include <linux/hashtable.h>
>>>  
>>>  extern struct tracepoint * const __start___tracepoints_ptrs[];
>>>  extern struct tracepoint * const __stop___tracepoints_ptrs[];
>>> @@ -49,8 +50,7 @@ static LIST_HEAD(tracepoint_module_list);
>>>   * Protected by tracepoints_mutex.
>>>   */
>>>  #define TRACEPOINT_HASH_BITS 6
>>> -#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
>>> -static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
>>> +DEFINE_STATIC_HASHTABLE(tracepoint_table, TRACEPOINT_HASH_BITS);
> 
> I wonder why the "static" has been embedded within
> "DEFINE_STATIC_HASHTABLE" ? I'm used to see something similar to:
> 
> static DEFINE_HASHTABLE(tracepoint_table, TRACEPOINT_HASH_BITS);
> 
> elsewhere in the kernel (e.g. static DEFINE_PER_CPU(), static
> DEFINE_MUTEX(), etc).

We had to create two different definitions of the hashtable: one to be used at file scope as a static, and one to be embedded in structs. I chose the uglier way of doing it, and Tejun already pointed it out :)

It will be much nicer in the future.

>>>  
>>>  /*
>>>   * Note about RCU :
>>> @@ -191,16 +191,14 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
>>>   */
>>>  static struct tracepoint_entry *get_tracepoint(const char *name)
>>>  {
>>> -	struct hlist_head *head;
>>>  	struct hlist_node *node;
>>>  	struct tracepoint_entry *e;
>>>  	u32 hash = jhash(name, strlen(name), 0);
>>>  
>>> -	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
>>> -	hlist_for_each_entry(e, node, head, hlist) {
>>> +	hash_for_each_possible(&tracepoint_table, node, e, hlist, hash)
>>>  		if (!strcmp(name, e->name))
>>>  			return e;
>>> -	}
>>> +
> 
> Typically, where there are 2 or more nesting levels, I try to keep the
> outer brackets, even if the 1st level only contains a single statement
> (this is what I did across tracepoint.c). This is especially useful when
> nesting multiple if levels, as it ensures the "else" clause matches the
> right "if". We might want to keep it that way within the file, to ensure
> style consistency.

Roger that, will fix.

> Other than that, it looks good!
> 
> Thanks!
> 
> Mathieu
> 

Thanks for the review Mathieu!


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [RFC v2 6/7] tracepoint: use new hashtable implementation
  2012-08-05 17:03       ` Sasha Levin
@ 2012-08-05 17:12         ` Mathieu Desnoyers
  0 siblings, 0 replies; 34+ messages in thread
From: Mathieu Desnoyers @ 2012-08-05 17:12 UTC (permalink / raw)
  To: Sasha Levin
  Cc: Steven Rostedt, torvalds, tj, akpm, linux-kernel, linux-mm,
	paul.gortmaker, davem, mingo, ebiederm, aarcange, ericvh, netdev,
	Lai Jiangshan, Paul E. McKenney

* Sasha Levin (levinsasha928@gmail.com) wrote:
[...]
> > Other than that, it looks good!
> > 
> > Thanks!
> > 
> > Mathieu
> > 
> 
> Thanks for the review Mathieu!

No problem! By the way, if you want to have a look at another hash table
API for ideas, here is the RCU lock-free hash table API, within the
Userspace RCU tree:

from git://git.lttng.org/userspace-rcu.git
branch: master
API: urcu/rculfhash.h
core code: rculfhash.c
hash table index memory management:
        rculfhash-mm-chunk.c, rculfhash-mm-mmap.c, rculfhash-mm-order.c

The git tree is down today due to electrical maintenance, so I am
appending the hash table API below.


#ifndef _URCU_RCULFHASH_H
#define _URCU_RCULFHASH_H

/*
 * urcu/rculfhash.h
 *
 * Userspace RCU library - Lock-Free RCU Hash Table
 *
 * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Include this file _after_ including your URCU flavor.
 */

#include <stdint.h>
#include <urcu/compiler.h>
#include <urcu-call-rcu.h>
#include <urcu-flavor.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * cds_lfht_node: Contains the next pointers and reverse-hash
 * value required for lookup and traversal of the hash table.
 *
 * struct cds_lfht_node should be aligned on 8-bytes boundaries because
 * the three lower bits are used as flags. It is worth noting that the
 * information contained within these three bits could be represented on
 * two bits by re-using the same bit for REMOVAL_OWNER_FLAG and
 * BUCKET_FLAG. This can be done if we ensure that no iterator nor
 * updater check the BUCKET_FLAG after it detects that the REMOVED_FLAG
 * is set. Given the minimum size of struct cds_lfht_node is 8 bytes on
 * 32-bit architectures, we choose to go for simplicity and reserve
 * three bits.
 *
 * struct cds_lfht_node can be embedded into a structure (as a field).
 * caa_container_of() can be used to get the structure from the struct
 * cds_lfht_node after a lookup.
 *
 * The structure which embeds it typically holds the key (or key-value
 * pair) of the object. The caller code is responsible for calculation
 * of the hash value for cds_lfht APIs.
 */
struct cds_lfht_node {
	struct cds_lfht_node *next;	/* ptr | REMOVAL_OWNER_FLAG | BUCKET_FLAG | REMOVED_FLAG */
	unsigned long reverse_hash;
} __attribute__((aligned(8)));

/* cds_lfht_iter: Used to track state while traversing a hash chain. */
struct cds_lfht_iter {
	struct cds_lfht_node *node, *next;
};

static inline
struct cds_lfht_node *cds_lfht_iter_get_node(struct cds_lfht_iter *iter)
{
	return iter->node;
}

struct cds_lfht;

/*
 * Caution !
 * Ensure reader and writer threads are registered as urcu readers.
 */

typedef int (*cds_lfht_match_fct)(struct cds_lfht_node *node, const void *key);

/*
 * cds_lfht_node_init - initialize a hash table node
 * @node: the node to initialize.
 *
 * This function is kept to be eventually used for debugging purposes
 * (detection of memory corruption).
 */
static inline
void cds_lfht_node_init(struct cds_lfht_node *node)
{
}

/*
 * Hash table creation flags.
 */
enum {
	CDS_LFHT_AUTO_RESIZE = (1U << 0),
	CDS_LFHT_ACCOUNTING = (1U << 1),
};

struct cds_lfht_mm_type {
	struct cds_lfht *(*alloc_cds_lfht)(unsigned long min_nr_alloc_buckets,
			unsigned long max_nr_buckets);
	void (*alloc_bucket_table)(struct cds_lfht *ht, unsigned long order);
	void (*free_bucket_table)(struct cds_lfht *ht, unsigned long order);
	struct cds_lfht_node *(*bucket_at)(struct cds_lfht *ht,
			unsigned long index);
};

extern const struct cds_lfht_mm_type cds_lfht_mm_order;
extern const struct cds_lfht_mm_type cds_lfht_mm_chunk;
extern const struct cds_lfht_mm_type cds_lfht_mm_mmap;

/*
 * _cds_lfht_new - API used by cds_lfht_new wrapper. Do not use directly.
 */
struct cds_lfht *_cds_lfht_new(unsigned long init_size,
			unsigned long min_nr_alloc_buckets,
			unsigned long max_nr_buckets,
			int flags,
			const struct cds_lfht_mm_type *mm,
			const struct rcu_flavor_struct *flavor,
			pthread_attr_t *attr);

/*
 * cds_lfht_new - allocate a hash table.
 * @init_size: number of buckets to allocate initially. Must be power of two.
 * @min_nr_alloc_buckets: the minimum number of allocated buckets.
 *                        (must be power of two)
 * @max_nr_buckets: the maximum number of hash table buckets allowed.
 *                  (must be power of two)
 * @flags: hash table creation flags (can be combined with bitwise or: '|').
 *           0: no flags.
 *           CDS_LFHT_AUTO_RESIZE: automatically resize hash table.
 *           CDS_LFHT_ACCOUNTING: count the number of node addition
 *                                and removal in the table
 * @attr: optional resize worker thread attributes. NULL for default.
 *
 * Return NULL on error.
 * Note: the RCU flavor must be already included before the hash table header.
 *
 * The programmer is responsible for ensuring that resize operation has a
 * priority equal to hash table updater threads. It should be performed by
 * specifying the appropriate priority in the pthread "attr" argument, and,
 * for CDS_LFHT_AUTO_RESIZE, by ensuring that call_rcu worker threads also have
 * this priority level. Having lower priority for call_rcu and resize threads
 * does not pose any correctness issue, but the resize operations could be
 * starved by updates, thus leading to long hash table bucket chains.
 * Threads calling cds_lfht_new are NOT required to be registered RCU
 * read-side threads. It can be called very early (e.g. before RCU is
 * initialized).
 */
static inline
struct cds_lfht *cds_lfht_new(unsigned long init_size,
			unsigned long min_nr_alloc_buckets,
			unsigned long max_nr_buckets,
			int flags,
			pthread_attr_t *attr)
{
	return _cds_lfht_new(init_size, min_nr_alloc_buckets, max_nr_buckets,
			flags, NULL, &rcu_flavor, attr);
}

/*
 * cds_lfht_destroy - destroy a hash table.
 * @ht: the hash table to destroy.
 * @attr: (output) resize worker thread attributes, as received by cds_lfht_new.
 *        The caller will typically want to free this pointer if dynamically
 *        allocated. The attr pointer can be NULL if the caller does not
 *        need to be informed of the value passed to cds_lfht_new().
 *
 * Return 0 on success, negative error value on error.
 * Threads calling this API need to be registered RCU read-side threads.
 */
int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr);

/*
 * cds_lfht_count_nodes - count the number of nodes in the hash table.
 * @ht: the hash table.
 * @split_count_before: sample the node count split-counter before traversal.
 * @count: traverse the hash table, count the number of nodes observed.
 * @split_count_after: sample the node count split-counter after traversal.
 *
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 */
void cds_lfht_count_nodes(struct cds_lfht *ht,
		long *split_count_before,
		unsigned long *count,
		long *split_count_after);

/*
 * cds_lfht_lookup - lookup a node by key.
 * @ht: the hash table.
 * @hash: the key hash.
 * @match: the key match function.
 * @key: the current node key.
 * @iter: node, if found (output). *iter->node set to NULL if not found.
 *
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * This function acts as a rcu_dereference() to read the node pointer.
 */
void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
		cds_lfht_match_fct match, const void *key,
		struct cds_lfht_iter *iter);

/*
 * cds_lfht_next_duplicate - get the next item with same key, after iterator.
 * @ht: the hash table.
 * @match: the key match function.
 * @key: the current node key.
 * @iter: input: current iterator.
 *        output: node, if found. *iter->node set to NULL if not found.
 *
 * Uses an iterator initialized by a lookup or traversal. Important: the
 * iterator _needs_ to be initialized before calling
 * cds_lfht_next_duplicate.
 * Sets *iter->node to the following node with same key.
 * Sets *iter->node to NULL if no following node exists with same key.
 * RCU read-side lock must be held across cds_lfht_lookup and
 * cds_lfht_next calls, and also between cds_lfht_next calls using the
 * node returned by a previous cds_lfht_next.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * This function acts as a rcu_dereference() to read the node pointer.
 */
void cds_lfht_next_duplicate(struct cds_lfht *ht,
		cds_lfht_match_fct match, const void *key,
		struct cds_lfht_iter *iter);

/*
 * cds_lfht_first - get the first node in the table.
 * @ht: the hash table.
 * @iter: First node, if exists (output). *iter->node set to NULL if not found.
 *
 * Output in "*iter". *iter->node set to NULL if table is empty.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * This function acts as a rcu_dereference() to read the node pointer.
 */
void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter);

/*
 * cds_lfht_next - get the next node in the table.
 * @ht: the hash table.
 * @iter: input: current iterator.
 *        output: next node, if exists. *iter->node set to NULL if not found.
 *
 * Input/Output in "*iter". *iter->node set to NULL if *iter was
 * pointing to the last table node.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * This function acts as a rcu_dereference() to read the node pointer.
 */
void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter);

/*
 * cds_lfht_add - add a node to the hash table.
 * @ht: the hash table.
 * @hash: the key hash.
 * @node: the node to add.
 *
 * This function supports adding redundant keys into the table.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * This function issues a full memory barrier before and after its
 * atomic commit.
 */
void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
		struct cds_lfht_node *node);

/*
 * cds_lfht_add_unique - add a node to hash table, if key is not present.
 * @ht: the hash table.
 * @hash: the node's hash.
 * @match: the key match function.
 * @key: the node's key.
 * @node: the node to try adding.
 *
 * Return the node added upon success.
 * Return the unique node already present upon failure. If
 * cds_lfht_add_unique fails, the node passed as parameter should be
 * freed by the caller. In this case, the caller does NOT need to wait
 * for a grace period before freeing the node.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 *
 * The semantic of this function is that if only this function is used
 * to add keys into the table, no duplicated keys should ever be
 * observable in the table. The same guarantee applies for a combination of
 * add_unique and add_replace (see below).
 *
 * Upon success, this function issues a full memory barrier before and
 * after its atomic commit. Upon failure, this function acts like a
 * simple lookup operation: it acts as a rcu_dereference() to read the
 * node pointer. The failure case does not guarantee any other memory
 * barrier.
 */
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
		unsigned long hash,
		cds_lfht_match_fct match,
		const void *key,
		struct cds_lfht_node *node);

/*
 * cds_lfht_add_replace - replace or add a node within hash table.
 * @ht: the hash table.
 * @hash: the node's hash.
 * @match: the key match function.
 * @key: the node's key.
 * @node: the node to add.
 *
 * Return the node replaced upon success. If no node matching the key
 * was present, return NULL, which also means the operation succeeded.
 * This replacement operation should never fail.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * After successful replacement, a grace period must be waited for before
 * freeing the memory reserved for the returned node.
 *
 * The semantic of replacement vs lookups and traversals is the
 * following: if lookups and traversals are performed between a key
 * unique insertion and its removal, we guarantee that the lookups and
 * traversals will always find exactly one instance of the key if it is
 * replaced concurrently with the lookups.
 *
 * Providing this semantic allows us to ensure that replacement-only
 * schemes will never generate duplicated keys. It also allows us to
 * guarantee that a combination of add_replace and add_unique updates
 * will never generate duplicated keys.
 *
 * This function issues a full memory barrier before and after its
 * atomic commit.
 */
struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
		unsigned long hash,
		cds_lfht_match_fct match,
		const void *key,
		struct cds_lfht_node *node);

/*
 * cds_lfht_replace - replace a node pointed to by iter within hash table.
 * @ht: the hash table.
 * @old_iter: the iterator position of the node to replace.
 * @hash: the node's hash.
 * @match: the key match function.
 * @key: the node's key.
 * @new_node: the new node to use as replacement.
 *
 * Return 0 if replacement is successful, negative value otherwise.
 * Replacing a NULL old node or an already removed node will fail with
 * -ENOENT.
 * If the hash or value of the node to replace and the new node differ,
 * this function returns -EINVAL without proceeding to the replacement.
 * Old node can be looked up with cds_lfht_lookup and cds_lfht_next.
 * RCU read-side lock must be held between lookup and replacement.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * After successful replacement, a grace period must be waited for before
 * freeing the memory reserved for the old node (which can be accessed
 * with cds_lfht_iter_get_node).
 *
 * The semantic of replacement vs lookups is the same as
 * cds_lfht_add_replace().
 *
 * Upon success, this function issues a full memory barrier before and
 * after its atomic commit. Upon failure, this function does not issue
 * any memory barrier.
 */
int cds_lfht_replace(struct cds_lfht *ht,
		struct cds_lfht_iter *old_iter,
		unsigned long hash,
		cds_lfht_match_fct match,
		const void *key,
		struct cds_lfht_node *new_node);

/*
 * cds_lfht_del - remove node pointed to by iterator from hash table.
 * @ht: the hash table.
 * @node: the node to delete.
 *
 * Return 0 if the node is successfully removed, negative value
 * otherwise.
 * Deleting a NULL node or an already removed node will fail with a
 * negative value.
 * Node can be looked up with cds_lfht_lookup and cds_lfht_next,
 * followed by use of cds_lfht_iter_get_node.
 * RCU read-side lock must be held between lookup and removal.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * After successful removal, a grace period must be waited for before
 * freeing the memory reserved for old node (which can be accessed with
 * cds_lfht_iter_get_node).
 * Upon success, this function issues a full memory barrier before and
 * after its atomic commit. Upon failure, this function does not issue
 * any memory barrier.
 */
int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node);

/*
 * cds_lfht_is_node_deleted - query whether a node is removed from hash table.
 *
 * Return non-zero if the node is deleted from the hash table, 0
 * otherwise.
 * Node can be looked up with cds_lfht_lookup and cds_lfht_next,
 * followed by use of cds_lfht_iter_get_node.
 * RCU read-side lock must be held between lookup and call to this
 * function.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * This function does not issue any memory barrier.
 */
int cds_lfht_is_node_deleted(struct cds_lfht_node *node);

/*
 * cds_lfht_resize - Force a hash table resize
 * @ht: the hash table.
 * @new_size: update to this hash table size.
 *
 * Threads calling this API need to be registered RCU read-side threads.
 * This function does not (necessarily) issue memory barriers.
 */
void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size);

/*
 * Note: it is safe to perform element removal (del), replacement, or
 * any hash table update operation during any of the following hash
 * table traversals.
 * These functions act as rcu_dereference() to read the node pointers.
 */
#define cds_lfht_for_each(ht, iter, node)				\
	for (cds_lfht_first(ht, iter),					\
			node = cds_lfht_iter_get_node(iter);		\
		node != NULL;						\
		cds_lfht_next(ht, iter),				\
			node = cds_lfht_iter_get_node(iter))

#define cds_lfht_for_each_duplicate(ht, hash, match, key, iter, node)	\
	for (cds_lfht_lookup(ht, hash, match, key, iter),		\
			node = cds_lfht_iter_get_node(iter);		\
		node != NULL;						\
		cds_lfht_next_duplicate(ht, match, key, iter),		\
			node = cds_lfht_iter_get_node(iter))

#define cds_lfht_for_each_entry(ht, iter, pos, member)			\
	for (cds_lfht_first(ht, iter),					\
			pos = caa_container_of(cds_lfht_iter_get_node(iter), \
					__typeof__(*(pos)), member);	\
		&(pos)->member != NULL;					\
		cds_lfht_next(ht, iter),				\
			pos = caa_container_of(cds_lfht_iter_get_node(iter), \
					__typeof__(*(pos)), member))

#define cds_lfht_for_each_entry_duplicate(ht, hash, match, key,		\
				iter, pos, member)			\
	for (cds_lfht_lookup(ht, hash, match, key, iter),		\
			pos = caa_container_of(cds_lfht_iter_get_node(iter), \
					__typeof__(*(pos)), member);	\
		&(pos)->member != NULL;					\
		cds_lfht_next_duplicate(ht, match, key, iter),		\
			pos = caa_container_of(cds_lfht_iter_get_node(iter), \
					__typeof__(*(pos)), member))

#ifdef __cplusplus
}
#endif

#endif /* _URCU_RCULFHASH_H */
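
For completeness, a minimal lookup sketch pieced together from the API
above (struct mynode, match_key() and hash_key() are made up for
illustration; callers must be registered RCU read-side threads):

struct mynode {
	int key;
	struct cds_lfht_node node;
};

static int match_key(struct cds_lfht_node *node, const void *key)
{
	struct mynode *m = caa_container_of(node, struct mynode, node);

	return m->key == *(const int *)key;
}

static struct mynode *lookup(struct cds_lfht *ht, int key)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *found;

	rcu_read_lock();
	cds_lfht_lookup(ht, hash_key(key), match_key, &key, &iter);
	found = cds_lfht_iter_get_node(&iter);
	rcu_read_unlock();

	/*
	 * A removed node may be freed after a grace period; dereference
	 * the result only under protection appropriate to the caller.
	 */
	return found ? caa_container_of(found, struct mynode, node) : NULL;
}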




-- 
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com

^ permalink raw reply	[flat|nested] 34+ messages in thread

end of thread, other threads:[~2012-08-05 17:13 UTC | newest]

Thread overview: 34+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-08-03 14:23 [RFC v2 0/7] generic hashtable implementation Sasha Levin
2012-08-03 14:23 ` [RFC v2 1/7] hashtable: introduce a small and naive hashtable Sasha Levin
2012-08-03 17:15   ` Tejun Heo
2012-08-03 17:16     ` Tejun Heo
2012-08-03 21:19     ` Sasha Levin
2012-08-03 21:30       ` Tejun Heo
2012-08-03 21:36         ` Sasha Levin
2012-08-03 21:44           ` Tejun Heo
2012-08-03 21:41         ` Sasha Levin
2012-08-03 21:48           ` Tejun Heo
2012-08-03 22:20             ` Sasha Levin
2012-08-03 22:23               ` Tejun Heo
2012-08-03 22:26                 ` Sasha Levin
2012-08-03 22:29                 ` Linus Torvalds
2012-08-03 22:36                   ` Tejun Heo
2012-08-03 23:47                     ` Linus Torvalds
2012-08-04  0:03                       ` Sasha Levin
2012-08-04  0:05                         ` Linus Torvalds
2012-08-04  0:33                           ` Sasha Levin
2012-08-04  0:05                       ` Tejun Heo
2012-08-03 17:39   ` Eric Dumazet
2012-08-03 14:23 ` [RFC v2 2/7] user_ns: use new hashtable implementation Sasha Levin
2012-08-05  0:58   ` Eric W. Biederman
2012-08-03 14:23 ` [RFC v2 3/7] mm,ksm: " Sasha Levin
2012-08-03 14:23 ` [RFC v2 4/7] workqueue: " Sasha Levin
2012-08-03 14:23 ` [RFC v2 5/7] mm/huge_memory: " Sasha Levin
2012-08-03 14:23 ` [RFC v2 6/7] tracepoint: " Sasha Levin
2012-08-05  0:36   ` Steven Rostedt
2012-08-05 16:31     ` Mathieu Desnoyers
2012-08-05 17:03       ` Sasha Levin
2012-08-05 17:12         ` Mathieu Desnoyers
2012-08-03 14:23 ` [RFC v2 7/7] net,9p: " Sasha Levin
2012-08-03 18:00   ` Eric Dumazet
2012-08-03 21:14     ` Sasha Levin
