* [PATCH 0/5] Implement bitlock map allocator
@ 2016-06-20  4:55 Byungchul Park
  2016-06-20  4:55 ` [PATCH 1/5] lockdep: " Byungchul Park
                   ` (5 more replies)
  0 siblings, 6 replies; 16+ messages in thread
From: Byungchul Park @ 2016-06-20  4:55 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, npiggin, sergey.senozhatsky, gregkh, minchan

Currently, bit-based locks such as bit_spin_lock cannot use lockdep's
lock correctness validator. However, it would be useful if the
validator supported bit-based locks as well.

Therefore, this patch set provides an interface for allocating and
freeing a lockdep_map for bit-based locks, so that a bit-based lock can
also use the lock correctness validator through the lockdep_map
allocated for its bit address.

This can be applied to any bit_spin_lock user except the slab
allocator, where I am not sure whether using kmalloc is safe. Anyway, I
chose two examples to apply the bitlock map allocator to, zram and
buffer head, and included those conversions in this patch set.
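
For illustration, the intended usage looks roughly like this. The struct,
the FOO_LOCK bit and the helpers below are made-up names, not part of the
patches; the only thing a user has to add is the init/free pair at object
lifetime boundaries:

#include <linux/bit_spinlock.h>
#include <linux/slab.h>

#define FOO_LOCK 0	/* which bit of ->flags acts as the lock */

struct foo {
	unsigned long flags;
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	/* allocates a lockdep_map keyed by the bit address */
	if (f)
		bit_spin_init(FOO_LOCK, &f->flags);
	return f;
}

static void foo_free(struct foo *f)
{
	/* drops the lockdep_map again */
	bit_spin_free(FOO_LOCK, &f->flags);
	kfree(f);
}

bit_spin_lock()/bit_spin_unlock() on the bit then report to lockdep
without any further changes on the user's side.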

Byungchul Park (5):
  lockdep: Implement bitlock map allocator
  lockdep: Apply bitlock to bit_spin_lock
  lockdep: Apply bit_spin_lock lockdep to zram
  fs/buffer.c: Remove trailing white space
  lockdep: Apply bit_spin_lock lockdep to BH_Uptodate_Lock

 drivers/block/zram/zram_drv.c |  10 +++
 fs/buffer.c                   |  24 +++----
 include/linux/bit_spinlock.h  |  57 ++++++++++++++--
 include/linux/bitlock.h       |  20 ++++++
 kernel/locking/Makefile       |   1 +
 kernel/locking/bitlock_map.c  | 147 ++++++++++++++++++++++++++++++++++++++++++
 lib/Kconfig.debug             |  10 +++
 7 files changed, 252 insertions(+), 17 deletions(-)
 create mode 100644 include/linux/bitlock.h
 create mode 100644 kernel/locking/bitlock_map.c

-- 
1.9.1

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 1/5] lockdep: Implement bitlock map allocator
  2016-06-20  4:55 [PATCH 0/5] Implement bitlock map allocator Byungchul Park
@ 2016-06-20  4:55 ` Byungchul Park
  2016-06-30 12:59   ` Peter Zijlstra
  2016-06-20  4:55 ` [PATCH 2/5] lockdep: Apply bitlock to bit_spin_lock Byungchul Park
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 16+ messages in thread
From: Byungchul Park @ 2016-06-20  4:55 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, npiggin, sergey.senozhatsky, gregkh, minchan

Currently, bit-based locks such as bit_spin_lock cannot use lockdep's
lock correctness validator. However, it would be useful if the
validator supported bit-based locks as well.

Therefore, this patch provides an interface for allocating and freeing
a lockdep_map for bit-based locks, so that a bit-based lock can also
use the lock correctness validator through the lockdep_map allocated
for its bit address.

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
---
 include/linux/bitlock.h      |  20 ++++++
 kernel/locking/Makefile      |   1 +
 kernel/locking/bitlock_map.c | 147 +++++++++++++++++++++++++++++++++++++++++++
 lib/Kconfig.debug            |  10 +++
 4 files changed, 178 insertions(+)
 create mode 100644 include/linux/bitlock.h
 create mode 100644 kernel/locking/bitlock_map.c

diff --git a/include/linux/bitlock.h b/include/linux/bitlock.h
new file mode 100644
index 0000000..1c8a46f
--- /dev/null
+++ b/include/linux/bitlock.h
@@ -0,0 +1,20 @@
+#ifndef __LINUX_BITLOCK_H
+#define __LINUX_BITLOCK_H
+
+#include <linux/lockdep.h>
+
+struct bitlock_map {
+	struct hlist_node	hash_entry;
+	unsigned long		bitaddr; /* ID */
+	struct lockdep_map	map;
+	int			ref; /* reference count */
+};
+
+#define BIT_ACQUIRE 0 /* Increase bmap reference count */
+#define BIT_RELEASE 1 /* Decrease bmap reference count */
+#define BIT_OTHER   2 /* Don't touch the bmap reference count */
+
+extern struct lockdep_map *bitlock_get_map(int bitnum, unsigned long *addr, int type);
+extern void bitlock_init(int bitnum, unsigned long *addr, const char *name, struct lock_class_key *key);
+extern void bitlock_free(int bitnum, unsigned long *addr);
+#endif /* __LINUX_BITLOCK_H */
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 8e96f6c..8f4aa9e 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
 obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+obj-$(CONFIG_BITLOCK_ALLOC) += bitlock_map.o
diff --git a/kernel/locking/bitlock_map.c b/kernel/locking/bitlock_map.c
new file mode 100644
index 0000000..e2b576f
--- /dev/null
+++ b/kernel/locking/bitlock_map.c
@@ -0,0 +1,147 @@
+/*
+ * kernel/locking/bitlock_map.c
+ *
+ * Lockdep allocator for bit-based lock
+ *
+ * Written by Byungchul Park:
+ *
+ * Thanks to Minchan Kim for the initial suggestion, namely to make
+ * even bit-based locks usable with the runtime locking correctness
+ * validator.
+ */
+
+#include <linux/bitlock.h>
+#include <linux/hash.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define BITLOCK_HASH_BITS	15U
+#define BITLOCK_HASH_SIZE	(1U << BITLOCK_HASH_BITS)
+#define bitlock_hashentry(key)	(bitlock_hash + hash_long(key, BITLOCK_HASH_BITS))
+
+static struct hlist_head bitlock_hash[BITLOCK_HASH_SIZE];
+
+static DEFINE_SPINLOCK(bitlock_spin);
+
+static inline unsigned long get_bitaddr(int bitnum, unsigned long *addr)
+{
+	return (unsigned long)((char *)addr + bitnum);
+}
+
+/* Caller must hold a lock to protect hlist traversal */
+static struct bitlock_map *look_up_bmap(int bitnum, unsigned long *addr)
+{
+	struct hlist_head *hash_head;
+	struct bitlock_map *bmap;
+	unsigned long bitaddr = get_bitaddr(bitnum, addr);
+
+	hash_head = bitlock_hashentry(bitaddr);
+	hlist_for_each_entry(bmap, hash_head, hash_entry)
+		if (bmap->bitaddr == bitaddr)
+			return bmap;
+
+	return NULL;
+}
+
+static struct bitlock_map *alloc_bmap(void)
+{
+	struct bitlock_map *ret;
+
+	ret = kmalloc(sizeof(struct bitlock_map), GFP_NOWAIT | __GFP_NOWARN);
+	if (!ret)
+		pr_warn("bitlock: Can't kmalloc a bitlock map.\n");
+
+	return ret;
+}
+
+static void free_bmap(struct bitlock_map *bmap)
+{
+	kfree(bmap);
+}
+
+struct lockdep_map *bitlock_get_map(int bitnum, unsigned long *addr, int type)
+{
+	struct bitlock_map *bmap;
+	struct lockdep_map *map = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bitlock_spin, flags);
+
+	bmap = look_up_bmap(bitnum, addr);
+	if (bmap) {
+		/*
+		 * bmap->ref is used as a sanity check: each
+		 * BIT_ACQUIRE must be paired with a BIT_RELEASE,
+		 * so a balanced acquire/release sequence leaves
+		 * bmap->ref at zero.
+		 */
+		if (type == BIT_ACQUIRE)
+			bmap->ref++;
+		else if (type == BIT_RELEASE)
+			bmap->ref--;
+		map = &bmap->map;
+	}
+
+	spin_unlock_irqrestore(&bitlock_spin, flags);
+
+	return map;
+}
+EXPORT_SYMBOL_GPL(bitlock_get_map);
+
+void bitlock_init(int bitnum, unsigned long *addr, const char *name,
+		struct lock_class_key *key)
+{
+	struct hlist_head *hash_head;
+	struct bitlock_map *bmap;
+	unsigned long flags;
+	unsigned long bitaddr = get_bitaddr(bitnum, addr);
+
+	spin_lock_irqsave(&bitlock_spin, flags);
+
+	bmap = look_up_bmap(bitnum, addr);
+
+	/*
+	 * bitlock_init() must not be called twice for the
+	 * same address without an intervening bitlock_free().
+	 * We regard that as a bug.
+	 */
+	BUG_ON(bmap);
+	if (!bmap) {
+		bmap = alloc_bmap();
+		if (bmap) {
+			hash_head = bitlock_hashentry(bitaddr);
+			bmap->bitaddr = bitaddr;
+			bmap->ref = 0;
+			lockdep_init_map(&bmap->map, name, key, 0);
+			hlist_add_head(&bmap->hash_entry, hash_head);
+		}
+	}
+
+	spin_unlock_irqrestore(&bitlock_spin, flags);
+}
+EXPORT_SYMBOL_GPL(bitlock_init);
+
+void bitlock_free(int bitnum, unsigned long *addr)
+{
+	struct bitlock_map *bmap;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bitlock_spin, flags);
+
+	bmap = look_up_bmap(bitnum, addr);
+	if (bmap) {
+		/*
+		 * bmap->ref is incremented on acquire and
+		 * decremented on release, so it should
+		 * normally be zero by the time bitlock_free()
+		 * is called. Otherwise, it's a bug.
+		 */
+		BUG_ON(bmap->ref);
+		hlist_del(&bmap->hash_entry);
+		free_bmap(bmap);
+	}
+
+	spin_unlock_irqrestore(&bitlock_spin, flags);
+}
+EXPORT_SYMBOL_GPL(bitlock_free);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8bfd1ac..ca2d2ee 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -993,6 +993,16 @@ config DEBUG_LOCK_ALLOC
 	 spin_lock_init()/mutex_init()/etc., or whether there is any lock
 	 held during task exit.
 
+config BITLOCK_ALLOC
+	bool "Lock debugging: lockdep_map allocator for bitlock"
+	depends on LOCKDEP_SUPPORT
+	select LOCKDEP
+	default n
+	help
+	 This feature makes it possible to allocate a lockdep_map for
+	 bit-based locks, e.g. bit_spin_lock. A lockdep_map instance is
+	 necessary for lock correctness checking to be applied.
+
 config PROVE_LOCKING
 	bool "Lock debugging: prove locking correctness"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
-- 
1.9.1

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 2/5] lockdep: Apply bitlock to bit_spin_lock
  2016-06-20  4:55 [PATCH 0/5] Implement bitlock map allocator Byungchul Park
  2016-06-20  4:55 ` [PATCH 1/5] lockdep: " Byungchul Park
@ 2016-06-20  4:55 ` Byungchul Park
  2016-06-20  4:55 ` [PATCH 3/5] lockdep: Apply bit_spin_lock lockdep to zram Byungchul Park
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 16+ messages in thread
From: Byungchul Park @ 2016-06-20  4:55 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, npiggin, sergey.senozhatsky, gregkh, minchan

Currently, bit_spin_lock does not use a lockdep_map at all, so the
lock correctness validator cannot cover it. This patch makes it
possible for bit_spin_lock to use the validator when
CONFIG_BITLOCK_ALLOC is enabled.
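
As a contrived illustration (assuming CONFIG_BITLOCK_ALLOC=y; the names
below are made up), lockdep now reports "possible recursive locking
detected" on the second acquisition below, before the CPU starts
spinning forever; without this patch the hang would be silent:

static unsigned long foo_flags;		/* hypothetical lock word */

static void foo_buggy(void)
{
	bit_spin_init(0, &foo_flags);	/* registers a lockdep_map */
	bit_spin_lock(0, &foo_flags);
	bit_spin_lock(0, &foo_flags);	/* AA deadlock, now flagged */
}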

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
---
 include/linux/bit_spinlock.h | 57 +++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 51 insertions(+), 6 deletions(-)

diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index 3b5bafc..3f8b013 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -6,13 +6,43 @@
 #include <linux/atomic.h>
 #include <linux/bug.h>
 
+#ifdef CONFIG_BITLOCK_ALLOC
+#include <linux/bitlock.h>
+#define bit_spin_init(b, a)			\
+do {						\
+	static struct lock_class_key __key;	\
+	bitlock_init(b, a, #b "@" #a, &__key);	\
+} while (0)
+
+static inline void bit_spin_free(int bitnum, unsigned long *addr)
+{
+	bitlock_free(bitnum, addr);
+}
+
+static inline void bit_spin_acquire(int bitnum, unsigned long *addr, int try)
+{
+	struct lockdep_map *map = bitlock_get_map(bitnum, addr, BIT_ACQUIRE);
+	if (map)
+		spin_acquire(map, 0, try, _RET_IP_);
+}
+
+static inline void bit_spin_release(int bitnum, unsigned long *addr)
+{
+	struct lockdep_map *map = bitlock_get_map(bitnum, addr, BIT_RELEASE);
+	if (map)
+		spin_release(map, 0, _RET_IP_);
+}
+#else
+static inline void bit_spin_init(int bitnum, unsigned long *addr) {}
+static inline void bit_spin_free(int bitnum, unsigned long *addr) {}
+static inline void bit_spin_acquire(int bitnum, unsigned long *addr, int try) {}
+static inline void bit_spin_release(int bitnum, unsigned long *addr) {}
+#endif
+
 /*
- *  bit-based spin_lock()
- *
- * Don't use this unless you really need to: spin_lock() and spin_unlock()
- * are significantly faster.
+ * bit-based spin_lock() core, without preempt_disable() or lockdep annotation
  */
-static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+static inline void do_raw_bit_spin_lock(int bitnum, unsigned long *addr)
 {
 	/*
 	 * Assuming the lock is uncontended, this never enters
@@ -21,7 +51,6 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
 	 * busywait with less bus contention for a good time to
 	 * attempt to acquire the lock bit.
 	 */
-	preempt_disable();
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
 		preempt_enable();
@@ -35,6 +64,19 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
 }
 
 /*
+ *  bit-based spin_lock()
+ *
+ * Don't use this unless you really need to: spin_lock() and spin_unlock()
+ * are significantly faster.
+ */
+static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+{
+	preempt_disable();
+	bit_spin_acquire(bitnum, addr, 0);
+	do_raw_bit_spin_lock(bitnum, addr);
+}
+
+/*
  * Return true if it was acquired
  */
 static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
@@ -46,6 +88,7 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
 		return 0;
 	}
 #endif
+	bit_spin_acquire(bitnum, addr, 1);
 	__acquire(bitlock);
 	return 1;
 }
@@ -55,6 +98,7 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
  */
 static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
 {
+	bit_spin_release(bitnum, addr);
 #ifdef CONFIG_DEBUG_SPINLOCK
 	BUG_ON(!test_bit(bitnum, addr));
 #endif
@@ -72,6 +116,7 @@ static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
  */
 static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
 {
+	bit_spin_release(bitnum, addr);
 #ifdef CONFIG_DEBUG_SPINLOCK
 	BUG_ON(!test_bit(bitnum, addr));
 #endif
-- 
1.9.1

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 3/5] lockdep: Apply bit_spin_lock lockdep to zram
  2016-06-20  4:55 [PATCH 0/5] Implement bitlock map allocator Byungchul Park
  2016-06-20  4:55 ` [PATCH 1/5] lockdep: " Byungchul Park
  2016-06-20  4:55 ` [PATCH 2/5] lockdep: Apply bitlock to bit_spin_lock Byungchul Park
@ 2016-06-20  4:55 ` Byungchul Park
  2016-06-20 15:36   ` Sergey Senozhatsky
  2016-06-20  4:55 ` [PATCH 4/5] fs/buffer.c: Remove trailing white space Byungchul Park
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 16+ messages in thread
From: Byungchul Park @ 2016-06-20  4:55 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, npiggin, sergey.senozhatsky, gregkh, minchan

In order to use lockdep-enabled bit_spin_lock, we have to call
bit_spin_init() when an instance containing the bit used as a lock is
created, and bit_spin_free() when that instance is destroyed.

zram is one of the bit_spin_lock users. This patch adds
bit_spin_init() and bit_spin_free() in the proper places so that the
lock correctness validator covers the bit_spin_lock zram is using.

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
---
 drivers/block/zram/zram_drv.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 370c2f7..2bc3bde 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -495,6 +495,11 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
 	}
 
 	zs_destroy_pool(meta->mem_pool);
+
+	for (index = 0; index < num_pages; index++) {
+		bit_spin_free(ZRAM_ACCESS, &meta->table[index].value);
+	}
+
 	vfree(meta->table);
 	kfree(meta);
 }
@@ -503,6 +508,7 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
 {
 	size_t num_pages;
 	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+	int index;
 
 	if (!meta)
 		return NULL;
@@ -520,6 +526,10 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
 		goto out_error;
 	}
 
+	for (index = 0; index < num_pages; index++) {
+		bit_spin_init(ZRAM_ACCESS, &meta->table[index].value);
+	}
+
 	return meta;
 
 out_error:
-- 
1.9.1

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 4/5] fs/buffer.c: Remove trailing white space
  2016-06-20  4:55 [PATCH 0/5] Implement bitlock map allocator Byungchul Park
                   ` (2 preceding siblings ...)
  2016-06-20  4:55 ` [PATCH 3/5] lockdep: Apply bit_spin_lock lockdep to zram Byungchul Park
@ 2016-06-20  4:55 ` Byungchul Park
  2016-06-20  4:55 ` [PATCH 5/5] lockdep: Apply bit_spin_lock lockdep to BH_Uptodate_Lock Byungchul Park
  2016-06-29 12:45 ` [PATCH 0/5] Implement bitlock map allocator Byungchul Park
  5 siblings, 0 replies; 16+ messages in thread
From: Byungchul Park @ 2016-06-20  4:55 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, npiggin, sergey.senozhatsky, gregkh, minchan

Trailing whitespace is not accepted by the kernel coding style. Remove
it.

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
---
 fs/buffer.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index e1632ab..a75ca74 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -439,7 +439,7 @@ EXPORT_SYMBOL(mark_buffer_async_write);
  * try_to_free_buffers() will be operating against the *blockdev* mapping
  * at the time, not against the S_ISREG file which depends on those buffers.
  * So the locking for private_list is via the private_lock in the address_space
- * which backs the buffers.  Which is different from the address_space 
+ * which backs the buffers.  Which is different from the address_space
  * against which the buffers are listed.  So for a particular address_space,
  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
  * mapping->private_list will always be protected by the backing blockdev's
@@ -713,7 +713,7 @@ EXPORT_SYMBOL(__set_page_dirty_buffers);
  * Do this in two main stages: first we copy dirty buffers to a
  * temporary inode list, queueing the writes as we go.  Then we clean
  * up, waiting for those writes to complete.
- * 
+ *
  * During this second stage, any subsequent updates to the file may end
  * up refiling the buffer on the original inode's dirty list again, so
  * there is a chance we will end up with a buffer queued for write but
@@ -791,7 +791,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 		brelse(bh);
 		spin_lock(lock);
 	}
-	
+
 	spin_unlock(lock);
 	err2 = osync_buffers_list(lock, list);
 	if (err)
@@ -901,7 +901,7 @@ no_grow:
 	/*
 	 * Return failure for non-async IO requests.  Async IO requests
 	 * are not allowed to fail, so we have to wait until buffer heads
-	 * become available.  But we don't want tasks sleeping with 
+	 * become available.  But we don't want tasks sleeping with
 	 * partially complete buffers, so all were released above.
 	 */
 	if (!retry)
@@ -910,7 +910,7 @@ no_grow:
 	/* We're _really_ low on memory. Now we just
 	 * wait for old buffer heads to become free due to
 	 * finishing IO.  Since this is an async request and
-	 * the reserve list is empty, we're sure there are 
+	 * the reserve list is empty, we're sure there are
 	 * async buffer heads in use.
 	 */
 	free_more_memory();
@@ -946,7 +946,7 @@ static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
 
 /*
  * Initialise the state of a blockdev page's buffers.
- */ 
+ */
 static sector_t
 init_page_buffers(struct page *page, struct block_device *bdev,
 			sector_t block, int size)
@@ -1448,7 +1448,7 @@ static bool has_bh_in_lru(int cpu, void *dummy)
 {
 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
 	int i;
-	
+
 	for (i = 0; i < BH_LRU_SIZE; i++) {
 		if (b->bhs[i])
 			return 1;
@@ -1952,7 +1952,7 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 		if (PageUptodate(page)) {
 			if (!buffer_uptodate(bh))
 				set_buffer_uptodate(bh);
-			continue; 
+			continue;
 		}
 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 		    !buffer_unwritten(bh) &&
@@ -2258,7 +2258,7 @@ EXPORT_SYMBOL(block_read_full_page);
 
 /* utility function for filesystems that need to do work on expanding
  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
- * deal with the hole.  
+ * deal with the hole.
  */
 int generic_cont_expand_simple(struct inode *inode, loff_t size)
 {
@@ -2819,7 +2819,7 @@ int block_truncate_page(struct address_space *mapping,
 
 	length = blocksize - length;
 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	
+
 	page = grab_cache_page(mapping, index);
 	err = -ENOMEM;
 	if (!page)
@@ -3069,7 +3069,7 @@ EXPORT_SYMBOL(submit_bh);
  *
  * ll_rw_block sets b_end_io to simple completion handler that marks
  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
- * any waiters. 
+ * any waiters.
  *
  * All of the buffers must be for the same device, and must also be a
  * multiple of the current approved size for the device.
-- 
1.9.1

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 5/5] lockdep: Apply bit_spin_lock lockdep to BH_Uptodate_Lock
  2016-06-20  4:55 [PATCH 0/5] Implement bitlock map allocator Byungchul Park
                   ` (3 preceding siblings ...)
  2016-06-20  4:55 ` [PATCH 4/5] fs/buffer.c: Remove trailing white space Byungchul Park
@ 2016-06-20  4:55 ` Byungchul Park
  2016-06-29 12:45 ` [PATCH 0/5] Implement bitlock map allocator Byungchul Park
  5 siblings, 0 replies; 16+ messages in thread
From: Byungchul Park @ 2016-06-20  4:55 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, npiggin, sergey.senozhatsky, gregkh, minchan

In order to use lockdep-enabled bit_spin_lock, we have to call
bit_spin_init() when an instance containing the bit used as a lock is
created, and bit_spin_free() when that instance is destroyed.

The BH_Uptodate_Lock bit in a buffer head's b_state is one of the
bit_spin_lock users. This patch adds bit_spin_init() and
bit_spin_free() in the proper places to apply the lock correctness
validator.

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
---
 fs/buffer.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/fs/buffer.c b/fs/buffer.c
index a75ca74..65c0b8a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3317,6 +3317,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 	if (ret) {
 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
 		preempt_disable();
+		bit_spin_init(BH_Uptodate_Lock, &ret->b_state);
 		__this_cpu_inc(bh_accounting.nr);
 		recalc_bh_state();
 		preempt_enable();
@@ -3328,6 +3329,7 @@ EXPORT_SYMBOL(alloc_buffer_head);
 void free_buffer_head(struct buffer_head *bh)
 {
 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
+	bit_spin_free(BH_Uptodate_Lock, &bh->b_state);
 	kmem_cache_free(bh_cachep, bh);
 	preempt_disable();
 	__this_cpu_dec(bh_accounting.nr);
-- 
1.9.1

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/5] lockdep: Apply bit_spin_lock lockdep to zram
  2016-06-20  4:55 ` [PATCH 3/5] lockdep: Apply bit_spin_lock lockdep to zram Byungchul Park
@ 2016-06-20 15:36   ` Sergey Senozhatsky
  2016-06-21  1:05     ` Byungchul Park
  0 siblings, 1 reply; 16+ messages in thread
From: Sergey Senozhatsky @ 2016-06-20 15:36 UTC (permalink / raw)
  To: Byungchul Park
  Cc: peterz, mingo, linux-kernel, npiggin, sergey.senozhatsky, gregkh,
	minchan, sergey.senozhatsky.work

On (06/20/16 13:55), Byungchul Park wrote:
> In order to use lockdep-enabled bit_spin_lock, we have to call
> bit_spin_init() when an instance containing the bit used as a lock is
> created, and bit_spin_free() when that instance is destroyed.
>
> zram is one of the bit_spin_lock users. This patch adds
> bit_spin_init() and bit_spin_free() in the proper places so that the
> lock correctness validator covers the bit_spin_lock zram is using.

Hello,

just for information

there was a proposal from the -rt people to use normal spinlocks
for the table entries, rather than a bit spinlock. I had a patch
some time ago; I will obtain some performance data and post an
RFC [maybe tomorrow].

	-ss

> Signed-off-by: Byungchul Park <byungchul.park@lge.com>
> ---
>  drivers/block/zram/zram_drv.c | 10 ++++++++++
>  1 file changed, 10 insertions(+)
> 
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 370c2f7..2bc3bde 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -495,6 +495,11 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
>  	}
>  
>  	zs_destroy_pool(meta->mem_pool);
> +
> +	for (index = 0; index < num_pages; index++) {
> +		bit_spin_free(ZRAM_ACCESS, &meta->table[index].value);
> +	}
> +
>  	vfree(meta->table);
>  	kfree(meta);
>  }
> @@ -503,6 +508,7 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
>  {
>  	size_t num_pages;
>  	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
> +	int index;
>  
>  	if (!meta)
>  		return NULL;
> @@ -520,6 +526,10 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
>  		goto out_error;
>  	}
>  
> +	for (index = 0; index < num_pages; index++) {
> +		bit_spin_init(ZRAM_ACCESS, &meta->table[index].value);
> +	}
> +
>  	return meta;
>  
>  out_error:
> -- 
> 1.9.1
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/5] lockdep: Apply bit_spin_lock lockdep to zram
  2016-06-20 15:36   ` Sergey Senozhatsky
@ 2016-06-21  1:05     ` Byungchul Park
  0 siblings, 0 replies; 16+ messages in thread
From: Byungchul Park @ 2016-06-21  1:05 UTC (permalink / raw)
  To: Sergey Senozhatsky
  Cc: peterz, mingo, linux-kernel, npiggin, gregkh, minchan,
	sergey.senozhatsky.work

On Tue, Jun 21, 2016 at 12:36:19AM +0900, Sergey Senozhatsky wrote:
> 
> Hello,
> 
> just for information

Thank you for the information.

> 
> there was a proposal from the -rt people to use normal spinlocks
> for the table entries, rather than a bit spinlock. I had a patch
> some time ago; I will obtain some performance data and post an
> RFC [maybe tomorrow].
> 
> 	-ss
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 0/5] Implement bitlock map allocator
  2016-06-20  4:55 [PATCH 0/5] Implement bitlock map allocator Byungchul Park
                   ` (4 preceding siblings ...)
  2016-06-20  4:55 ` [PATCH 5/5] lockdep: Apply bit_spin_lock lockdep to BH_Uptodate_Lock Byungchul Park
@ 2016-06-29 12:45 ` Byungchul Park
  5 siblings, 0 replies; 16+ messages in thread
From: Byungchul Park @ 2016-06-29 12:45 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, npiggin, sergey.senozhatsky, gregkh, minchan

On Mon, Jun 20, 2016 at 01:55:10PM +0900, Byungchul Park wrote:
> Currently, bit-based locks such as bit_spin_lock cannot use lockdep's
> lock correctness validator. However, it would be useful if the
> validator supported bit-based locks as well.
>
> Therefore, this patch set provides an interface for allocating and
> freeing a lockdep_map for bit-based locks, so that a bit-based lock can
> also use the lock correctness validator through the lockdep_map
> allocated for its bit address.
>
> This can be applied to any bit_spin_lock user except the slab
> allocator, where I am not sure whether using kmalloc is safe. Anyway, I
> chose two examples to apply the bitlock map allocator to, zram and
> buffer head, and included those conversions in this patch set.

What do you think of this proposal to make bit-based locks usable with lockdep?

> 
> Byungchul Park (5):
>   lockdep: Implement bitlock map allocator
>   lockdep: Apply bitlock to bit_spin_lock
>   lockdep: Apply bit_spin_lock lockdep to zram
>   fs/buffer.c: Remove trailing white space
>   lockdep: Apply bit_spin_lock lockdep to BH_Uptodate_Lock
> 
>  drivers/block/zram/zram_drv.c |  10 +++
>  fs/buffer.c                   |  24 +++----
>  include/linux/bit_spinlock.h  |  57 ++++++++++++++--
>  include/linux/bitlock.h       |  20 ++++++
>  kernel/locking/Makefile       |   1 +
>  kernel/locking/bitlock_map.c  | 147 ++++++++++++++++++++++++++++++++++++++++++
>  lib/Kconfig.debug             |  10 +++
>  7 files changed, 252 insertions(+), 17 deletions(-)
>  create mode 100644 include/linux/bitlock.h
>  create mode 100644 kernel/locking/bitlock_map.c
> 
> -- 
> 1.9.1

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/5] lockdep: Implement bitlock map allocator
  2016-06-20  4:55 ` [PATCH 1/5] lockdep: " Byungchul Park
@ 2016-06-30 12:59   ` Peter Zijlstra
  2016-07-01  0:24     ` Byungchul Park
  0 siblings, 1 reply; 16+ messages in thread
From: Peter Zijlstra @ 2016-06-30 12:59 UTC (permalink / raw)
  To: Byungchul Park
  Cc: mingo, linux-kernel, npiggin, sergey.senozhatsky, gregkh, minchan

On Mon, Jun 20, 2016 at 01:55:11PM +0900, Byungchul Park wrote:

> +struct bitlock_map {
> +	struct hlist_node	hash_entry;
> +	unsigned long		bitaddr; /* ID */
> +	struct lockdep_map	map;
> +	int			ref; /* reference count */
> +};

So this is effectively bigger than just adding a struct lockdep_map into
whatever structure holds the bit spinlock to begin with.

What is the gain?


> +static inline unsigned long get_bitaddr(int bitnum, unsigned long *addr)
> +{
> +	return (unsigned long)((char *)addr + bitnum);
> +}

And given you keep these lockdep_map thingies out-of-line, the original
structure remains dense and thus the above munging can easily result in
collisions.

Now, I suppose it's rather unlikely, but given it's entirely silent if
it happens, this is bad.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/5] lockdep: Implement bitlock map allocator
  2016-06-30 12:59   ` Peter Zijlstra
@ 2016-07-01  0:24     ` Byungchul Park
  2016-07-01  7:53       ` Peter Zijlstra
  0 siblings, 1 reply; 16+ messages in thread
From: Byungchul Park @ 2016-07-01  0:24 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: mingo, linux-kernel, npiggin, sergey.senozhatsky, gregkh, minchan

On Thu, Jun 30, 2016 at 02:59:19PM +0200, Peter Zijlstra wrote:
> On Mon, Jun 20, 2016 at 01:55:11PM +0900, Byungchul Park wrote:
> 
> > +struct bitlock_map {
> > +	struct hlist_node	hash_entry;
> > +	unsigned long		bitaddr; /* ID */
> > +	struct lockdep_map	map;
> > +	int			ref; /* reference count */
> > +};
> 
> So this is effectively bigger than just adding a struct lockdep_map into
> whatever structure holds the bit spinlock to begin with.
> 
> What is the gain?

1. I don't want to make awareness of lockdep essential for users of
   bit-based locks, just as with spinlocks, mutexes, semaphores and so
   on. In other words, I want to make it work transparently.

2. A bit-based lock can be used with any data type that stands alone,
   not within a structure. I mean it can sometimes be ugly to pack the
   lock bit and a lockdep_map instance together explicitly.

3. I think this is a more general approach because _any_ random bit in
   memory can be used as a lock. Do we need to restrict where the bit
   is so that we can place a lockdep_map explicitly around it?

> 
> 
> > +static inline unsigned long get_bitaddr(int bitnum, unsigned long *addr)
> > +{
> > +	return (unsigned long)((char *)addr + bitnum);
> > +}
> 
> And given you keep these lockdep_map thingies out-of-line, the original
> structure remains dense and thus the above munging can easily result in
> collisions.

I am sorry, I don't understand exactly what you said. IIUC, of course
it would be safer if the lockdep_map were included in a structure
statically. However, as you know, dynamic allocation and association
can sometimes be more useful and worthwhile when implemented carefully.
CONFIG_LOCKDEP is a debug feature anyway, where IMHO adding new value
matters more than being maximally safe, even though of course we have
to keep it as safe as possible.

> 
> Now, I suppose it's rather unlikely, but given it's entirely silent if
> it happens, this is bad.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/5] lockdep: Implement bitlock map allocator
  2016-07-01  0:24     ` Byungchul Park
@ 2016-07-01  7:53       ` Peter Zijlstra
  2016-07-04  7:29         ` Byungchul Park
  0 siblings, 1 reply; 16+ messages in thread
From: Peter Zijlstra @ 2016-07-01  7:53 UTC (permalink / raw)
  To: Byungchul Park
  Cc: mingo, linux-kernel, npiggin, sergey.senozhatsky, gregkh, minchan

On Fri, Jul 01, 2016 at 09:24:44AM +0900, Byungchul Park wrote:
> On Thu, Jun 30, 2016 at 02:59:19PM +0200, Peter Zijlstra wrote:
> > On Mon, Jun 20, 2016 at 01:55:11PM +0900, Byungchul Park wrote:
> > 
> > > +struct bitlock_map {
> > > +	struct hlist_node	hash_entry;
> > > +	unsigned long		bitaddr; /* ID */
> > > +	struct lockdep_map	map;
> > > +	int			ref; /* reference count */
> > > +};
> > 
> > So this is effectively bigger than just adding a struct lockdep_map into
> > whatever structure holds the bit spinlock to begin with.
> > 
> > What is the gain?
> 
> 1. I don't want to make awareness of lockdep essential for users of
>    bit-based locks, just as with spinlocks, mutexes, semaphores and so
>    on. In other words, I want to make it work transparently.

I want to discourage the use of bitlocks, they stink.

bitlocks must by their constraint be a test-and-set lock, with all the
known problems those have. It also means they're a royal pain for -rt.

Yes, there are a number of places we use them, but people should think
very carefully before they use them and consider all these issues. But
the problem seems to be that people aren't even aware there's problems.

> 2. A bit-based lock can be used with any data type that stands alone,
>    not within a structure. I mean it can sometimes be ugly to pack the
>    lock bit and a lockdep_map instance together explicitly.

Yuck, people do this?

> 3. I think this is a more general approach because _any_ random bit in
>    memory can be used as a lock. Do we need to restrict where the bit
>    is so that we can place a lockdep_map explicitly around it?

Again, yuck!

In any case, that would have made great Changelog material.

> > > +static inline unsigned long get_bitaddr(int bitnum, unsigned long *addr)
> > > +{
> > > +	return (unsigned long)((char *)addr + bitnum);
> > > +}
> > 
> > And given you keep these lockdep_map thingies out-of-line, the original
> > structure remains dense and thus the above munging can easily result in
> > collisions.
> 
> I am sorry, I don't understand exactly what you said.


#define FOO_FLAG_LOCK	8

struct foo {
	struct hlist_bl_head head;
	unsigned long flags;
};

struct foo bar[];


That structure has 2 bitlocks in, one at:

  0 bytes + 0 bits
  8 bytes + 8 bits

In this case:

  get_bitaddr(8, &bar[0].flags) == get_bitaddr(0, &bar[1].head)

Which is a collision and fail, because they're two different lock
classes.
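
A stand-alone user-space sketch of the same aliasing (hypothetical;
assumes an LP64 target and no padding in struct foo):

#include <stdio.h>

struct hlist_bl_head { void *first; };

struct foo {
	struct hlist_bl_head head;
	unsigned long flags;
};

static unsigned long get_bitaddr(int bitnum, unsigned long *addr)
{
	return (unsigned long)((char *)addr + bitnum);
}

int main(void)
{
	struct foo bar[2];

	/* bit 8 of bar[0].flags aliases bit 0 of bar[1].head */
	printf("collision: %d\n",
	       get_bitaddr(8, &bar[0].flags) ==
	       get_bitaddr(0, (unsigned long *)&bar[1].head));
	return 0;
}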

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/5] lockdep: Implement bitlock map allocator
  2016-07-01  7:53       ` Peter Zijlstra
@ 2016-07-04  7:29         ` Byungchul Park
  2016-07-07 10:22           ` Byungchul Park
  2016-07-13 20:17           ` Peter Zijlstra
  0 siblings, 2 replies; 16+ messages in thread
From: Byungchul Park @ 2016-07-04  7:29 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: mingo, linux-kernel, npiggin, sergey.senozhatsky, gregkh, minchan

On Fri, Jul 01, 2016 at 09:53:12AM +0200, Peter Zijlstra wrote:
> On Fri, Jul 01, 2016 at 09:24:44AM +0900, Byungchul Park wrote:
> > On Thu, Jun 30, 2016 at 02:59:19PM +0200, Peter Zijlstra wrote:
> > > On Mon, Jun 20, 2016 at 01:55:11PM +0900, Byungchul Park wrote:
> > > 
> > > > +struct bitlock_map {
> > > > +	struct hlist_node	hash_entry;
> > > > +	unsigned long		bitaddr; /* ID */
> > > > +	struct lockdep_map	map;
> > > > +	int			ref; /* reference count */
> > > > +};
> > > 
> > > So this is effectively bigger than just adding a struct lockdep_map into
> > > whatever structure holds the bit spinlock to begin with.
> > > 
> > > What is the gain?
> > 
> > 1. I don't want to make awareness of lockdep essential for users of
> >    bit-based locks, just as with spinlocks, mutexes, semaphores and so
> >    on. In other words, I want to make it work transparently.
> 
> I want to discourage the use of bitlocks, they stink.

I agree it has some problems. But people who are sensitive to memory
consumption still need to use bit-based locks. Right?

I can stop this proposal, since it is meaningless if bit-based locks
can be removed entirely because no requirement for them exists at all.
But IMHO it is worthwhile as long as that requirement remains.

> bitlocks must by their constraint be a test-and-set lock, with all the
> known problems those have. It also means they're a royal pain for -rt.

I also think it is better to use a spinlock in most cases, unless
memory consumption is a critical problem. But when memory consumption
is critical... what can we do?

> Yes, there are a number of places we use them, but people should think
> very carefully before they use them and consider all these issues. But
> the problem seems to be that people aren't even aware there's problems.
> 
> > 2. A bit-based lock can be used with any data type that stands alone,
> >    not within a structure. I mean it can sometimes be ugly to pack the
> >    lock bit and a lockdep_map instance together explicitly.
> 
> Yuck, people do this?
> 
> > 3. I think this is a more general approach because _any_ random bit in
> >    memory can be used as a lock. Do we need to restrict where the bit
> >    is so that we can place a lockdep_map explicitly around it?
> 
> Again, yuck!

You mean we should never provide the lockdep checking mechanism
transparently, and users of bit-based locks must add a lockdep_map
manually, case by case. Right? Do I understand correctly? If so, I
wonder why?

> > > > +static inline unsigned long get_bitaddr(int bitnum, unsigned long *addr)
> > > > +{
> > > > +	return (unsigned long)((char *)addr + bitnum);
> > > > +}
> > > 
> > > And given you keep these lockdep_map thingies out-of-line, the original
> > > structure remains dense and thus the above munging can easily result in
> > > collisions.
> > 
> > I am sorry, I don't understand exactly what you said.
> 
> 
> #define FOO_FLAG_LOCK	8
> 
> struct foo {
> 	struct hlist_bl_head head;
> 	unsigned long flags;
> };
> 
> struct foo bar[];
> 
> 
> That structure has 2 bitlocks in, one at:
> 
>   0 bytes + 0 bits
>   8 bytes + 8 bits
> 
> In this case:
> 
>   get_bitaddr(8, &bar[0].flags) == get_bitaddr(0, &bar[1].head)
> 
> Which is a collision and fail, because they're two different lock
> classes.

OOPS! What a fool I was. That's my mistake. I can fix it. Sorry.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/5] lockdep: Implement bitlock map allocator
  2016-07-04  7:29         ` Byungchul Park
@ 2016-07-07 10:22           ` Byungchul Park
  2016-07-13 20:17           ` Peter Zijlstra
  1 sibling, 0 replies; 16+ messages in thread
From: Byungchul Park @ 2016-07-07 10:22 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: mingo, linux-kernel, npiggin, sergey.senozhatsky, gregkh, minchan

On Mon, Jul 04, 2016 at 04:29:25PM +0900, Byungchul Park wrote:
> > > 3. I think this is a more general approach because _any_ random bit in
> > >    memory can be used as a lock. Do we need to restrict where the bit
> > >    is so that we can place a lockdep_map explicitly around it?
> > 
> > Again, yuck!
> 
> You mean we should never provide the lockdep checking mechanism
> transparently, and users of bit-based locks must add a lockdep_map
> manually, case by case. Right? Do I understand correctly? If so, I
> wonder why?

I will stop this if it cannot provide anything valuable, though I
still wonder about it. I asked seriously because I want to know. What
do you think about my question? Is there something I missed?

Or can I proceed after fixing the bug you pointed out?

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/5] lockdep: Implement bitlock map allocator
  2016-07-04  7:29         ` Byungchul Park
  2016-07-07 10:22           ` Byungchul Park
@ 2016-07-13 20:17           ` Peter Zijlstra
  2016-07-18  1:46             ` Byungchul Park
  1 sibling, 1 reply; 16+ messages in thread
From: Peter Zijlstra @ 2016-07-13 20:17 UTC (permalink / raw)
  To: Byungchul Park
  Cc: mingo, linux-kernel, sergey.senozhatsky, gregkh, minchan,
	Thomas Gleixner

On Mon, Jul 04, 2016 at 04:29:25PM +0900, Byungchul Park wrote:
> On Fri, Jul 01, 2016 at 09:53:12AM +0200, Peter Zijlstra wrote:
> > On Fri, Jul 01, 2016 at 09:24:44AM +0900, Byungchul Park wrote:
> > > On Thu, Jun 30, 2016 at 02:59:19PM +0200, Peter Zijlstra wrote:
> > > > On Mon, Jun 20, 2016 at 01:55:11PM +0900, Byungchul Park wrote:
> > > > 
> > > > > +struct bitlock_map {
> > > > > +	struct hlist_node	hash_entry;
> > > > > +	unsigned long		bitaddr; /* ID */
> > > > > +	struct lockdep_map	map;
> > > > > +	int			ref; /* reference count */
> > > > > +};
> > > > 
> > > > So this is effectively bigger than just adding a struct lockdep_map into
> > > > whatever structure holds the bit spinlock to begin with.
> > > > 
> > > > What is the gain?
> > > 
> > > 1. I don't want to make awareness of lockdep essential for users of
> > >    bit-based locks, just as with spinlocks, mutexes, semaphores and so
> > >    on. In other words, I want to make it work transparently.
> > 
> > I want to discourage the use of bitlocks, they stink.
> 
> I agree it has some problems. But people who are sensitive to memory
> consumption still need to use bit-based locks. Right?
>
> I can stop this proposal, since it is meaningless if bit-based locks
> can be removed entirely because no requirement for them exists at all.
> But IMHO it is worthwhile as long as that requirement remains.
> 
> > bitlocks must by their constraint be a test-and-set lock, with all the
> > known problems those have. It also means they're a royal pain for -rt.
> 
> I also think it is better to use a spinlock in most cases, unless
> memory consumption is a critical problem. But when memory consumption
> is critical... what can we do?

So RT is already patching a whole bunch of bit-spinlocks into proper
spinlocks, I would much rather we do that and get lockdep coverage that
way.

That is, have the bit-spinlock for 'normal' kernels and use the proper
spinlock for LOCKDEP || PREEMPT_RT kernels.
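
For illustration, something along these lines for the zram table entry
(the field and helper names here are made up, not taken from the actual
-rt patches):

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_PREEMPT_RT_FULL)
/* a real, lockdep-aware spinlock per table entry */
struct zram_table_entry {
	unsigned long handle;
	unsigned long value;
	spinlock_t lock;
};
#define zram_entry_lock(e)	spin_lock(&(e)->lock)
#define zram_entry_unlock(e)	spin_unlock(&(e)->lock)
#else
/* keep the compact bit spinlock for 'normal' kernels */
struct zram_table_entry {
	unsigned long handle;
	unsigned long value;	/* ZRAM_ACCESS bit doubles as the lock */
};
#define zram_entry_lock(e)	bit_spin_lock(ZRAM_ACCESS, &(e)->value)
#define zram_entry_unlock(e)	bit_spin_unlock(ZRAM_ACCESS, &(e)->value)
#endif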

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/5] lockdep: Implement bitlock map allocator
  2016-07-13 20:17           ` Peter Zijlstra
@ 2016-07-18  1:46             ` Byungchul Park
  0 siblings, 0 replies; 16+ messages in thread
From: Byungchul Park @ 2016-07-18  1:46 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: mingo, linux-kernel, sergey.senozhatsky, gregkh, minchan,
	Thomas Gleixner

On Wed, Jul 13, 2016 at 10:17:54PM +0200, Peter Zijlstra wrote:
> On Mon, Jul 04, 2016 at 04:29:25PM +0900, Byungchul Park wrote:
> > On Fri, Jul 01, 2016 at 09:53:12AM +0200, Peter Zijlstra wrote:
> > > On Fri, Jul 01, 2016 at 09:24:44AM +0900, Byungchul Park wrote:
> > > > On Thu, Jun 30, 2016 at 02:59:19PM +0200, Peter Zijlstra wrote:
> > > > > On Mon, Jun 20, 2016 at 01:55:11PM +0900, Byungchul Park wrote:
> > > > > 
> > > > > > +struct bitlock_map {
> > > > > > +	struct hlist_node	hash_entry;
> > > > > > +	unsigned long		bitaddr; /* ID */
> > > > > > +	struct lockdep_map	map;
> > > > > > +	int			ref; /* reference count */
> > > > > > +};
> > > > > 
> > > > > So this is effectively bigger than just adding a struct lockdep_map into
> > > > > whatever structure holds the bit spinlock to begin with.
> > > > > 
> > > > > What is the gain?
> > > > 
> > > > 1. I don't want to make awareness of lockdep essential for users of
> > > >    bit-based locks, just as with spinlocks, mutexes, semaphores and so
> > > >    on. In other words, I want to make it work transparently.
> > > 
> > > I want to discourage the use of bitlocks, they stink.
> > 
> > I agree it has some problems. But people who are sensitive to memory
> > consumption still need to use bit-based locks. Right?
> >
> > I can stop this proposal, since it is meaningless if bit-based locks
> > can be removed entirely because no requirement for them exists at all.
> > But IMHO it is worthwhile as long as that requirement remains.
> > 
> > > bitlocks must by their constraint be a test-and-set lock, with all the
> > > known problems those have. It also means they're a royal pain for -rt.
> > 
> > I also think it is better to use a spinlock in most cases, unless
> > memory consumption is a critical problem. But when memory consumption
> > is critical... what can we do?
> 
> So RT is already patching a whole bunch of bit-spinlocks into proper
> spinlocks, I would much rather we do that and get lockdep coverage that
> way.
> 
> That is, have the bit-spinlock for 'normal' kernels and use the proper
> spinlock for LOCKDEP || PREEMPT_RT kernels.

Yes. It makes sense to me.

Thank you for the answer.

^ permalink raw reply	[flat|nested] 16+ messages in thread
