From: Jan Kara <jack@suse.cz> To: LKML <linux-kernel@vger.kernel.org> Cc: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org, Jan Kara <jack@suse.cz> Subject: [PATCH 1/6] lib: Implement range locks Date: Thu, 31 Jan 2013 22:49:49 +0100 [thread overview] Message-ID: <1359668994-13433-2-git-send-email-jack@suse.cz> (raw) In-Reply-To: <1359668994-13433-1-git-send-email-jack@suse.cz> Implement range locking using interval tree. Signed-off-by: Jan Kara <jack@suse.cz> --- include/linux/range_lock.h | 51 ++++++++++++++++++++++++++++ lib/Makefile | 4 +- lib/range_lock.c | 78 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 131 insertions(+), 2 deletions(-) create mode 100644 include/linux/range_lock.h create mode 100644 lib/range_lock.c diff --git a/include/linux/range_lock.h b/include/linux/range_lock.h new file mode 100644 index 0000000..fe258a5 --- /dev/null +++ b/include/linux/range_lock.h @@ -0,0 +1,51 @@ +/* + * Range locking + * + * We allow exclusive locking of arbitrary ranges. We guarantee that each + * range is locked only after all conflicting range locks requested previously + * have been unlocked. Thus we achieve fairness and avoid livelocks. + * + * The cost of lock and unlock of a range is O(log(R_all)+R_int) where R_all is + * total number of ranges and R_int is the number of ranges intersecting the + * operated range. 
+ */ +#ifndef _LINUX_RANGE_LOCK_H +#define _LINUX_RANGE_LOCK_H + +#include <linux/rbtree.h> +#include <linux/interval_tree.h> +#include <linux/list.h> +#include <linux/spinlock.h> + + +struct task_struct; + +struct range_lock { + struct interval_tree_node node; + struct task_struct *task; + /* Number of ranges which are blocking acquisition of the lock */ + unsigned int blocking_ranges; +}; + +struct range_lock_tree { + struct rb_root root; + spinlock_t lock; +}; + +#define RANGE_LOCK_INITIALIZER(start, end) {\ + .node = {\ + .start = (start),\ + .end = (end)\ + }\ +} + +static inline void range_lock_tree_init(struct range_lock_tree *tree) +{ + tree->root = RB_ROOT; + spin_lock_init(&tree->lock); +} +void range_lock_init(struct range_lock *lock, unsigned long start, + unsigned long end); +void range_lock(struct range_lock_tree *tree, struct range_lock *lock); +void range_unlock(struct range_lock_tree *tree, struct range_lock *lock); +#endif diff --git a/lib/Makefile b/lib/Makefile index 02ed6c0..04a9caa 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ - earlycpio.o + earlycpio.o interval_tree.o range_lock.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o @@ -144,7 +144,7 @@ lib-$(CONFIG_LIBFDT) += $(libfdt_files) obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o -interval_tree_test-objs := interval_tree_test_main.o interval_tree.o +interval_tree_test-objs := interval_tree_test_main.o obj-$(CONFIG_ASN1) += asn1_decoder.o diff --git a/lib/range_lock.c b/lib/range_lock.c new file mode 100644 index 0000000..1cb119b --- /dev/null +++ b/lib/range_lock.c @@ -0,0 +1,78 @@ +/* + * Implementation of range locks. 
+ * + * We keep interval tree of locked and to-be-locked ranges. When new range lock + * is requested, we add its interval to the tree and store number of intervals + * intersecting it to 'blocking_ranges'. + * + * When a range is unlocked, we again walk intervals that intersect with the + * unlocked one and decrement their 'blocking_ranges'. We wake up owner of any + * range lock whose 'blocking_ranges' drops to 0. + */ +#include <linux/list.h> +#include <linux/rbtree.h> +#include <linux/interval_tree.h> +#include <linux/spinlock.h> +#include <linux/range_lock.h> +#include <linux/sched.h> +#include <linux/export.h> + +void range_lock_init(struct range_lock *lock, unsigned long start, + unsigned long end) +{ + lock->node.start = start; + lock->node.last = end; + RB_CLEAR_NODE(&lock->node.rb); + lock->blocking_ranges = 0; +} +EXPORT_SYMBOL(range_lock_init); + +void range_lock(struct range_lock_tree *tree, struct range_lock *lock) +{ + struct interval_tree_node *node; + unsigned long flags; + + spin_lock_irqsave(&tree->lock, flags); + node = interval_tree_iter_first(&tree->root, lock->node.start, + lock->node.last); + while (node) { + lock->blocking_ranges++; + node = interval_tree_iter_next(node, lock->node.start, + lock->node.last); + } + interval_tree_insert(&lock->node, &tree->root); + /* Do we need to go to sleep? 
*/ + while (lock->blocking_ranges) { + lock->task = current; + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock_irqrestore(&tree->lock, flags); + schedule(); + spin_lock_irqsave(&tree->lock, flags); + } + spin_unlock_irqrestore(&tree->lock, flags); +} +EXPORT_SYMBOL(range_lock); + +static void range_lock_unblock(struct range_lock *lock) +{ + if (!--lock->blocking_ranges) + wake_up_process(lock->task); +} + +void range_unlock(struct range_lock_tree *tree, struct range_lock *lock) +{ + struct interval_tree_node *node; + unsigned long flags; + + spin_lock_irqsave(&tree->lock, flags); + interval_tree_remove(&lock->node, &tree->root); + node = interval_tree_iter_first(&tree->root, lock->node.start, + lock->node.last); + while (node) { + range_lock_unblock((struct range_lock *)node); + node = interval_tree_iter_next(node, lock->node.start, + lock->node.last); + } + spin_unlock_irqrestore(&tree->lock, flags); +} +EXPORT_SYMBOL(range_unlock); -- 1.7.1
WARNING: multiple messages have this Message-ID (diff)
From: Jan Kara <jack@suse.cz> To: LKML <linux-kernel@vger.kernel.org> Cc: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org, Jan Kara <jack@suse.cz> Subject: [PATCH 1/6] lib: Implement range locks Date: Thu, 31 Jan 2013 22:49:49 +0100 [thread overview] Message-ID: <1359668994-13433-2-git-send-email-jack@suse.cz> (raw) In-Reply-To: <1359668994-13433-1-git-send-email-jack@suse.cz> Implement range locking using interval tree. Signed-off-by: Jan Kara <jack@suse.cz> --- include/linux/range_lock.h | 51 ++++++++++++++++++++++++++++ lib/Makefile | 4 +- lib/range_lock.c | 78 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 131 insertions(+), 2 deletions(-) create mode 100644 include/linux/range_lock.h create mode 100644 lib/range_lock.c diff --git a/include/linux/range_lock.h b/include/linux/range_lock.h new file mode 100644 index 0000000..fe258a5 --- /dev/null +++ b/include/linux/range_lock.h @@ -0,0 +1,51 @@ +/* + * Range locking + * + * We allow exclusive locking of arbitrary ranges. We guarantee that each + * range is locked only after all conflicting range locks requested previously + * have been unlocked. Thus we achieve fairness and avoid livelocks. + * + * The cost of lock and unlock of a range is O(log(R_all)+R_int) where R_all is + * total number of ranges and R_int is the number of ranges intersecting the + * operated range. 
+ */ +#ifndef _LINUX_RANGE_LOCK_H +#define _LINUX_RANGE_LOCK_H + +#include <linux/rbtree.h> +#include <linux/interval_tree.h> +#include <linux/list.h> +#include <linux/spinlock.h> + + +struct task_struct; + +struct range_lock { + struct interval_tree_node node; + struct task_struct *task; + /* Number of ranges which are blocking acquisition of the lock */ + unsigned int blocking_ranges; +}; + +struct range_lock_tree { + struct rb_root root; + spinlock_t lock; +}; + +#define RANGE_LOCK_INITIALIZER(start, end) {\ + .node = {\ + .start = (start),\ + .end = (end)\ + }\ +} + +static inline void range_lock_tree_init(struct range_lock_tree *tree) +{ + tree->root = RB_ROOT; + spin_lock_init(&tree->lock); +} +void range_lock_init(struct range_lock *lock, unsigned long start, + unsigned long end); +void range_lock(struct range_lock_tree *tree, struct range_lock *lock); +void range_unlock(struct range_lock_tree *tree, struct range_lock *lock); +#endif diff --git a/lib/Makefile b/lib/Makefile index 02ed6c0..04a9caa 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ - earlycpio.o + earlycpio.o interval_tree.o range_lock.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o @@ -144,7 +144,7 @@ lib-$(CONFIG_LIBFDT) += $(libfdt_files) obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o -interval_tree_test-objs := interval_tree_test_main.o interval_tree.o +interval_tree_test-objs := interval_tree_test_main.o obj-$(CONFIG_ASN1) += asn1_decoder.o diff --git a/lib/range_lock.c b/lib/range_lock.c new file mode 100644 index 0000000..1cb119b --- /dev/null +++ b/lib/range_lock.c @@ -0,0 +1,78 @@ +/* + * Implementation of range locks. 
+ * + * We keep interval tree of locked and to-be-locked ranges. When new range lock + * is requested, we add its interval to the tree and store number of intervals + * intersecting it to 'blocking_ranges'. + * + * When a range is unlocked, we again walk intervals that intersect with the + * unlocked one and decrement their 'blocking_ranges'. We wake up owner of any + * range lock whose 'blocking_ranges' drops to 0. + */ +#include <linux/list.h> +#include <linux/rbtree.h> +#include <linux/interval_tree.h> +#include <linux/spinlock.h> +#include <linux/range_lock.h> +#include <linux/sched.h> +#include <linux/export.h> + +void range_lock_init(struct range_lock *lock, unsigned long start, + unsigned long end) +{ + lock->node.start = start; + lock->node.last = end; + RB_CLEAR_NODE(&lock->node.rb); + lock->blocking_ranges = 0; +} +EXPORT_SYMBOL(range_lock_init); + +void range_lock(struct range_lock_tree *tree, struct range_lock *lock) +{ + struct interval_tree_node *node; + unsigned long flags; + + spin_lock_irqsave(&tree->lock, flags); + node = interval_tree_iter_first(&tree->root, lock->node.start, + lock->node.last); + while (node) { + lock->blocking_ranges++; + node = interval_tree_iter_next(node, lock->node.start, + lock->node.last); + } + interval_tree_insert(&lock->node, &tree->root); + /* Do we need to go to sleep? 
*/ + while (lock->blocking_ranges) { + lock->task = current; + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock_irqrestore(&tree->lock, flags); + schedule(); + spin_lock_irqsave(&tree->lock, flags); + } + spin_unlock_irqrestore(&tree->lock, flags); +} +EXPORT_SYMBOL(range_lock); + +static void range_lock_unblock(struct range_lock *lock) +{ + if (!--lock->blocking_ranges) + wake_up_process(lock->task); +} + +void range_unlock(struct range_lock_tree *tree, struct range_lock *lock) +{ + struct interval_tree_node *node; + unsigned long flags; + + spin_lock_irqsave(&tree->lock, flags); + interval_tree_remove(&lock->node, &tree->root); + node = interval_tree_iter_first(&tree->root, lock->node.start, + lock->node.last); + while (node) { + range_lock_unblock((struct range_lock *)node); + node = interval_tree_iter_next(node, lock->node.start, + lock->node.last); + } + spin_unlock_irqrestore(&tree->lock, flags); +} +EXPORT_SYMBOL(range_unlock); -- 1.7.1 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2013-01-31 21:51 UTC|newest] Thread overview: 46+ messages / expand[flat|nested] mbox.gz Atom feed top 2013-01-31 21:49 [PATCH 0/6 RFC] Mapping range lock Jan Kara 2013-01-31 21:49 ` Jan Kara 2013-01-31 21:49 ` Jan Kara [this message] 2013-01-31 21:49 ` [PATCH 1/6] lib: Implement range locks Jan Kara 2013-01-31 23:57 ` Andrew Morton 2013-01-31 23:57 ` Andrew Morton 2013-02-04 16:41 ` Jan Kara 2013-02-04 16:41 ` Jan Kara 2013-02-11 5:42 ` Michel Lespinasse 2013-02-11 5:42 ` Michel Lespinasse 2013-02-11 10:27 ` Jan Kara 2013-02-11 10:27 ` Jan Kara 2013-02-11 11:03 ` Michel Lespinasse 2013-02-11 11:03 ` Michel Lespinasse 2013-02-11 12:58 ` Jan Kara 2013-02-11 12:58 ` Jan Kara 2013-01-31 21:49 ` [PATCH 2/6] fs: Take mapping lock in generic read paths Jan Kara 2013-01-31 21:49 ` Jan Kara 2013-01-31 23:59 ` Andrew Morton 2013-01-31 23:59 ` Andrew Morton 2013-02-04 12:47 ` Jan Kara 2013-02-04 12:47 ` Jan Kara 2013-02-08 14:59 ` Jan Kara 2013-02-08 14:59 ` Jan Kara 2013-01-31 21:49 ` [PATCH 3/6] fs: Provide function to take mapping lock in buffered write path Jan Kara 2013-01-31 21:49 ` Jan Kara 2013-01-31 21:49 ` [PATCH 4/6] fs: Don't call dio_cleanup() before submitting all bios Jan Kara 2013-01-31 21:49 ` Jan Kara 2013-01-31 21:49 ` [PATCH 5/6] fs: Take mapping lock during direct IO Jan Kara 2013-01-31 21:49 ` Jan Kara 2013-01-31 21:49 ` [PATCH 6/6] ext3: Convert ext3 to use mapping lock Jan Kara 2013-01-31 21:49 ` Jan Kara 2013-02-01 0:07 ` [PATCH 0/6 RFC] Mapping range lock Andrew Morton 2013-02-01 0:07 ` Andrew Morton 2013-02-04 9:29 ` Zheng Liu 2013-02-04 9:29 ` Zheng Liu 2013-02-04 12:38 ` Jan Kara 2013-02-04 12:38 ` Jan Kara 2013-02-05 23:25 ` Dave Chinner 2013-02-05 23:25 ` Dave Chinner 2013-02-06 19:25 ` Jan Kara 2013-02-06 19:25 ` Jan Kara 2013-02-07 2:43 ` Dave Chinner 2013-02-07 2:43 ` Dave Chinner 2013-02-07 11:06 ` Jan Kara 2013-02-07 11:06 ` Jan Kara
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=1359668994-13433-2-git-send-email-jack@suse.cz \ --to=jack@suse.cz \ --cc=linux-fsdevel@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-mm@kvack.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.