linux-kernel.vger.kernel.org archive mirror
* [PATCH v2 0/2] Refactor snapshot vs nocow writers locking
@ 2019-07-19  8:39 Nikolay Borisov
  2019-07-19  8:39 ` [PATCH v2 1/2] btrfs: Implement DRW lock Nikolay Borisov
                   ` (3 more replies)
  0 siblings, 4 replies; 12+ messages in thread
From: Nikolay Borisov @ 2019-07-19  8:39 UTC (permalink / raw)
  To: linux-btrfs; +Cc: paulmck, andrea.parri, linux-kernel, Nikolay Borisov

Hello, 

Here is the second version of the DRW lock for btrfs. Main changes from v1: 

* Fixed all checkpatch warnings (Andrea Parri)
* Properly call write_unlock in btrfs_drw_try_write_lock (Filipe Manana)
* Comment fix. 
* Stress tested it via locktorture. Survived for 8 straight days on a 4 socket
48 thread machine.

I have also produced a PlusCal specification which I'd be happy to discuss with
people, since I'm new to formal specification and it seems the spec doesn't
have the right fidelity:

---- MODULE specs ----
EXTENDS Integers, Sequences, TLC

CONSTANT NumLockers

ASSUME NumLockers > 0

(*--algorithm DRW

variables
    lock_state = "idle",
    states = {"idle", "write_locked", "read_locked", "read_waiting", "write_waiting"},
    threads = [thread \in 1..NumLockers |-> "idle" ] \* everyone is in idle state at first, this generates a tuple

define
INV_SingleLockerType  == \/ lock_state = "write_locked" /\ ~\E thread \in 1..Len(threads): threads[thread] = "read_locked"
                         \/ lock_state = "read_locked" /\ ~\E thread \in 1..Len(threads): threads[thread] = "write_locked"
                         \/ lock_state = "idle" /\ \A thread \in 1..Len(threads): threads[thread] = "idle"
end define;

macro ReadLock(tid) begin
    if lock_state = "idle" \/ lock_state = "read_locked" then
        lock_state := "read_locked";
        threads[tid] := "read_locked";
    else
        assert lock_state = "write_locked";
        threads[tid] := "write_waiting"; \* waiting for writers to finish
        await lock_state = "" \/ lock_state = "read_locked";
    end if;

end macro;

macro WriteLock(tid) begin
    if lock_state = "idle" \/ lock_state = "write_locked" then
        lock_state := "write_locked";
        threads[tid] := "write_locked";
    else
        assert lock_state = "read_locked";
        threads[tid] := "reader_waiting"; \* waiting for readers to finish
        await lock_state = "idle" \/ lock_state = "write_locked";
    end if;

end macro;

macro ReadUnlock(tid) begin
    if threads[tid] = "read_locked" then
        threads[tid] := "idle";
        if ~\E thread \in 1..Len(threads): threads[thread] = "read_locked" then
            lock_state := "idle"; \* we were the last read holder, everyone else should be waiting, unlock the lock
        end if;
    end if;

end macro;

macro WriteUnlock(tid) begin
    if threads[tid] = "write_locked" then
        threads[tid] := "idle";
        if ~\E thread \in 1..Len(threads): threads[thread] = "write_locked" then
            lock_state := "idle"; \* we were the last write holder, everyone else should be waiting, unlock the lock
        end if;
    end if;

end macro;

process lock \in 1..NumLockers

begin LOCKER:
    while TRUE do
        either
            ReadLock(self);
        or
            WriteLock(self);
        or
            ReadUnlock(self);
        or
            WriteUnlock(self);
        end either;
    end while;
end process;

end algorithm; *)

====
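
For reference, a minimal TLC configuration to check the spec with (a sketch;
the constant value below is an arbitrary choice of mine, small enough to keep
the state space manageable):

specs.cfg:

SPECIFICATION Spec
\* arbitrary small value, just to keep the state space manageable
CONSTANT NumLockers = 3
INVARIANT INV_SingleLockerType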


Nikolay Borisov (2):
  btrfs: Implement DRW lock
  btrfs: convert snapshot/nocow exclusion to drw lock

 fs/btrfs/ctree.h       | 10 ++---
 fs/btrfs/disk-io.c     | 46 ++++++----------------
 fs/btrfs/extent-tree.c | 35 -----------------
 fs/btrfs/file.c        | 11 +++---
 fs/btrfs/inode.c       |  8 ++--
 fs/btrfs/ioctl.c       | 10 ++---
 fs/btrfs/locking.c     | 88 ++++++++++++++++++++++++++++++++++++++++++
 fs/btrfs/locking.h     | 20 ++++++++++
 8 files changed, 134 insertions(+), 94 deletions(-)

-- 
2.17.1



* [PATCH v2 1/2] btrfs: Implement DRW lock
  2019-07-19  8:39 [PATCH v2 0/2] Refactor snapshot vs nocow writers locking Nikolay Borisov
@ 2019-07-19  8:39 ` Nikolay Borisov
  2019-07-19  8:39 ` [PATCH v2 2/2] btrfs: convert snapshot/nocow exclusion to drw lock Nikolay Borisov
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 12+ messages in thread
From: Nikolay Borisov @ 2019-07-19  8:39 UTC (permalink / raw)
  To: linux-btrfs; +Cc: paulmck, andrea.parri, linux-kernel, Nikolay Borisov

A (D)ouble (R)eader (W)riter lock is a locking primitive that allows
either multiple readers or multiple writers to hold it concurrently,
but never readers and writers at the same time. The code is factored
out from the existing open-coded locking scheme used to exclude pending
snapshots from nocow writers and vice versa. The current implementation
favors readers (that is, snapshot creators) over writers (nocow writers
of the filesystem).
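
As an illustration, a sketch of the intended usage (not part of this
patch; the lock instance here is a made-up placeholder):

	struct btrfs_drw_lock *lock;	/* some shared lock instance */

	/* Snapshot creation ("reader"): concurrent with other snapshots. */
	btrfs_drw_read_lock(lock);	/* may block while writers are active */
	/* ... create snapshot ... */
	btrfs_drw_read_unlock(lock);

	/* Nocow writer: backs off if any snapshot is pending. */
	if (btrfs_drw_try_write_lock(lock)) {
		/* ... nocow write, concurrent with other nocow writers ... */
		btrfs_drw_write_unlock(lock);
	}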

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
---
 fs/btrfs/ctree.h   |  1 +
 fs/btrfs/locking.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++
 fs/btrfs/locking.h | 20 +++++++++++
 3 files changed, 109 insertions(+)

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index da97ff10f421..b7c9359b24a0 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -33,6 +33,7 @@
 #include "extent_map.h"
 #include "async-thread.h"
 #include "block-rsv.h"
+#include "locking.h"
 
 struct btrfs_trans_handle;
 struct btrfs_transaction;
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 98fccce4208c..702c956ed028 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -354,3 +354,91 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 		write_unlock(&eb->lock);
 	}
 }
+
+
+int btrfs_drw_lock_init(struct btrfs_drw_lock *lock)
+{
+	int ret;
+
+	ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	atomic_set(&lock->readers, 0);
+	init_waitqueue_head(&lock->pending_readers);
+	init_waitqueue_head(&lock->pending_writers);
+
+	return 0;
+}
+EXPORT_SYMBOL(btrfs_drw_lock_init);
+
+void btrfs_drw_lock_destroy(struct btrfs_drw_lock *lock)
+{
+	percpu_counter_destroy(&lock->writers);
+}
+
+bool btrfs_drw_try_write_lock(struct btrfs_drw_lock *lock)
+{
+	if (atomic_read(&lock->readers))
+		return false;
+
+	percpu_counter_inc(&lock->writers);
+
+	/*
+	 * Ensure writers count is updated before we check for
+	 * pending readers
+	 */
+	smp_mb();
+	if (atomic_read(&lock->readers)) {
+		btrfs_drw_write_unlock(lock);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(btrfs_drw_try_write_lock);
+
+void btrfs_drw_write_lock(struct btrfs_drw_lock *lock)
+{
+	while (true) {
+		if (btrfs_drw_try_write_lock(lock))
+			return;
+		wait_event(lock->pending_writers, !atomic_read(&lock->readers));
+	}
+}
+EXPORT_SYMBOL(btrfs_drw_write_lock);
+
+void btrfs_drw_write_unlock(struct btrfs_drw_lock *lock)
+{
+	percpu_counter_dec(&lock->writers);
+	cond_wake_up(&lock->pending_readers);
+}
+EXPORT_SYMBOL(btrfs_drw_write_unlock);
+
+void btrfs_drw_read_lock(struct btrfs_drw_lock *lock)
+{
+	atomic_inc(&lock->readers);
+
+	/*
+	 * Ensure the pending reader count is perceived BEFORE this reader
+	 * goes to sleep in case of active writers. This guarantees new writers
+	 * won't be allowed and that the current reader will be woken up when
+	 * the last active writer finishes its jobs.
+	 */
+	smp_mb__after_atomic();
+
+	wait_event(lock->pending_readers,
+		   percpu_counter_sum(&lock->writers) == 0);
+}
+EXPORT_SYMBOL(btrfs_drw_read_lock);
+
+void btrfs_drw_read_unlock(struct btrfs_drw_lock *lock)
+{
+	/*
+	 * Atomic RMW operations imply full barrier, so woken up writers
+	 * are guaranteed to see the decrement
+	 */
+	if (atomic_dec_and_test(&lock->readers))
+		wake_up(&lock->pending_writers);
+}
+EXPORT_SYMBOL(btrfs_drw_read_unlock);
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 595014f64830..44378c65f843 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -6,6 +6,10 @@
 #ifndef BTRFS_LOCKING_H
 #define BTRFS_LOCKING_H
 
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/percpu_counter.h>
+
 #define BTRFS_WRITE_LOCK 1
 #define BTRFS_READ_LOCK 2
 #define BTRFS_WRITE_LOCK_BLOCKING 3
@@ -39,4 +43,20 @@ static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 		BUG();
 }
 
+
+struct btrfs_drw_lock {
+	atomic_t readers;
+	struct percpu_counter writers;
+	wait_queue_head_t pending_writers;
+	wait_queue_head_t pending_readers;
+};
+
+int btrfs_drw_lock_init(struct btrfs_drw_lock *lock);
+void btrfs_drw_lock_destroy(struct btrfs_drw_lock *lock);
+void btrfs_drw_write_lock(struct btrfs_drw_lock *lock);
+bool btrfs_drw_try_write_lock(struct btrfs_drw_lock *lock);
+void btrfs_drw_write_unlock(struct btrfs_drw_lock *lock);
+void btrfs_drw_read_lock(struct btrfs_drw_lock *lock);
+void btrfs_drw_read_unlock(struct btrfs_drw_lock *lock);
+
 #endif
-- 
2.17.1



* [PATCH v2 2/2] btrfs: convert snapshot/nocow exclusion to drw lock
  2019-07-19  8:39 [PATCH v2 0/2] Refactor snapshot vs nocow writers locking Nikolay Borisov
  2019-07-19  8:39 ` [PATCH v2 1/2] btrfs: Implement DRW lock Nikolay Borisov
@ 2019-07-19  8:39 ` Nikolay Borisov
  2019-07-19  8:48 ` [RFC PATCH] btrfs: Hook btrfs' DRW lock to locktorture infrastructure Nikolay Borisov
  2019-07-29 14:13 ` [PATCH v2 0/2] Refactor snapshot vs nocow writers locking Valentin Schneider
  3 siblings, 0 replies; 12+ messages in thread
From: Nikolay Borisov @ 2019-07-19  8:39 UTC (permalink / raw)
  To: linux-btrfs; +Cc: paulmck, andrea.parri, linux-kernel, Nikolay Borisov

This patch removes all haphazard code implementing nocow writers
exclusion from pending snapshot creation and switches to using the drw
lock to ensure this invariant still holds. "Readers" are snapshot
creators from create_snapshot and "writers" are nocow writers from the
buffered write path or btrfs_setsize. This locking scheme allows
multiple snapshots to run concurrently while blocking nocow writers,
since writes to the page cache in the nocow path would make snapshots
inconsistent.

For performance reasons we'd like the ability to run multiple
concurrent snapshots, so the lock favors readers in this case. And when
there are no pending snapshots (which will be the majority of cases) we
rely on the per-CPU writers counter to avoid cacheline contention.

The main gain from using the drw lock is that it's now a lot easier to
reason about the guarantees of the locking scheme and whether there is
some silent breakage lurking.
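
The conversion boils down to the following mapping (condensed from the
diff below):

  btrfs_start_write_no_snapshotting(root)  ->  btrfs_drw_try_write_lock(&root->snapshot_lock)
  btrfs_wait_for_snapshot_creation(root)   ->  btrfs_drw_write_lock(&root->snapshot_lock)
  btrfs_end_write_no_snapshotting(root)    ->  btrfs_drw_write_unlock(&root->snapshot_lock)
  will_be_snapshotted inc + wait           ->  btrfs_drw_read_lock(&root->snapshot_lock)
  will_be_snapshotted dec + wake_up        ->  btrfs_drw_read_unlock(&root->snapshot_lock)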

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
---
 fs/btrfs/ctree.h       |  9 ++-------
 fs/btrfs/disk-io.c     | 46 ++++++++++--------------------------------
 fs/btrfs/extent-tree.c | 35 --------------------------------
 fs/btrfs/file.c        | 11 +++++-----
 fs/btrfs/inode.c       |  8 ++++----
 fs/btrfs/ioctl.c       | 10 +++------
 6 files changed, 25 insertions(+), 94 deletions(-)

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b7c9359b24a0..c04a23384210 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1087,11 +1087,6 @@ static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
 	return sb->s_fs_info;
 }
 
-struct btrfs_subvolume_writers {
-	struct percpu_counter	counter;
-	wait_queue_head_t	wait;
-};
-
 /*
  * The state of btrfs root
  */
@@ -1263,8 +1258,8 @@ struct btrfs_root {
 	 * root_item_lock.
 	 */
 	int dedupe_in_progress;
-	struct btrfs_subvolume_writers *subv_writers;
-	atomic_t will_be_snapshotted;
+	struct btrfs_drw_lock snapshot_lock;
+
 	atomic_t snapshot_force_cow;
 
 	/* For qgroup metadata reserved space */
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index baebbb74032d..5a967d6264c9 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1122,32 +1122,6 @@ void btrfs_clean_tree_block(struct extent_buffer *buf)
 	}
 }
 
-static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
-{
-	struct btrfs_subvolume_writers *writers;
-	int ret;
-
-	writers = kmalloc(sizeof(*writers), GFP_NOFS);
-	if (!writers)
-		return ERR_PTR(-ENOMEM);
-
-	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
-	if (ret < 0) {
-		kfree(writers);
-		return ERR_PTR(ret);
-	}
-
-	init_waitqueue_head(&writers->wait);
-	return writers;
-}
-
-static void
-btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
-{
-	percpu_counter_destroy(&writers->counter);
-	kfree(writers);
-}
-
 static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 			 u64 objectid)
 {
@@ -1195,7 +1169,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 	atomic_set(&root->log_writers, 0);
 	atomic_set(&root->log_batch, 0);
 	refcount_set(&root->refs, 1);
-	atomic_set(&root->will_be_snapshotted, 0);
 	atomic_set(&root->snapshot_force_cow, 0);
 	atomic_set(&root->nr_swapfiles, 0);
 	root->log_transid = 0;
@@ -1484,7 +1457,7 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
 int btrfs_init_fs_root(struct btrfs_root *root)
 {
 	int ret;
-	struct btrfs_subvolume_writers *writers;
+	unsigned int nofs_flag;
 
 	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
 	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
@@ -1494,12 +1467,16 @@ int btrfs_init_fs_root(struct btrfs_root *root)
 		goto fail;
 	}
 
-	writers = btrfs_alloc_subvolume_writers();
-	if (IS_ERR(writers)) {
-		ret = PTR_ERR(writers);
+	/*
+	 * We might be called under a transaction (e.g. indirect
+	 * backref resolution) which could deadlock if it triggers memory
+	 * reclaim
+	 */
+	nofs_flag = memalloc_nofs_save();
+	ret = btrfs_drw_lock_init(&root->snapshot_lock);
+	memalloc_nofs_restore(nofs_flag);
+	if (ret)
 		goto fail;
-	}
-	root->subv_writers = writers;
 
 	btrfs_init_free_ino_ctl(root);
 	spin_lock_init(&root->ino_cache_lock);
@@ -3920,8 +3897,7 @@ void btrfs_free_fs_root(struct btrfs_root *root)
 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
 	if (root->anon_dev)
 		free_anon_bdev(root->anon_dev);
-	if (root->subv_writers)
-		btrfs_free_subvolume_writers(root->subv_writers);
+	btrfs_drw_lock_destroy(&root->snapshot_lock);
 	free_extent_buffer(root->node);
 	free_extent_buffer(root->commit_root);
 	kfree(root->free_ino_ctl);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d3b58e388535..e4f36c874f1b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -9061,41 +9061,6 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
  * operations while snapshotting is ongoing and that cause the snapshot to be
  * inconsistent (writes followed by expanding truncates for example).
  */
-void btrfs_end_write_no_snapshotting(struct btrfs_root *root)
-{
-	percpu_counter_dec(&root->subv_writers->counter);
-	cond_wake_up(&root->subv_writers->wait);
-}
-
-int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
-{
-	if (atomic_read(&root->will_be_snapshotted))
-		return 0;
-
-	percpu_counter_inc(&root->subv_writers->counter);
-	/*
-	 * Make sure counter is updated before we check for snapshot creation.
-	 */
-	smp_mb();
-	if (atomic_read(&root->will_be_snapshotted)) {
-		btrfs_end_write_no_snapshotting(root);
-		return 0;
-	}
-	return 1;
-}
-
-void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
-{
-	while (true) {
-		int ret;
-
-		ret = btrfs_start_write_no_snapshotting(root);
-		if (ret)
-			break;
-		wait_var_event(&root->will_be_snapshotted,
-			       !atomic_read(&root->will_be_snapshotted));
-	}
-}
 
 void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
 {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 52fb235e6d19..c043dc9a6e43 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1555,8 +1555,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
 	u64 num_bytes;
 	int ret;
 
-	ret = btrfs_start_write_no_snapshotting(root);
-	if (!ret)
+	if (!btrfs_drw_try_write_lock(&root->snapshot_lock))
 		return -EAGAIN;
 
 	lockstart = round_down(pos, fs_info->sectorsize);
@@ -1571,7 +1570,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
 			NULL, NULL, NULL);
 	if (ret <= 0) {
 		ret = 0;
-		btrfs_end_write_no_snapshotting(root);
+		btrfs_drw_write_unlock(&root->snapshot_lock);
 	} else {
 		*write_bytes = min_t(size_t, *write_bytes ,
 				     num_bytes - pos + lockstart);
@@ -1676,7 +1675,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 						data_reserved, pos,
 						write_bytes);
 			else
-				btrfs_end_write_no_snapshotting(root);
+				btrfs_drw_write_unlock(&root->snapshot_lock);
 			break;
 		}
 
@@ -1770,7 +1769,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 
 		release_bytes = 0;
 		if (only_release_metadata)
-			btrfs_end_write_no_snapshotting(root);
+			btrfs_drw_write_unlock(&root->snapshot_lock);
 
 		if (only_release_metadata && copied > 0) {
 			lockstart = round_down(pos,
@@ -1800,7 +1799,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 
 	if (release_bytes) {
 		if (only_release_metadata) {
-			btrfs_end_write_no_snapshotting(root);
+			btrfs_drw_write_unlock(&root->snapshot_lock);
 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
 					release_bytes, true);
 		} else {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 11068a54e696..6359aa3bfcab 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5138,16 +5138,16 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 		 * truncation, it must capture all writes that happened before
 		 * this truncation.
 		 */
-		btrfs_wait_for_snapshot_creation(root);
+		btrfs_drw_write_lock(&root->snapshot_lock);
 		ret = btrfs_cont_expand(inode, oldsize, newsize);
 		if (ret) {
-			btrfs_end_write_no_snapshotting(root);
+			btrfs_drw_write_unlock(&root->snapshot_lock);
 			return ret;
 		}
 
 		trans = btrfs_start_transaction(root, 1);
 		if (IS_ERR(trans)) {
-			btrfs_end_write_no_snapshotting(root);
+			btrfs_drw_write_unlock(&root->snapshot_lock);
 			return PTR_ERR(trans);
 		}
 
@@ -5155,7 +5155,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
 		pagecache_isize_extended(inode, oldsize, newsize);
 		ret = btrfs_update_inode(trans, root, inode);
-		btrfs_end_write_no_snapshotting(root);
+		btrfs_drw_write_unlock(&root->snapshot_lock);
 		btrfs_end_transaction(trans);
 	} else {
 
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a8c21da77b87..2182af8d62e2 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -791,11 +791,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 	 * possible. This is to avoid later writeback (running dealloc) to
 	 * fallback to COW mode and unexpectedly fail with ENOSPC.
 	 */
-	atomic_inc(&root->will_be_snapshotted);
-	smp_mb__after_atomic();
-	/* wait for no snapshot writes */
-	wait_event(root->subv_writers->wait,
-		   percpu_counter_sum(&root->subv_writers->counter) == 0);
+	btrfs_drw_read_lock(&root->snapshot_lock);
 
 	ret = btrfs_start_delalloc_snapshot(root);
 	if (ret)
@@ -875,8 +871,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 dec_and_free:
 	if (snapshot_force_cow)
 		atomic_dec(&root->snapshot_force_cow);
-	if (atomic_dec_and_test(&root->will_be_snapshotted))
-		wake_up_var(&root->will_be_snapshotted);
+	btrfs_drw_read_unlock(&root->snapshot_lock);
+
 free_pending:
 	kfree(pending_snapshot->root_item);
 	btrfs_free_path(pending_snapshot->path);
-- 
2.17.1



* [RFC PATCH] btrfs: Hook btrfs' DRW lock to locktorture infrastructure
  2019-07-19  8:39 [PATCH v2 0/2] Refactor snapshot vs nocow writers locking Nikolay Borisov
  2019-07-19  8:39 ` [PATCH v2 1/2] btrfs: Implement DRW lock Nikolay Borisov
  2019-07-19  8:39 ` [PATCH v2 2/2] btrfs: convert snapshot/nocow exclusion to drw lock Nikolay Borisov
@ 2019-07-19  8:48 ` Nikolay Borisov
  2019-08-05 16:36   ` Nathan Chancellor
  2019-07-29 14:13 ` [PATCH v2 0/2] Refactor snapshot vs nocow writers locking Valentin Schneider
  3 siblings, 1 reply; 12+ messages in thread
From: Nikolay Borisov @ 2019-07-19  8:48 UTC (permalink / raw)
  To: linux-btrfs; +Cc: paulmck, linux-kernel, Nikolay Borisov

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
---

Hello Paul, 

Here is the code I used to test the DRW lock via the lock torture infrastructure. 
It's rather ugly but got the job done for me. It's definitely not in a mergeable
form. At the very least I think including btrfs headers constitutes a violation 
of separation of different subsystems. Would it be acceptable to guard them 
behind something like "#if BTRFS && BTRFS_DEBUG" ? 
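
i.e. something along these lines (untested sketch):

/* sketch: only pull in btrfs when it is built in and debug is enabled */
#if IS_ENABLED(CONFIG_BTRFS_FS) && IS_ENABLED(CONFIG_BTRFS_DEBUG)
#include "../../fs/btrfs/ctree.h"
#include "../../fs/btrfs/locking.h"
#endif

with the btrfs_drw_lock_ops definition and its entry in the torture_ops
array guarded the same way.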

I'm really posting this just for posterity/provenance purposes. I'm fine with 
dropping the patch. 


 fs/btrfs/locking.h           |  1 +
 kernel/locking/locktorture.c | 77 +++++++++++++++++++++++++++++++++++-
 2 files changed, 77 insertions(+), 1 deletion(-)

diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 44378c65f843..27627d4fd3a9 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -9,6 +9,7 @@
 #include <linux/atomic.h>
 #include <linux/wait.h>
 #include <linux/percpu_counter.h>
+#include "extent_io.h"
 
 #define BTRFS_WRITE_LOCK 1
 #define BTRFS_READ_LOCK 2
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 80a463d31a8d..774e10a25876 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -30,6 +30,8 @@
 #include <linux/slab.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/torture.h>
+#include "../../fs/btrfs/ctree.h"
+#include "../../fs/btrfs/locking.h"
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
@@ -85,6 +87,7 @@ struct lock_torture_ops {
 
 	unsigned long flags; /* for irq spinlocks */
 	const char *name;
+	bool multiple;
 };
 
 struct lock_torture_cxt {
@@ -600,6 +603,7 @@ static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
 	percpu_up_read(&pcpu_rwsem);
 }
 
+
 static struct lock_torture_ops percpu_rwsem_lock_ops = {
 	.init		= torture_percpu_rwsem_init,
 	.writelock	= torture_percpu_rwsem_down_write,
@@ -612,6 +616,76 @@ static struct lock_torture_ops percpu_rwsem_lock_ops = {
 	.name		= "percpu_rwsem_lock"
 };
 
+static struct btrfs_drw_lock torture_drw_lock;
+
+void torture_drw_init(void)
+{
+	BUG_ON(btrfs_drw_lock_init(&torture_drw_lock));
+}
+
+static int torture_drw_write_lock(void) __acquires(torture_drw_lock)
+{
+	btrfs_drw_write_lock(&torture_drw_lock);
+	return 0;
+}
+
+static void torture_drw_write_unlock(void) __releases(torture_drw_lock)
+{
+	btrfs_drw_write_unlock(&torture_drw_lock);
+}
+
+static int torture_drw_read_lock(void) __acquires(torture_drw_lock)
+{
+	btrfs_drw_read_lock(&torture_drw_lock);
+	return 0;
+}
+
+static void torture_drw_read_unlock(void) __releases(torture_drw_lock)
+{
+	btrfs_drw_read_unlock(&torture_drw_lock);
+}
+
+static void torture_drw_write_delay(struct torture_random_state *trsp)
+{
+	const unsigned long longdelay_ms = 100;
+
+	/* We want a long delay occasionally to force massive contention.  */
+	if (!(torture_random(trsp) %
+	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+		mdelay(longdelay_ms * 10);
+	else
+		mdelay(longdelay_ms / 10);
+	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
+		torture_preempt_schedule();  /* Allow test to be preempted. */
+}
+
+static void torture_drw_read_delay(struct torture_random_state *trsp)
+{
+	const unsigned long longdelay_ms = 100;
+
+	/* We want a long delay occasionally to force massive contention.  */
+	if (!(torture_random(trsp) %
+	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
+		mdelay(longdelay_ms * 2);
+	else
+		mdelay(longdelay_ms / 2);
+	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
+		torture_preempt_schedule();  /* Allow test to be preempted. */
+}
+
+static struct lock_torture_ops btrfs_drw_lock_ops = {
+	.init		= torture_drw_init,
+	.writelock	= torture_drw_write_lock,
+	.write_delay	= torture_drw_write_delay,
+	.task_boost     = torture_boost_dummy,
+	.writeunlock	= torture_drw_write_unlock,
+	.readlock       = torture_drw_read_lock,
+	.read_delay     = torture_drw_read_delay, /* figure what to do with this */
+	.readunlock     = torture_drw_read_unlock,
+	.multiple = true,
+	.name		= "btrfs_drw_lock"
+};
+
 /*
  * Lock torture writer kthread.  Repeatedly acquires and releases
  * the lock, checking for duplicate acquisitions.
@@ -630,7 +704,7 @@ static int lock_torture_writer(void *arg)
 
 		cxt.cur_ops->task_boost(&rand);
 		cxt.cur_ops->writelock();
-		if (WARN_ON_ONCE(lock_is_write_held))
+		if (!cxt.cur_ops->multiple && WARN_ON_ONCE(lock_is_write_held))
 			lwsp->n_lock_fail++;
 		lock_is_write_held = 1;
 		if (WARN_ON_ONCE(lock_is_read_held))
@@ -852,6 +926,7 @@ static int __init lock_torture_init(void)
 #endif
 		&rwsem_lock_ops,
 		&percpu_rwsem_lock_ops,
+		&btrfs_drw_lock_ops
 	};
 
 	if (!torture_init_begin(torture_type, verbose))
-- 
2.17.1



* Re: [PATCH v2 0/2] Refactor snapshot vs nocow writers locking
  2019-07-19  8:39 [PATCH v2 0/2] Refactor snapshot vs nocow writers locking Nikolay Borisov
                   ` (2 preceding siblings ...)
  2019-07-19  8:48 ` [RFC PATCH] btrfs: Hook btrfs' DRW lock to locktorture infrastructure Nikolay Borisov
@ 2019-07-29 14:13 ` Valentin Schneider
  2019-07-29 15:33   ` Catalin Marinas
  3 siblings, 1 reply; 12+ messages in thread
From: Valentin Schneider @ 2019-07-29 14:13 UTC (permalink / raw)
  To: Nikolay Borisov, linux-btrfs
  Cc: paulmck, andrea.parri, linux-kernel, Catalin Marinas

Hi Nikolay,

On 19/07/2019 09:39, Nikolay Borisov wrote:
> Hello, 
> 
> Here is the second version of the DRW lock for btrfs. Main changes from v1: 
> 
> * Fixed all checkpatch warnings (Andrea Parri)
> * Properly call write_unlock in btrfs_drw_try_write_lock (Filipe Manana)
> * Comment fix. 
> * Stress tested it via locktorture. Survived for 8 straight days on a 4 socket
> 48 thread machine.
> 
> I have also produced a PlusCal specification which I'd be happy to discuss with
> people, since I'm new to formal specification and it seems the spec doesn't
> have the right fidelity:
> 

I haven't played with PlusCal in a while but I figured I'd have a go at it
anyway. I've also Cc'd Catalin who's my local TLA+/PCal guru.


FYI PlusCal also supports a C-like syntax, which means you can use glorious
brackets instead of begin/end (unless you prefer those... I won't judge).

I appended my "clean-up" of your spec - mainly changed to C-style
syntax, added a few more constant definitions, and split the locker
process into separate reader and writer processes. IMO makes it more
readable.

> ---- MODULE specs ----
> EXTENDS Integers, Sequences, TLC
> 
> CONSTANT NumLockers
> 
> ASSUME NumLockers > 0
> 
> (*--algorithm DRW
> 
> variables
>     lock_state = "idle",
>     states = {"idle", "write_locked", "read_locked", "read_waiting", "write_waiting"},
>     threads = [thread \in 1..NumLockers |-> "idle" ] \* everyone is in idle state at first, this generates a tuple
> 
> define
> INV_SingleLockerType  == \/ lock_state = "write_locked" /\ ~\E thread \in 1..Len(threads): threads[thread] = "read_locked"
>                          \/ lock_state = "read_locked" /\ ~\E thread \in 1..Len(threads): threads[thread] = "write_locked"
>                          \/ lock_state = "idle" /\ \A thread \in 1..Len(threads): threads[thread] = "idle"

I've tried to think about liveness properties we would want to check
against this spec, but the usual ones don't really work here: AFAICT
there is no fairness in the spec, so readers can completely block
writers (and the opposite is true as well).

TLC checks for deadlocks by default (IIRC that should translate to always
having at least one non-stuttering step enabled in the next-state
formula), so maybe that is all we need?

> end define;
> 
> macro ReadLock(tid) begin
>     if lock_state = "idle" \/ lock_state = "read_locked" then
>         lock_state := "read_locked";
>         threads[tid] := "read_locked";
>     else
>         assert lock_state = "write_locked";
>         threads[tid] := "write_waiting"; \* waiting for writers to finish
>         await lock_state = "" \/ lock_state = "read_locked";
                             ^^
That's not a valid lock state, was that meant to be "idle"? 

BTW your spec doesn't have a type check invariant (something to make sure
whatever is in your variables is sane). It seems to be common practice, and
it's quite handy to spot stupid mistakes. For this spec it would look
something like this:

LOCK_STATES == {"idle", "write_locked", "read_locked"}
THREAD_STATES == LOCK_STATES \union {"read_waiting", "write_waiting"}

TypeCheck ==
    /\ lock_state \in LOCK_STATES
    /\ \A thread \in THREADS: threads[thread] \in THREAD_STATES

>     end if;
> 
> end macro;
> 
> macro WriteLock(tid) begin
>     if lock_state = "idle" \/ lock_state = "write_locked" then
>         lock_state := "write_locked";
>         threads[tid] := "write_locked";
>     else
>         assert lock_state = "read_locked";
>         threads[tid] := "reader_waiting"; \* waiting for readers to finish
>         await lock_state = "idle" \/ lock_state = "write_locked";

Aren't we missing an extra action here (same goes for the read lock)?

threads[tid] should be set to "write_locked", and lock_state should be
updated if it was previously "idle".

Now the nasty thing is that we'd be setting threads[tid] to two different
values in the same atomic block, so PlusCal will complain and we'll have
to add some labels (which means changing the macro into a procedure).

Maybe something like this?

procedure WriteLock()
{
prepare:
    \* waiting for readers to finish
    threads[self] := "read_waiting";
lock:
    await lock_state = "idle" \/ lock_state = "write_locked";
    lock_state := "write_locked";
    threads[self] := "write_locked";
    return;
};

+ something similar for ReadLock().

Alternatively the "prepare" step could be some counter increment, to more
closely mimic the actual implementation, but I don't think it adds much
value to the model.

---------------------------------------------------------------------------

specs.tla:

---- MODULE specs ----
EXTENDS Integers, Sequences, TLC

CONSTANTS
    NR_WRITERS,
    NR_READERS,
    WRITER_TASK,
    READER_TASK

WRITERS == {WRITER_TASK} \X (1..NR_WRITERS)
READERS == {READER_TASK} \X (1..NR_READERS)
THREADS == WRITERS \union READERS

(*--algorithm DRW {

variables
    lock_state = "idle",
    \* everyone is in idle state at first, this generates a tuple
    threads = [thread \in THREADS |-> "idle" ]

define {

LOCK_STATES == {"idle", "write_locked", "read_locked"}
THREAD_STATES == LOCK_STATES \union {"read_waiting", "write_waiting"}

(* Safety invariants *)
TypeCheck ==
    /\ lock_state \in LOCK_STATES
    /\ \A thread \in THREADS: threads[thread] \in THREAD_STATES

NoReadWhenWrite ==
    lock_state = "write_locked" =>
        \A thread \in THREADS: threads[thread] # "read_locked"

NoWriteWhenRead ==
    lock_state = "read_locked" =>
        \A thread \in THREADS: threads[thread] # "write_locked"

AllIdleWhenIdle ==
    lock_state = "idle" =>
        \A thread \in THREADS: threads[thread] = "idle"

(* Ensure critical section exclusiveness *)
Exclusion ==
    /\ \E writer \in WRITERS: pc[writer] = "write_cs" =>
        \A reader \in READERS: pc[reader] # "read_cs"
    /\ \E reader \in READERS: pc[reader] = "read_cs" =>
        \A writer \in WRITERS: pc[writer] # "write_cs"
}

macro ReadLock(tid)
{
    if (lock_state = "idle" \/ lock_state = "read_locked") {
        lock_state := "read_locked";
        threads[tid] := "read_locked";
    } else {
        assert lock_state = "write_locked";
        \* waiting for writers to finish
        threads[tid] := "write_waiting";
        await lock_state = "" \/ lock_state = "read_locked";
    };
}

macro WriteLock(tid)
{
    if (lock_state = "idle" \/ lock_state = "write_locked") {
        lock_state := "write_locked";
        threads[tid] := "write_locked";
    } else {
        assert lock_state = "read_locked";
        \* waiting for readers to finish
        threads[tid] := "read_waiting";
        await lock_state = "idle" \/ lock_state = "write_locked";
    };
}

macro ReadUnlock(tid) {
    if (threads[tid] = "read_locked") {
        threads[tid] := "idle";
        if (\A thread \in THREADS: threads[thread] # "read_locked") {
            \* we were the last read holder, everyone else should be waiting, unlock the lock
            lock_state := "idle";
        };
    };
}

macro WriteUnlock(tid) {
    if (threads[tid] = "write_locked") {
        threads[tid] := "idle";
        if (\A thread \in THREADS: threads[thread] # "write_locked") {
            \* we were the last write holder, everyone else should be waiting, unlock the lock
            lock_state := "idle";
        };
    };
}

fair process(writer \in WRITERS)
{
loop:
    while (TRUE) {
        WriteLock(self);
write_cs:
        skip;
unlock:
        WriteUnlock(self);
    };
}

fair process(reader \in READERS)
{
loop:
    while (TRUE) {
        ReadLock(self);
read_cs:
        skip;
unlock:
        ReadUnlock(self);
    };

}

}*)
====

specs.cfg:

SPECIFICATION Spec
\* Add statements after this line.

CONSTANTS
	NR_READERS = 3
	NR_WRITERS = 3
	READER_TASK = reader
	WRITER_TASK = writer

INVARIANTS
	TypeCheck
	
	NoReadWhenWrite
	NoWriteWhenRead
	AllIdleWhenIdle
	
	Exclusion


* Re: [PATCH v2 0/2] Refactor snapshot vs nocow writers locking
  2019-07-29 14:13 ` [PATCH v2 0/2] Refactor snapshot vs nocow writers locking Valentin Schneider
@ 2019-07-29 15:33   ` Catalin Marinas
  2019-07-29 16:32     ` Valentin Schneider
  0 siblings, 1 reply; 12+ messages in thread
From: Catalin Marinas @ 2019-07-29 15:33 UTC (permalink / raw)
  To: Valentin Schneider
  Cc: Nikolay Borisov, linux-btrfs, paulmck, andrea.parri, linux-kernel

Some nitpicking below:

On Mon, Jul 29, 2019 at 03:13:42PM +0100, Valentin Schneider wrote:
> specs.tla:
> 
> ---- MODULE specs ----
> EXTENDS Integers, Sequences, TLC
> 
> CONSTANTS
>     NR_WRITERS,
>     NR_READERS,
>     WRITER_TASK,
>     READER_TASK
> 
> WRITERS == {WRITER_TASK} \X (1..NR_WRITERS)
> READERS == {READER_TASK} \X (1..NR_READERS)
> THREADS == WRITERS \union READERS

Recommendation: use symbolic values for WRITERS and READERS (defined in
.cfg: e.g. r1, r2, r3, w1, w2, w3). It allows you to do symmetry
optimisations. We've also hit a TLC bug in the past with process values
made up of a Cartesian product (though it may have been fixed since).

> macro ReadLock(tid)
> {
>     if (lock_state = "idle" \/ lock_state = "read_locked") {
>         lock_state := "read_locked";
>         threads[tid] := "read_locked";
>     } else {
>         assert lock_state = "write_locked";
>         \* waiting for writers to finish
>         threads[tid] := "write_waiting";
>         await lock_state = "" \/ lock_state = "read_locked";

lock_state = "idle"?

> macro WriteLock(tid)
> {
>     if (lock_state = "idle" \/ lock_state = "write_locked") {
>         lock_state := "write_locked";
>         threads[tid] := "write_locked";
>     } else {
>         assert lock_state = "read_locked";
>         \* waiting for readers to finish
>         threads[tid] := "read_waiting";
>         await lock_state = "idle" \/ lock_state = "write_locked";
>     };
> }

I'd say that's one of the pitfalls of PlusCal. The above is executed
atomically, so you'd have the lock_state read and updated in the same
action. Looking at the C patches, there is an
atomic_read(&lock->readers) followed by a
percpu_counter_inc(&lock->writers). Between these two, you can have
"readers" becoming non-zero via a different CPU.

My suggestion would be to use procedures with labels to express the
non-atomicity of such sequences.

> macro ReadUnlock(tid) {
>     if (threads[tid] = "read_locked") {
>         threads[tid] := "idle";
>         if (\A thread \in THREADS: threads[thread] # "read_locked") {
>             \* we were the last read holder, everyone else should be waiting, unlock the lock
>             lock_state := "idle";
>         };
>     };
> }

I'd make this close to the proposed C code with atomic counters. You'd
not be able to check each thread atomically in practice anyway.

-- 
Catalin


* Re: [PATCH v2 0/2] Refactor snapshot vs nocow writers locking
  2019-07-29 15:33   ` Catalin Marinas
@ 2019-07-29 16:32     ` Valentin Schneider
  2019-07-30 11:03       ` Valentin Schneider
  0 siblings, 1 reply; 12+ messages in thread
From: Valentin Schneider @ 2019-07-29 16:32 UTC (permalink / raw)
  To: Catalin Marinas
  Cc: Nikolay Borisov, linux-btrfs, paulmck, andrea.parri, linux-kernel

On 29/07/2019 16:33, Catalin Marinas wrote:
[...]
>> ---- MODULE specs ----
>> EXTENDS Integers, Sequences, TLC
>>
>> CONSTANTS
>>     NR_WRITERS,
>>     NR_READERS,
>>     WRITER_TASK,
>>     READER_TASK
>>
>> WRITERS == {WRITER_TASK} \X (1..NR_WRITERS)
>> READERS == {READER_TASK} \X (1..NR_READERS)
>> THREADS == WRITERS \union READERS
> 
> Recommendation: use symbolic values for WRITERS and READERS (defined in
> .cfg: e.g. r1, r2, r3, w1, w2, w3). It allows you to do symmetry
> optimisations. We've also hit a TLC bug in the past with process values
> made up of a Cartesian product (though it may have been fixed since).
> 

Right, I had forgotten that one:

  https://github.com/tlaplus/tlaplus/issues/164

Being very lazy I dislike having to manually input those, but as you say
it can't be avoided if we want to use symmetry.

>> macro ReadLock(tid)
>> {
>>     if (lock_state = "idle" \/ lock_state = "read_locked") {
>>         lock_state := "read_locked";
>>         threads[tid] := "read_locked";
>>     } else {
>>         assert lock_state = "write_locked";
>>         \* waiting for writers to finish
>>         threads[tid] := "write_waiting";
>>         await lock_state = "" \/ lock_state = "read_locked";
> 
> lock_state = "idle"?
> 

Aye, I didn't modify those macros from the original spec.

>> macro WriteLock(tid)
>> {
>>     if (lock_state = "idle" \/ lock_state = "write_locked") {
>>         lock_state := "write_locked";
>>         threads[tid] := "write_locked";
>>     } else {
>>         assert lock_state = "read_locked";
>>         \* waiting for readers to finish
>>         threads[tid] := "read_waiting";
>>         await lock_state = "idle" \/ lock_state = "write_locked";
>>     };
>> }
> 
> I'd say that's one of the pitfalls of PlusCal. The above is executed
> atomically, so you'd have the lock_state read and updated in the same
> action. Looking at the C patches, there is an
> atomic_read(&lock->readers) followed by a
> percpu_counter_inc(&lock->writers). Between these two, you can have
> "readers" becoming non-zero via a different CPU.
> 
> My suggestion would be to use procedures with labels to express the
> non-atomicity of such sequences.
> 

Agreed, I've suggested something like this in my reply.

[...]


* Re: [PATCH v2 0/2] Refactor snapshot vs nocow writers locking
  2019-07-29 16:32     ` Valentin Schneider
@ 2019-07-30 11:03       ` Valentin Schneider
  2019-07-30 12:11         ` Nikolay Borisov
  2019-07-30 13:36         ` Valentin Schneider
  0 siblings, 2 replies; 12+ messages in thread
From: Valentin Schneider @ 2019-07-30 11:03 UTC (permalink / raw)
  To: Catalin Marinas
  Cc: Nikolay Borisov, linux-btrfs, paulmck, andrea.parri, linux-kernel

On 29/07/2019 17:32, Valentin Schneider wrote:
> On 29/07/2019 16:33, Catalin Marinas wrote:
[...]
>> I'd say that's one of the pitfalls of PlusCal. The above is executed
>> atomically, so you'd have the lock_state read and updated in the same
>> action. Looking at the C patches, there is an
>> atomic_read(&lock->readers) followed by a
>> percpu_counter_inc(&lock->writers). Between these two, you can have
>> "readers" becoming non-zero via a different CPU.
>>
>> My suggestion would be to use procedures with labels to express the
>> non-atomicity of such sequences.
>>
> 

FYI, with a very simple and stupid modification of the spec:

----->8-----
macro ReadUnlock()
{
    reader_count := reader_count - 1;
    \* Condition variable signal is "implicit" here
}

macro WriteUnlock()
{
    writer_count := writer_count - 1;
    \* Ditto on the cond var
}

procedure ReadLock()
{
add:
    reader_count := reader_count + 1;
lock:
    await writer_count = 0;
    return;
}

procedure WriteLock()
{
add:
    writer_count := writer_count + 1;
lock:
    await reader_count = 0;
    return;
};
-----8<-----

it's quite easy to trigger the case Paul pointed out in [1]:

----->8-----
Error: Deadlock reached.
Error: The behavior up to this point is:
State 1: <Initial predicate>
/\ stack = (<<reader, 1>> :> <<>> @@ <<writer, 1>> :> <<>>)
/\ pc = (<<reader, 1>> :> "loop" @@ <<writer, 1>> :> "loop_")
/\ writer_count = 0
/\ reader_count = 0
/\ lock_state = "idle"

State 2: <loop_ line 159, col 16 to line 164, col 72 of module specs>
/\ stack = ( <<reader, 1>> :> <<>> @@
  <<writer, 1>> :> <<[pc |-> "write_cs", procedure |-> "WriteLock"]>> )
/\ pc = (<<reader, 1>> :> "loop" @@ <<writer, 1>> :> "add")
/\ writer_count = 0
/\ reader_count = 0
/\ lock_state = "idle"

State 3: <add line 146, col 14 to line 149, col 63 of module specs>
/\ stack = ( <<reader, 1>> :> <<>> @@
  <<writer, 1>> :> <<[pc |-> "write_cs", procedure |-> "WriteLock"]>> )
/\ pc = (<<reader, 1>> :> "loop" @@ <<writer, 1>> :> "lock")
/\ writer_count = 1
/\ reader_count = 0
/\ lock_state = "idle"

State 4: <loop line 179, col 15 to line 184, col 71 of module specs>
/\ stack = ( <<reader, 1>> :> <<[pc |-> "read_cs", procedure |-> "ReadLock"]>> @@
  <<writer, 1>> :> <<[pc |-> "write_cs", procedure |-> "WriteLock"]>> )
/\ pc = (<<reader, 1>> :> "add_" @@ <<writer, 1>> :> "lock")
/\ writer_count = 1
/\ reader_count = 0
/\ lock_state = "idle"

State 5: <add_ line 133, col 15 to line 136, col 64 of module specs>
/\ stack = ( <<reader, 1>> :> <<[pc |-> "read_cs", procedure |-> "ReadLock"]>> @@
  <<writer, 1>> :> <<[pc |-> "write_cs", procedure |-> "WriteLock"]>> )
/\ pc = (<<reader, 1>> :> "lock_" @@ <<writer, 1>> :> "lock")
/\ writer_count = 1
/\ reader_count = 1
/\ lock_state = "idle"
-----8<-----

Which I think is pretty cool considering the effort that was required
(read: not much).

[1]: https://lore.kernel.org/lkml/20190607105251.GB28207@linux.ibm.com/


* Re: [PATCH v2 0/2] Refactor snapshot vs nocow writers locking
  2019-07-30 11:03       ` Valentin Schneider
@ 2019-07-30 12:11         ` Nikolay Borisov
  2019-07-30 13:36         ` Valentin Schneider
  1 sibling, 0 replies; 12+ messages in thread
From: Nikolay Borisov @ 2019-07-30 12:11 UTC (permalink / raw)
  To: Valentin Schneider, Catalin Marinas
  Cc: andrea.parri, paulmck, linux-btrfs, linux-kernel



On 30.07.19 г. 14:03 ч., Valentin Schneider wrote:
> On 29/07/2019 17:32, Valentin Schneider wrote:
>> On 29/07/2019 16:33, Catalin Marinas wrote:
> [...]
>>> I'd say that's one of the pitfalls of PlusCal. The above is executed
>>> atomically, so you'd have the lock_state read and updated in the same
>>> action. Looking at the C patches, there is an
>>> atomic_read(&lock->readers) followed by a
>>> percpu_counter_inc(&lock->writers). Between these two, you can have
>>> "readers" becoming non-zero via a different CPU.
>>>
>>> My suggestion would be to use procedures with labels to express the
>>> non-atomicity of such sequences.
>>>
>>
> 
> FYI, with a very simple and stupid modification of the spec:
> 
> ----->8-----
> macro ReadUnlock()
> {
>     reader_count := reader_count - 1;
>     \* Condition variable signal is "implicit" here
> }
> 
> macro WriteUnlock()
> {
>     writer_count := writer_count - 1;
>     \* Ditto on the cond var
> }
> 
> procedure ReadLock()
> {
> add:
>     reader_count := reader_count + 1;
> lock:
>     await writer_count = 0;
>     return;
> }
> 
> procedure WriteLock()
> {
> add:
>     writer_count := writer_count + 1;
> lock:
>     await reader_count = 0;
>     return;
> };
> -----8<-----
> 
> it's quite easy to trigger the case Paul pointed out in [1]:

Yes, however, there was a bug in the original posting, in that
btrfs_drw_try_write_lock should have called btrfs_drw_write_unlock
instead of btrfs_drw_read_unlock if it sees that the reader count was
incremented while it had already incremented its percpu counter.

Additionally, the implementation doesn't wait with its respective
counter still incremented. Is there a way to express something along
the lines of:


> procedure WriteLock()
> {
> add:
>     writer_count := writer_count + 1;
> lock:
>     await reader_count = 0;

If we are about to wait then also decrement writer_count?  I guess the
correct way to specify it would be:

procedure WriteLock()
 {

     writer_count := writer_count + 1;
     await reader_count = 0;
     return;
 };


Because the implementation (by using barriers and percpu counters)
ensures all of this happens as one atomic step? E.g. before going to
sleep we decrement the writer count.

>     return;
> };

> 
> ----->8-----
> Error: Deadlock reached.
> Error: The behavior up to this point is:
> State 1: <Initial predicate>
> /\ stack = (<<reader, 1>> :> <<>> @@ <<writer, 1>> :> <<>>)
> /\ pc = (<<reader, 1>> :> "loop" @@ <<writer, 1>> :> "loop_")
> /\ writer_count = 0
> /\ reader_count = 0
> /\ lock_state = "idle"
> 
> State 2: <loop_ line 159, col 16 to line 164, col 72 of module specs>
> /\ stack = ( <<reader, 1>> :> <<>> @@
>   <<writer, 1>> :> <<[pc |-> "write_cs", procedure |-> "WriteLock"]>> )
> /\ pc = (<<reader, 1>> :> "loop" @@ <<writer, 1>> :> "add")
> /\ writer_count = 0
> /\ reader_count = 0
> /\ lock_state = "idle"
> 
> State 3: <add line 146, col 14 to line 149, col 63 of module specs>
> /\ stack = ( <<reader, 1>> :> <<>> @@
>   <<writer, 1>> :> <<[pc |-> "write_cs", procedure |-> "WriteLock"]>> )
> /\ pc = (<<reader, 1>> :> "loop" @@ <<writer, 1>> :> "lock")
> /\ writer_count = 1
> /\ reader_count = 0
> /\ lock_state = "idle"
> 
> State 4: <loop line 179, col 15 to line 184, col 71 of module specs>
> /\ stack = ( <<reader, 1>> :> <<[pc |-> "read_cs", procedure |-> "ReadLock"]>> @@
>   <<writer, 1>> :> <<[pc |-> "write_cs", procedure |-> "WriteLock"]>> )
> /\ pc = (<<reader, 1>> :> "add_" @@ <<writer, 1>> :> "lock")
> /\ writer_count = 1
> /\ reader_count = 0
> /\ lock_state = "idle"
> 
> State 5: <add_ line 133, col 15 to line 136, col 64 of module specs>
> /\ stack = ( <<reader, 1>> :> <<[pc |-> "read_cs", procedure |-> "ReadLock"]>> @@
>   <<writer, 1>> :> <<[pc |-> "write_cs", procedure |-> "WriteLock"]>> )
> /\ pc = (<<reader, 1>> :> "lock_" @@ <<writer, 1>> :> "lock")
> /\ writer_count = 1
> /\ reader_count = 1
> /\ lock_state = "idle"
> -----8<-----
> 
> Which I think is pretty cool considering the effort that was required
> (read: not much).
> 
> [1]: https://lore.kernel.org/lkml/20190607105251.GB28207@linux.ibm.com/
> 


* Re: [PATCH v2 0/2] Refactor snapshot vs nocow writers locking
  2019-07-30 11:03       ` Valentin Schneider
  2019-07-30 12:11         ` Nikolay Borisov
@ 2019-07-30 13:36         ` Valentin Schneider
  1 sibling, 0 replies; 12+ messages in thread
From: Valentin Schneider @ 2019-07-30 13:36 UTC (permalink / raw)
  To: Catalin Marinas
  Cc: Nikolay Borisov, linux-btrfs, paulmck, andrea.parri, linux-kernel

And with a more accurate spec (appended at the end of this email):

    Model checking completed. No error has been found.
      Estimates of the probability that TLC did not check all reachable states
      because two distinct states had the same fingerprint:
      calculated (optimistic):  val = 8.6E-8
      based on the actual fingerprints:  val = 9.8E-9
    3100306 states generated, 651251 distinct states found, 0 states left on queue.

IOW, it seems fine with regard to the defined properties + the deadlock
check.

The EventuallyRead liveness property shows that a waiting reader will
always eventually get the lock (no reader starvation), of course assuming
no lock-user blocks in its critical section (i.e. no stuttering steps).

It doesn't hold for writers - they can be starved by a never-ending stream
of readers. IOW

  \* Eventually one writer gets the lock
  <> \E writer \in WRITERS: pc[writer] = "write_cs"

can be disproven by some behaviours. However,

  \* No writer ever gets the lock
  [] \A writer \in WRITERS: pc[writer] # "write_cs"

can be disproven as well - there are *some* paths that allow writers to
get the lock. I haven't found a neater way to check that other than by
having a "debug" property that I don't include in the full-fledged check.



Note that the entire content of a label is considered atomic by TLC. From
the point of view of that spec:

Read lock:
- reader count increment is atomic
- writer count read is atomic

Read unlock:
- reader count decrement is atomic
(The model's reader counter read is in the same atomic block as the
reader count decrement, but it's only used for updating a model-internal
variable, so I'm not including it here)

Write lock:
- write count increment is atomic
- reader count read is atomic
- writer count decrement is atomic

Write unlock:
- writer count decrement is atomic
(ditto on the writer counter read)

This doesn't help for the placement of memory barriers, but from an
algorithm point of view that seems to be holding up.
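
For the record, the barrier pairing the patches rely on (condensed from
the C proposal; whether the placement is sufficient is exactly what this
model cannot tell us):

----->8-----
/* writer side, btrfs_drw_try_write_lock(): */
percpu_counter_inc(&lock->writers);
smp_mb();	/* pairs with smp_mb__after_atomic() on the reader side */
if (atomic_read(&lock->readers)) {
	btrfs_drw_write_unlock(lock);	/* a reader snuck in, back off */
	return false;
}

/* reader side, btrfs_drw_read_lock(): */
atomic_inc(&lock->readers);
smp_mb__after_atomic();	/* publish the increment before sleeping */
wait_event(lock->pending_readers,
	   percpu_counter_sum(&lock->writers) == 0);
-----8<-----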



Here's the actual spec (if I keep this up I'll get a git tree going...)
---------------------------------------------------------------------------
specs.tla:

---- MODULE specs ----
EXTENDS Integers, Sequences, FiniteSets, TLC

CONSTANTS
    NR_WRITERS,
    NR_READERS,
    WRITER_TASK,
    READER_TASK

WRITERS == {WRITER_TASK} \X (1..NR_WRITERS)
READERS == {READER_TASK} \X (1..NR_READERS)
THREADS == WRITERS \union READERS

(*--algorithm DRW {

variables
    lock_state = "idle",
    reader_count = 0,
    writer_count = 0

define {

LOCK_STATES == {"idle", "write_locked", "read_locked"}

(* Safety invariants *)
TypeCheck ==
    /\ lock_state \in LOCK_STATES
    /\ reader_count \in (0..NR_READERS)
    /\ writer_count \in (0..NR_WRITERS)

(* Ensure critical section exclusiveness *)
Exclusion ==
    /\ \E writer \in WRITERS: pc[writer] = "write_cs" =>
        \A reader \in READERS: pc[reader] # "read_cs"
    /\ \E reader \in READERS: pc[reader] = "read_cs" =>
        \A writer \in WRITERS: pc[writer] # "write_cs"

(* Ensure the lock state is sane *)
LockState ==
    /\ lock_state = "idle" =>
        (* Not much can be said about the counts - the whole range is valid *)
        /\ \A writer \in WRITERS: pc[writer] # "write_cs"
        /\ \A reader \in READERS: pc[reader] # "read_cs"
    /\ lock_state = "read_locked" =>
        (* Some readers can still be in the locking procedure, just make sure  *)
	(* all readers in their critical section are accounted for *)
        reader_count >= Cardinality({r \in READERS: pc[r] = "read_cs"})
    /\ lock_state = "write_locked" =>
        (* Ditto for writers *)
        writer_count >= Cardinality({w \in WRITERS: pc[w] = "write_cs"})

(* Liveness properties *)

(* A waiting reader *always* eventually gets the lock *)
EventuallyRead ==
    reader_count > 0 /\ lock_state # "read_locked" ~>
        lock_state = "read_locked"

(* A waiting writer *can* eventually get the lock *)
EventuallyWrite ==
    (* TODO: find a way to express that this can be false in some behaviours *)
    (*       but has to be true in > 0 behaviours *)
    TRUE
}

macro ReadUnlock()
{
    reader_count := reader_count - 1;
    \* Condition variable signal is "implicit" here
    if (reader_count = 0) {
        lock_state := "idle";
    };
}

macro WriteUnlock()
{
    writer_count := writer_count - 1;
    \* Ditto on the cond var
    if (writer_count = 0) {
        lock_state := "idle";
    };
}

procedure ReadLock()
{
add:
    reader_count := reader_count + 1;
lock:
    await writer_count = 0;
    lock_state := "read_locked";
    return;
}

procedure WriteLock()
variables rc;
{
loop:
    while (TRUE) {
        writer_count := writer_count + 1;
get_readers:
        rc := reader_count;
check_readers:
        if (rc > 0) {
            writer_count := writer_count - 1;
wait:
            await reader_count = 0;
        } else {
            goto locked;
        };
    };

locked:
    lock_state := "write_locked";
    return;
};

fair process(writer \in WRITERS)
{
loop:
    while (TRUE) {
        call WriteLock();
write_cs:
        skip;
unlock:
        WriteUnlock();
    };
}

fair process(reader \in READERS)
{
loop:
    while (TRUE) {
        call ReadLock();
read_cs:
        skip;
unlock:
        ReadUnlock();
    };

}

}*)
====

specs.cfg:

SPECIFICATION Spec
\* Add statements after this line.

CONSTANTS
	NR_READERS = 3
	NR_WRITERS = 3
	READER_TASK = reader
	WRITER_TASK = writer

INVARIANTS
	TypeCheck
	LockState
	Exclusion

PROPERTIES
	EventuallyRead
	EventuallyWrite


* Re: [RFC PATCH] btrfs: Hook btrfs' DRW lock to locktorture infrastructure
  2019-07-19  8:48 ` [RFC PATCH] btrfs: Hook btrfs' DRW lock to locktorture infrastructure Nikolay Borisov
@ 2019-08-05 16:36   ` Nathan Chancellor
  2019-08-05 18:17     ` David Sterba
  0 siblings, 1 reply; 12+ messages in thread
From: Nathan Chancellor @ 2019-08-05 16:36 UTC (permalink / raw)
  To: Nikolay Borisov; +Cc: linux-btrfs, paulmck, linux-kernel

On Fri, Jul 19, 2019 at 11:48:08AM +0300, Nikolay Borisov wrote:
> Signed-off-by: Nikolay Borisov <nborisov@suse.com>
> ---
> 
> Hello Paul, 
> 
> Here is the code I used to test the DRW lock via the lock torture infrastructure. 
> It's rather ugly but got the job done for me. It's definitely not in a mergeable
> form. At the very least I think including btrfs headers constitutes a violation 
> of separation of different subsystems. Would it be acceptable to guard them 
> behind something like "#if BTRFS && BTRFS_DEBUG" ? 
> 
> I'm really posting this just for posterity/provenance purposes. I'm fine with 
> dropping the patch. 
> 
> 
>  fs/btrfs/locking.h           |  1 +
>  kernel/locking/locktorture.c | 77 +++++++++++++++++++++++++++++++++++-
>  2 files changed, 77 insertions(+), 1 deletion(-)
> 
> diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
> index 44378c65f843..27627d4fd3a9 100644
> --- a/fs/btrfs/locking.h
> +++ b/fs/btrfs/locking.h
> @@ -9,6 +9,7 @@
>  #include <linux/atomic.h>
>  #include <linux/wait.h>
>  #include <linux/percpu_counter.h>
> +#include "extent_io.h"
>  
>  #define BTRFS_WRITE_LOCK 1
>  #define BTRFS_READ_LOCK 2
> diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
> index 80a463d31a8d..774e10a25876 100644
> --- a/kernel/locking/locktorture.c
> +++ b/kernel/locking/locktorture.c
> @@ -30,6 +30,8 @@
>  #include <linux/slab.h>
>  #include <linux/percpu-rwsem.h>
>  #include <linux/torture.h>
> +#include "../../fs/btrfs/ctree.h"
> +#include "../../fs/btrfs/locking.h"
>  
>  MODULE_LICENSE("GPL");
>  MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
> @@ -85,6 +87,7 @@ struct lock_torture_ops {
>  
>  	unsigned long flags; /* for irq spinlocks */
>  	const char *name;
> +	bool multiple;
>  };
>  
>  struct lock_torture_cxt {
> @@ -600,6 +603,7 @@ static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
>  	percpu_up_read(&pcpu_rwsem);
>  }
>  
> +
>  static struct lock_torture_ops percpu_rwsem_lock_ops = {
>  	.init		= torture_percpu_rwsem_init,
>  	.writelock	= torture_percpu_rwsem_down_write,
> @@ -612,6 +616,76 @@ static struct lock_torture_ops percpu_rwsem_lock_ops = {
>  	.name		= "percpu_rwsem_lock"
>  };
>  
> +static struct btrfs_drw_lock torture_drw_lock;
> +
> +void torture_drw_init(void)
> +{
> +	BUG_ON(btrfs_drw_lock_init(&torture_drw_lock));
> +}
> +
> +static int torture_drw_write_lock(void) __acquires(torture_drw_lock)
> +{
> +	btrfs_drw_write_lock(&torture_drw_lock);
> +	return 0;
> +}
> +
> +static void torture_drw_write_unlock(void) __releases(torture_drw_lock)
> +{
> +	btrfs_drw_write_unlock(&torture_drw_lock);
> +}
> +
> +static int torture_drw_read_lock(void) __acquires(torture_drw_lock)
> +{
> +	btrfs_drw_read_lock(&torture_drw_lock);
> +	return 0;
> +}
> +
> +static void torture_drw_read_unlock(void) __releases(torture_drw_lock)
> +{
> +	btrfs_drw_read_unlock(&torture_drw_lock);
> +}
> +
> +static void torture_drw_write_delay(struct torture_random_state *trsp)
> +{
> +	const unsigned long longdelay_ms = 100;
> +
> +	/* We want a long delay occasionally to force massive contention.  */
> +	if (!(torture_random(trsp) %
> +	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
> +		mdelay(longdelay_ms * 10);
> +	else
> +		mdelay(longdelay_ms / 10);
> +	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
> +		torture_preempt_schedule();  /* Allow test to be preempted. */
> +}
> +
> +static void torture_drw_read_delay(struct torture_random_state *trsp)
> +{
> +	const unsigned long longdelay_ms = 100;
> +
> +	/* We want a long delay occasionally to force massive contention.  */
> +	if (!(torture_random(trsp) %
> +	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
> +		mdelay(longdelay_ms * 2);
> +	else
> +		mdelay(longdelay_ms / 2);
> +	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
> +		torture_preempt_schedule();  /* Allow test to be preempted. */
> +}
> +
> +static struct lock_torture_ops btrfs_drw_lock_ops = {
> +	.init		= torture_drw_init,
> +	.writelock	= torture_drw_write_lock,
> +	.write_delay	= torture_drw_write_delay,
> +	.task_boost     = torture_boost_dummy,
> +	.writeunlock	= torture_drw_write_unlock,
> +	.readlock       = torture_drw_read_lock,
> +	.read_delay     = torture_drw_read_delay, /* figure what to do with this */
> +	.readunlock     = torture_drw_read_unlock,
> +	.multiple = true,
> +	.name		= "btrfs_drw_lock"
> +};
> +
>  /*
>   * Lock torture writer kthread.  Repeatedly acquires and releases
>   * the lock, checking for duplicate acquisitions.
> @@ -630,7 +704,7 @@ static int lock_torture_writer(void *arg)
>  
>  		cxt.cur_ops->task_boost(&rand);
>  		cxt.cur_ops->writelock();
> -		if (WARN_ON_ONCE(lock_is_write_held))
> +		if (!cxt.cur_ops->multiple && WARN_ON_ONCE(lock_is_write_held))
>  			lwsp->n_lock_fail++;
>  		lock_is_write_held = 1;
>  		if (WARN_ON_ONCE(lock_is_read_held))
> @@ -852,6 +926,7 @@ static int __init lock_torture_init(void)
>  #endif
>  		&rwsem_lock_ops,
>  		&percpu_rwsem_lock_ops,
> +		&btrfs_drw_lock_ops
>  	};
>  
>  	if (!torture_init_begin(torture_type, verbose))
> -- 
> 2.17.1
> 

Looks like this is in next-20190805 and causes a link time error when
CONFIG_BTRFS_FS is unset:

  LD      vmlinux.o
  MODPOST vmlinux.o
  MODINFO modules.builtin.modinfo
ld.lld: error: undefined symbol: btrfs_drw_lock_init
>>> referenced by locktorture.c
>>>               locking/locktorture.o:(torture_drw_init) in archive kernel/built-in.a

ld.lld: error: undefined symbol: btrfs_drw_write_lock
>>> referenced by locktorture.c
>>>               locking/locktorture.o:(torture_drw_write_lock) in archive kernel/built-in.a

ld.lld: error: undefined symbol: btrfs_drw_write_unlock
>>> referenced by locktorture.c
>>>               locking/locktorture.o:(torture_drw_write_unlock) in archive kernel/built-in.a

ld.lld: error: undefined symbol: btrfs_drw_read_lock
>>> referenced by locktorture.c
>>>               locking/locktorture.o:(torture_drw_read_lock) in archive kernel/built-in.a

ld.lld: error: undefined symbol: btrfs_drw_read_unlock
>>> referenced by locktorture.c
>>>               locking/locktorture.o:(torture_drw_read_unlock) in archive kernel/built-in.a

If this commit is to remain around, there should probably be static
inline stubs in fs/btrfs/locking.h. Apologies if this has already been
reported, I still see the commit in the btrfs for-next branch.
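
Something like this (untested sketch) is what I had in mind:

#if IS_ENABLED(CONFIG_BTRFS_FS)
/* real declarations, as in the patch */
int btrfs_drw_lock_init(struct btrfs_drw_lock *lock);
#else
/* no-op stubs so a !BTRFS_FS kernel still links; sketch only */
static inline int btrfs_drw_lock_init(struct btrfs_drw_lock *lock)
{
	return 0;
}
static inline void btrfs_drw_write_lock(struct btrfs_drw_lock *lock) { }
static inline void btrfs_drw_write_unlock(struct btrfs_drw_lock *lock) { }
static inline void btrfs_drw_read_lock(struct btrfs_drw_lock *lock) { }
static inline void btrfs_drw_read_unlock(struct btrfs_drw_lock *lock) { }
#endif

(though that still leaves the CONFIG_BTRFS_FS=m case, where a built-in
locktorture couldn't reach the real symbols anyway).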

Cheers,
Nathan


* Re: [RFC PATCH] btrfs: Hook btrfs' DRW lock to locktorture infrastructure
  2019-08-05 16:36   ` Nathan Chancellor
@ 2019-08-05 18:17     ` David Sterba
  0 siblings, 0 replies; 12+ messages in thread
From: David Sterba @ 2019-08-05 18:17 UTC (permalink / raw)
  To: Nathan Chancellor; +Cc: Nikolay Borisov, linux-btrfs, paulmck, linux-kernel

On Mon, Aug 05, 2019 at 09:36:21AM -0700, Nathan Chancellor wrote:
> Looks like this is in next-20190805 and causes a link time error when
> CONFIG_BTRFS_FS is unset:
> 
>   LD      vmlinux.o
>   MODPOST vmlinux.o
>   MODINFO modules.builtin.modinfo
> ld.lld: error: undefined symbol: btrfs_drw_lock_init
> >>> referenced by locktorture.c
> >>>               locking/locktorture.o:(torture_drw_init) in archive kernel/built-in.a
> 
> ld.lld: error: undefined symbol: btrfs_drw_write_lock
> >>> referenced by locktorture.c
> >>>               locking/locktorture.o:(torture_drw_write_lock) in archive kernel/built-in.a
> 
> ld.lld: error: undefined symbol: btrfs_drw_write_unlock
> >>> referenced by locktorture.c
> >>>               locking/locktorture.o:(torture_drw_write_unlock) in archive kernel/built-in.a
> 
> ld.lld: error: undefined symbol: btrfs_drw_read_lock
> >>> referenced by locktorture.c
> >>>               locking/locktorture.o:(torture_drw_read_lock) in archive kernel/built-in.a
> 
> ld.lld: error: undefined symbol: btrfs_drw_read_unlock
> >>> referenced by locktorture.c
> >>>               locking/locktorture.o:(torture_drw_read_unlock) in archive kernel/built-in.a
> 
> If this commit is to remain around, there should probably be static
> inline stubs in fs/btrfs/locking.h. Apologies if this has already been
> reported, I still see the commit in the btrfs for-next branch.

Sorry for the build breakage, the patch is not essential for the
patchset so I'll remove it from the upcoming for-next branch.
