From: Qu Wenruo <wqu@suse.com>
To: linux-btrfs@vger.kernel.org
Subject: [PATCH RFC 2/3] btrfs: locking: Add hooks for btrfs perf
Date: Wed,  6 Mar 2019 14:19:06 +0800
Message-ID: <20190306061907.29685-3-wqu@suse.com>
In-Reply-To: <20190306061907.29685-1-wqu@suse.com>

For btrfs tree locking, only two functions can sleep:
- btrfs_tree_read_lock()
  It waits for any blocking writers
- btrfs_tree_lock()
  It waits for any blocking readers or writers

All other locking functions rely only on the rwlock, which never
sleeps, so we don't really care about the spinning lock paths.
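Roughly, both sleeping paths share the same shape: take the rwlock,
and if a blocking holder is present, drop the lock and sleep on the
wait queue. A simplified view of the read side (assertions and the
nested-lock case omitted):

again:
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		/* the only place the read side can sleep */
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);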

The overhead introduced is:
- two ktime_get_ns() calls
- several if branches
- one percpu_counter_add() call

This should still be cheaper than tracing the same functions with the
ftrace function_graph tracer.
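For later reporting (the RO sysfs interface added in patch 3/3), the
per-cpu deltas only need to be folded back into a single total. A
minimal sketch, where btrfs_perf_total_ns() is a hypothetical helper
name:

	/* Hypothetical helper: total accumulated wait time, in ns */
	static u64 btrfs_perf_total_ns(struct btrfs_perf_profiler *profiler,
				       int index)
	{
		/* percpu_counter_sum() folds all per-cpu deltas together */
		return percpu_counter_sum(&profiler->perf_counters[index]);
	}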

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/locking.c |  9 +++++++++
 fs/btrfs/perf.c    | 20 ++++++++++++++++++++
 fs/btrfs/perf.h    |  6 ++++++
 3 files changed, 35 insertions(+)

diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 1da768e5ef75..c23c8f7450e8 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -11,6 +11,7 @@
 #include "ctree.h"
 #include "extent_io.h"
 #include "locking.h"
+#include "perf.h"
 
 static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
 
@@ -85,10 +86,13 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
  */
 void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
+	u64 start_ns;
+
+	start_ns = btrfs_perf_start();
 again:
 	BUG_ON(!atomic_read(&eb->blocking_writers) &&
 	       current->pid == eb->lock_owner);
 
 	read_lock(&eb->lock);
 	if (atomic_read(&eb->blocking_writers) &&
 	    current->pid == eb->lock_owner) {
@@ -101,16 +105,18 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
 		BUG_ON(eb->lock_nested);
 		eb->lock_nested = 1;
 		read_unlock(&eb->lock);
+		btrfs_perf_end(eb->fs_info, btrfs_header_owner(eb), start_ns);
 		return;
 	}
 	if (atomic_read(&eb->blocking_writers)) {
 		read_unlock(&eb->lock);
 		wait_event(eb->write_lock_wq,
 			   atomic_read(&eb->blocking_writers) == 0);
 		goto again;
 	}
 	atomic_inc(&eb->read_locks);
 	atomic_inc(&eb->spinning_readers);
+	btrfs_perf_end(eb->fs_info, btrfs_header_owner(eb), start_ns);
 }
 
 /*
@@ -227,7 +233,9 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
  */
 void btrfs_tree_lock(struct extent_buffer *eb)
 {
+	u64 start_ns;
 	WARN_ON(eb->lock_owner == current->pid);
+	start_ns = btrfs_perf_start();
 again:
 	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
 	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
@@ -248,6 +256,7 @@ void btrfs_tree_lock(struct extent_buffer *eb)
 	atomic_inc(&eb->spinning_writers);
 	atomic_inc(&eb->write_locks);
 	eb->lock_owner = current->pid;
+	btrfs_perf_end(eb->fs_info, btrfs_header_owner(eb), start_ns);
 }
 
 /*
diff --git a/fs/btrfs/perf.c b/fs/btrfs/perf.c
index 2880111f3b0c..893bfad8e6d3 100644
--- a/fs/btrfs/perf.c
+++ b/fs/btrfs/perf.c
@@ -50,3 +50,23 @@ void btrfs_perf_free_profiler(struct btrfs_fs_info *fs_info)
 	kfree(profiler);
 }
 
+void btrfs_perf_end(struct btrfs_fs_info *fs_info, u64 eb_owner, u64 start_ns)
+{
+	struct btrfs_perf_profiler *profiler = fs_info->profiler;
+	u64 end_ns;
+	int i;
+
+	if (!profiler)
+		return;
+
+	end_ns = ktime_get_ns();
+	if (eb_owner == BTRFS_ROOT_TREE_OBJECTID)
+		i = BTRFS_PERF_TREE_LOCK_ROOT;
+	else if (is_fstree(eb_owner))
+		i = BTRFS_PERF_TREE_LOCK_FS;
+	else if (eb_owner == BTRFS_EXTENT_TREE_OBJECTID)
+		i = BTRFS_PERF_TREE_LOCK_EXTENT;
+	else
+		i = BTRFS_PERF_TREE_LOCK_OTHER;
+	percpu_counter_add(&profiler->perf_counters[i], end_ns - start_ns);
+}
diff --git a/fs/btrfs/perf.h b/fs/btrfs/perf.h
index 9dbea6458d86..7cf4b8c9a0ad 100644
--- a/fs/btrfs/perf.h
+++ b/fs/btrfs/perf.h
@@ -24,4 +24,10 @@ struct btrfs_perf_profiler {
 
 struct btrfs_perf_profiler *btrfs_perf_alloc_profiler(void);
 void btrfs_perf_free_profiler(struct btrfs_fs_info *fs_info);
+static inline u64 btrfs_perf_start(void)
+{
+	return ktime_get_ns();
+}
+
+void btrfs_perf_end(struct btrfs_fs_info *fs_info, u64 eb_owner, u64 start_ns);
 #endif
-- 
2.21.0


Thread overview: 16+ messages
2019-03-06  6:19 [PATCH RFC 0/3] btrfs: Performance profiler support Qu Wenruo
2019-03-06  6:19 ` [PATCH RFC 1/3] btrfs: Introduce performance profiler Qu Wenruo
2019-03-06  6:19 ` Qu Wenruo [this message]
2019-03-06  6:19 ` [PATCH RFC 3/3] btrfs: perf: Add RO sysfs interface to collect perf result Qu Wenruo
2019-03-07 14:02 ` [PATCH RFC 0/3] btrfs: Performance profiler support David Sterba
2019-03-07 14:18   ` Qu Wenruo
2019-03-07 16:12     ` David Sterba
2019-03-09  6:21       ` Nikolay Borisov
2019-03-09  6:32         ` Qu Wenruo
2019-03-10  3:08 ` Anand Jain
2019-03-10  9:29   ` Nikolay Borisov
2019-03-10  9:34     ` Qu Wenruo
2019-03-10  9:40       ` Nikolay Borisov
2019-03-10  9:56         ` Qu Wenruo
2019-03-10 10:00           ` Nikolay Borisov
2019-03-11  0:44     ` Anand Jain
