From: Nathan Huckleberry <nhuck@google.com>
To: linux-kernel@vger.kernel.org, dm-devel@redhat.com,
Alasdair Kergon <agk@redhat.com>,
Mike Snitzer <snitzer@kernel.org>
Cc: Eric Biggers <ebiggers@kernel.org>,
Sami Tolvanen <samitolvanen@google.com>,
Nathan Huckleberry <nhuck@google.com>
Subject: [PATCH 2/3] dm-bufio: Add DM_BUFIO_GET_CANT_SLEEP
Date: Fri, 22 Jul 2022 09:38:22 +0000
Message-ID: <20220722093823.4158756-3-nhuck@google.com>
In-Reply-To: <20220722093823.4158756-1-nhuck@google.com>

Add an optional flag that ensures dm_bufio_get does not sleep. This
allows the dm-bufio cache to be queried from interrupt context.

To ensure that dm-bufio does not sleep, dm-bufio must use a spinlock
instead of a mutex. Additionally, to avoid deadlocks, special care must
be taken so that dm-bufio never sleeps while holding the spinlock.

DM_BUFIO_GET_CANT_SLEEP is useful in some contexts, such as dm-verity,
so that the dm-bufio cache can be queried from a tasklet. If the
required data is cached, processing can be handled immediately in the
tasklet instead of waiting for a workqueue job to be scheduled. This can
reduce latency when there is high CPU load and memory pressure.
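For illustration only (not part of the patch), a client might use the
flag roughly as follows. The my_* names are hypothetical; only the
dm_bufio_* calls and the flag are real, and dm_bufio_client_create is
shown with the flags argument added by patch 1/3:

	/* Setup: opt in to a dm_bufio_get that never sleeps. */
	static struct dm_bufio_client *c;

	static int my_setup(struct block_device *bdev)
	{
		c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL,
					   DM_BUFIO_GET_CANT_SLEEP);
		return IS_ERR(c) ? PTR_ERR(c) : 0;
	}

	/* Tasklet: runs in atomic context, so it must not sleep. */
	static void my_tasklet_fn(unsigned long data)
	{
		struct dm_buffer *buf;
		void *blk;

		blk = dm_bufio_get(c, (sector_t)data, &buf);
		if (blk) {
			my_process(blk);	/* cache hit: handle now */
			dm_bufio_release(buf);
		} else {
			queue_work(my_wq, &my_work);	/* miss: defer */
		}
	}

As patch 3/3 does for dm-verity, the miss path falls back to a
workqueue, where a sleeping read of the data is allowed.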
Signed-off-by: Nathan Huckleberry <nhuck@google.com>
---
 drivers/md/dm-bufio.c    | 26 ++++++++++++++++++++++----
 include/linux/dm-bufio.h |  5 +++++
 2 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index ad5603eb12e3..3edeca7cfca6 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -81,6 +81,8 @@
  */
 struct dm_bufio_client {
 	struct mutex lock;
+	spinlock_t spinlock;
+	unsigned long spinlock_flags;
 	struct list_head lru[LIST_SIZE];
 	unsigned long n_buffers[LIST_SIZE];
 
@@ -90,6 +92,7 @@ struct dm_bufio_client {
 	s8 sectors_per_block_bits;
 	void (*alloc_callback)(struct dm_buffer *);
 	void (*write_callback)(struct dm_buffer *);
+	bool may_sleep;
 
 	struct kmem_cache *slab_buffer;
 	struct kmem_cache *slab_cache;
@@ -167,17 +170,26 @@ struct dm_buffer {
 static void dm_bufio_lock(struct dm_bufio_client *c)
 {
-	mutex_lock_nested(&c->lock, dm_bufio_in_request());
+	if (c->may_sleep)
+		mutex_lock_nested(&c->lock, dm_bufio_in_request());
+	else
+		spin_lock_irqsave_nested(&c->spinlock, c->spinlock_flags, dm_bufio_in_request());
 }
 
 static int dm_bufio_trylock(struct dm_bufio_client *c)
 {
-	return mutex_trylock(&c->lock);
+	if (c->may_sleep)
+		return mutex_trylock(&c->lock);
+	else
+		return spin_trylock_irqsave(&c->spinlock, c->spinlock_flags);
 }
 
 static void dm_bufio_unlock(struct dm_bufio_client *c)
 {
-	mutex_unlock(&c->lock);
+	if (c->may_sleep)
+		mutex_unlock(&c->lock);
+	else
+		spin_unlock_irqrestore(&c->spinlock, c->spinlock_flags);
 }
 
 /*----------------------------------------------------------------*/
 
@@ -878,7 +890,7 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
 	 * be allocated.
 	 */
 	while (1) {
-		if (dm_bufio_cache_size_latch != 1) {
+		if (dm_bufio_cache_size_latch != 1 && c->may_sleep) {
 			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 			if (b)
 				return b;
@@ -1041,6 +1053,7 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
 		return NULL;
 
+
 	b->hold_count++;
 	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
 		     test_bit(B_WRITING, &b->state));
@@ -1748,12 +1761,17 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
 	c->alloc_callback = alloc_callback;
 	c->write_callback = write_callback;
 
+	c->may_sleep = true;
+	if (flags & DM_BUFIO_GET_CANT_SLEEP)
+		c->may_sleep = false;
+
 	for (i = 0; i < LIST_SIZE; i++) {
 		INIT_LIST_HEAD(&c->lru[i]);
 		c->n_buffers[i] = 0;
 	}
 
 	mutex_init(&c->lock);
+	spin_lock_init(&c->spinlock);
 	INIT_LIST_HEAD(&c->reserved_buffers);
 	c->need_reserved_buffers = reserved_buffers;
 
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
index e21480715255..2a78f0cb8e71 100644
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -17,6 +17,11 @@
 struct dm_bufio_client;
 struct dm_buffer;
 
+/*
+ * Flags for dm_bufio_client_create
+ */
+#define DM_BUFIO_GET_CANT_SLEEP 0x1
+
 /*
  * Create a buffered IO cache on a given device
  */
--
2.37.1.359.gd136c6c3e2-goog