From: 권오훈 <ohoono.kwon@samsung.com>
To: "akpm@linux-foundation.org" <akpm@linux-foundation.org>,
"konrad.wilk@oracle.com" <konrad.wilk@oracle.com>,
"gregkh@linuxfoundation.org" <gregkh@linuxfoundation.org>
Cc: 권오훈 <ohoono.kwon@samsung.com>,
"ohkwon1043@gmail.com" <ohkwon1043@gmail.com>,
"linux-mm@kvack.org" <linux-mm@kvack.org>,
"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>
Subject: [PATCH] mm: cleancache: fix potential race in cleancache apis
Date: Wed, 30 Jun 2021 16:33:10 +0900 [thread overview]
Message-ID: <20210630073310epcms1p2ad6803cfd9dbc8ab501c4c99f799f4da@epcms1p2> (raw)
In-Reply-To: CGME20210630073310epcms1p2ad6803cfd9dbc8ab501c4c99f799f4da@epcms1p2
The current cleancache API implementation has a potential race, shown
below, which might lead to corruption in filesystems using cleancache.
thread 0                      thread 1                 thread 2

in put_page
get pool_id K for fs1
                              invalidate_fs on fs1
                              frees pool_id K
                                                       init_fs for fs2
                                                       allocates pool_id K
put_page puts page
which belongs to fs1
into cleancache pool for fs2
At this point, a file cache which originally belongs to fs1 might be
copied back to cleancache pool of fs2, which might be later used as if
it were normal cleancache of fs2, and could eventually corrupt fs2 when
flushed back.
Add rwlock in order to synchronize invalidate_fs with other cleancache
operations.
In normal situations where filesystems are not frequently mounted or
unmounted, there will be little performance impact since
read_lock/read_unlock apis are used.
Signed-off-by: Ohhoon Kwon <ohoono.kwon@samsung.com>
---
fs/super.c | 1 +
include/linux/fs.h | 1 +
mm/cleancache.c | 29 ++++++++++++++++++++++++++---
3 files changed, 28 insertions(+), 3 deletions(-)
diff --git a/fs/super.c b/fs/super.c
index 11b7e7213fd1..6810b685490c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -261,6 +261,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
s->s_time_min = TIME64_MIN;
s->s_time_max = TIME64_MAX;
s->cleancache_poolid = CLEANCACHE_NO_POOL;
+ rwlock_init(&s->cleancache_pool_lock);
s->s_shrink.seeks = DEFAULT_SEEKS;
s->s_shrink.scan_objects = super_cache_scan;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c3c88fdb9b2a..f61008c9e8fc 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1501,6 +1501,7 @@ struct super_block {
* Saved pool identifier for cleancache (-1 means none)
*/
int cleancache_poolid;
+ rwlock_t cleancache_pool_lock;
struct shrinker s_shrink; /* per-sb shrinker handle */
diff --git a/mm/cleancache.c b/mm/cleancache.c
index db7eee9c0886..10b436a28219 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -114,12 +114,14 @@ void __cleancache_init_fs(struct super_block *sb)
{
int pool_id = CLEANCACHE_NO_BACKEND;
+ write_lock(&sb->cleancache_pool_lock);
if (cleancache_ops) {
pool_id = cleancache_ops->init_fs(PAGE_SIZE);
if (pool_id < 0)
pool_id = CLEANCACHE_NO_POOL;
}
sb->cleancache_poolid = pool_id;
+ write_unlock(&sb->cleancache_pool_lock);
}
EXPORT_SYMBOL(__cleancache_init_fs);
@@ -128,12 +130,14 @@ void __cleancache_init_shared_fs(struct super_block *sb)
{
int pool_id = CLEANCACHE_NO_BACKEND_SHARED;
+ write_lock(&sb->cleancache_pool_lock);
if (cleancache_ops) {
pool_id = cleancache_ops->init_shared_fs(&sb->s_uuid, PAGE_SIZE);
if (pool_id < 0)
pool_id = CLEANCACHE_NO_POOL;
}
sb->cleancache_poolid = pool_id;
+ write_unlock(&sb->cleancache_pool_lock);
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);
@@ -185,6 +189,7 @@ int __cleancache_get_page(struct page *page)
}
VM_BUG_ON_PAGE(!PageLocked(page), page);
+ read_lock(&page->mapping->host->i_sb->cleancache_pool_lock);
pool_id = page->mapping->host->i_sb->cleancache_poolid;
if (pool_id < 0)
goto out;
@@ -198,6 +203,7 @@ int __cleancache_get_page(struct page *page)
else
cleancache_failed_gets++;
out:
+ read_unlock(&page->mapping->host->i_sb->cleancache_pool_lock);
return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);
@@ -223,12 +229,14 @@ void __cleancache_put_page(struct page *page)
}
VM_BUG_ON_PAGE(!PageLocked(page), page);
+ read_lock(&page->mapping->host->i_sb->cleancache_pool_lock);
pool_id = page->mapping->host->i_sb->cleancache_poolid;
if (pool_id >= 0 &&
cleancache_get_key(page->mapping->host, &key) >= 0) {
cleancache_ops->put_page(pool_id, key, page->index, page);
cleancache_puts++;
}
+ read_unlock(&page->mapping->host->i_sb->cleancache_pool_lock);
}
EXPORT_SYMBOL(__cleancache_put_page);
@@ -244,12 +252,15 @@ void __cleancache_invalidate_page(struct address_space *mapping,
struct page *page)
{
/* careful... page->mapping is NULL sometimes when this is called */
- int pool_id = mapping->host->i_sb->cleancache_poolid;
+ int pool_id;
struct cleancache_filekey key = { .u.key = { 0 } };
if (!cleancache_ops)
return;
+ read_lock(&mapping->host->i_sb->cleancache_pool_lock);
+ pool_id = mapping->host->i_sb->cleancache_poolid;
+
if (pool_id >= 0) {
VM_BUG_ON_PAGE(!PageLocked(page), page);
if (cleancache_get_key(mapping->host, &key) >= 0) {
@@ -258,6 +269,7 @@ void __cleancache_invalidate_page(struct address_space *mapping,
cleancache_invalidates++;
}
}
+ read_unlock(&mapping->host->i_sb->cleancache_pool_lock);
}
EXPORT_SYMBOL(__cleancache_invalidate_page);
@@ -272,14 +284,19 @@ EXPORT_SYMBOL(__cleancache_invalidate_page);
*/
void __cleancache_invalidate_inode(struct address_space *mapping)
{
- int pool_id = mapping->host->i_sb->cleancache_poolid;
+ int pool_id;
struct cleancache_filekey key = { .u.key = { 0 } };
if (!cleancache_ops)
return;
+ read_lock(&mapping->host->i_sb->cleancache_pool_lock);
+ pool_id = mapping->host->i_sb->cleancache_poolid;
+
if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
cleancache_ops->invalidate_inode(pool_id, key);
+
+ read_unlock(&mapping->host->i_sb->cleancache_pool_lock);
}
EXPORT_SYMBOL(__cleancache_invalidate_inode);
@@ -292,11 +309,17 @@ void __cleancache_invalidate_fs(struct super_block *sb)
{
int pool_id;
+ if (!cleancache_ops)
+ return;
+
+ write_lock(&sb->cleancache_pool_lock);
pool_id = sb->cleancache_poolid;
sb->cleancache_poolid = CLEANCACHE_NO_POOL;
- if (cleancache_ops && pool_id >= 0)
+ if (pool_id >= 0)
cleancache_ops->invalidate_fs(pool_id);
+
+ write_unlock(&sb->cleancache_pool_lock);
}
EXPORT_SYMBOL(__cleancache_invalidate_fs);
--
2.17.1
next parent reply other threads:[~2021-06-30 7:33 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <CGME20210630073310epcms1p2ad6803cfd9dbc8ab501c4c99f799f4da@epcms1p2>
2021-06-30 7:33 ` 권오훈 [this message]
2021-06-30 8:13 ` [PATCH] mm: cleancache: fix potential race in cleancache apis gregkh
2021-06-30 11:26 ` Matthew Wilcox
2021-06-30 12:29 ` gregkh
[not found] ` <CGME20210630073310epcms1p2ad6803cfd9dbc8ab501c4c99f799f4da@epcms1p5>
2021-07-01 5:06 ` 권오훈
2021-07-01 5:58 ` gregkh
[not found] ` <CGME20210630073310epcms1p2ad6803cfd9dbc8ab501c4c99f799f4da@epcms1p3>
2021-07-01 8:56 ` 권오훈
2021-07-01 11:57 ` Matthew Wilcox
2021-07-01 8:14 ` Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210630073310epcms1p2ad6803cfd9dbc8ab501c4c99f799f4da@epcms1p2 \
--to=ohoono.kwon@samsung.com \
--cc=akpm@linux-foundation.org \
--cc=gregkh@linuxfoundation.org \
--cc=konrad.wilk@oracle.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=ohkwon1043@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).