From: David Howells <dhowells@redhat.com>
To: Jeff Layton <jlayton@kernel.org>, Steve French <smfrench@gmail.com>
Cc: David Howells <dhowells@redhat.com>,
	Matthew Wilcox <willy@infradead.org>,
	Marc Dionne <marc.dionne@auristor.com>,
	Paulo Alcantara <pc@manguebit.com>,
	Shyam Prasad N <sprasad@microsoft.com>,
	Tom Talpey <tom@talpey.com>,
	Dominique Martinet <asmadeus@codewreck.org>,
	Eric Van Hensbergen <ericvh@kernel.org>,
	Ilya Dryomov <idryomov@gmail.com>,
	Christian Brauner <christian@brauner.io>,
	linux-cachefs@redhat.com, linux-afs@lists.infradead.org,
	linux-cifs@vger.kernel.org, linux-nfs@vger.kernel.org,
	ceph-devel@vger.kernel.org, v9fs@lists.linux.dev,
	linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
	netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH v5 12/40] netfs: Provide invalidate_folio and release_folio calls
Date: Thu, 21 Dec 2023 13:23:07 +0000
Message-ID: <20231221132400.1601991-13-dhowells@redhat.com>
In-Reply-To: <20231221132400.1601991-1-dhowells@redhat.com>

Provide default invalidate_folio and release_folio implementations.  These
wait for any in-progress write to the cache before allowing the folio to be
invalidated or released, and let 9p, afs and ceph drop their own
near-identical copies.  They will need to interact with invalidation
correctly at some point, and will be needed if netfslib is to make use of
folio->private for its own purposes.
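
As a rough sketch (not part of this patch), a converted filesystem simply
points its address_space_operations at the common helpers rather than
carrying its own copies; the ops table below is illustrative only, for a
hypothetical "myfs", and shows just the callbacks relevant here:

    #include <linux/netfs.h>

    /* Illustrative only: "myfs" is a hypothetical netfs-backed filesystem. */
    const struct address_space_operations myfs_aops = {
    	.read_folio	  = netfs_read_folio,
    	.readahead	  = netfs_readahead,
    	.dirty_folio	  = netfs_dirty_folio,
    	.release_folio	  = netfs_release_folio,
    	.invalidate_folio = netfs_invalidate_folio,
    	/* ... other ops (write_begin, writepages, etc.) ... */
    };

This is effectively what the 9p and afs conversions in the diff below end
up with for these two callbacks.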

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
---

Notes:
    Changes
    =======
    ver #5)
     - Removed ceph_fscache_note_page_release() also as that is now unused.
     - Added missing '*' to turn comment into kdoc.

 fs/9p/vfs_addr.c      | 33 ++-------------------------
 fs/afs/file.c         | 53 ++++---------------------------------------
 fs/ceph/addr.c        | 24 ++------------------
 fs/ceph/cache.h       | 10 --------
 fs/netfs/misc.c       | 42 ++++++++++++++++++++++++++++++++++
 include/linux/netfs.h |  6 +++--
 6 files changed, 54 insertions(+), 114 deletions(-)

diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 131b83c31f85..055b672a247d 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -88,35 +88,6 @@ const struct netfs_request_ops v9fs_req_ops = {
 	.issue_read		= v9fs_issue_read,
 };
 
-/**
- * v9fs_release_folio - release the private state associated with a folio
- * @folio: The folio to be released
- * @gfp: The caller's allocation restrictions
- *
- * Returns true if the page can be released, false otherwise.
- */
-
-static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
-{
-	if (folio_test_private(folio))
-		return false;
-#ifdef CONFIG_9P_FSCACHE
-	if (folio_test_fscache(folio)) {
-		if (current_is_kswapd() || !(gfp & __GFP_FS))
-			return false;
-		folio_wait_fscache(folio);
-	}
-	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(folio_inode(folio))));
-#endif
-	return true;
-}
-
-static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
-				 size_t length)
-{
-	folio_wait_fscache(folio);
-}
-
 #ifdef CONFIG_9P_FSCACHE
 static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
 				     bool was_async)
@@ -324,8 +295,8 @@ const struct address_space_operations v9fs_addr_operations = {
 	.writepage	= v9fs_vfs_writepage,
 	.write_begin	= v9fs_write_begin,
 	.write_end	= v9fs_write_end,
-	.release_folio	= v9fs_release_folio,
-	.invalidate_folio = v9fs_invalidate_folio,
+	.release_folio	= netfs_release_folio,
+	.invalidate_folio = netfs_invalidate_folio,
 	.launder_folio	= v9fs_launder_folio,
 	.direct_IO	= v9fs_direct_IO,
 };
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 0d783e5b2147..d152ba451f0e 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -20,9 +20,6 @@
 
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
 static int afs_symlink_read_folio(struct file *file, struct folio *folio);
-static void afs_invalidate_folio(struct folio *folio, size_t offset,
-			       size_t length);
-static bool afs_release_folio(struct folio *folio, gfp_t gfp_flags);
 
 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
 static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
@@ -57,8 +54,8 @@ const struct address_space_operations afs_file_aops = {
 	.readahead	= netfs_readahead,
 	.dirty_folio	= netfs_dirty_folio,
 	.launder_folio	= afs_launder_folio,
-	.release_folio	= afs_release_folio,
-	.invalidate_folio = afs_invalidate_folio,
+	.release_folio	= netfs_release_folio,
+	.invalidate_folio = netfs_invalidate_folio,
 	.write_begin	= afs_write_begin,
 	.write_end	= afs_write_end,
 	.writepages	= afs_writepages,
@@ -67,8 +64,8 @@ const struct address_space_operations afs_file_aops = {
 
 const struct address_space_operations afs_symlink_aops = {
 	.read_folio	= afs_symlink_read_folio,
-	.release_folio	= afs_release_folio,
-	.invalidate_folio = afs_invalidate_folio,
+	.release_folio	= netfs_release_folio,
+	.invalidate_folio = netfs_invalidate_folio,
 	.migrate_folio	= filemap_migrate_folio,
 };
 
@@ -386,48 +383,6 @@ const struct netfs_request_ops afs_req_ops = {
 	.issue_read		= afs_issue_read,
 };
 
-/*
- * invalidate part or all of a page
- * - release a page and clean up its private data if offset is 0 (indicating
- *   the entire page)
- */
-static void afs_invalidate_folio(struct folio *folio, size_t offset,
-			       size_t length)
-{
-	_enter("{%lu},%zu,%zu", folio->index, offset, length);
-
-	folio_wait_fscache(folio);
-	_leave("");
-}
-
-/*
- * release a page and clean up its private state if it's not busy
- * - return true if the page can now be released, false if not
- */
-static bool afs_release_folio(struct folio *folio, gfp_t gfp)
-{
-	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
-
-	_enter("{{%llx:%llu}[%lu],%lx},%x",
-	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
-	       gfp);
-
-	/* deny if folio is being written to the cache and the caller hasn't
-	 * elected to wait */
-#ifdef CONFIG_AFS_FSCACHE
-	if (folio_test_fscache(folio)) {
-		if (current_is_kswapd() || !(gfp & __GFP_FS))
-			return false;
-		folio_wait_fscache(folio);
-	}
-	fscache_note_page_release(afs_vnode_cache(vnode));
-#endif
-
-	/* Indicate that the folio can be released */
-	_leave(" = T");
-	return true;
-}
-
 static void afs_add_open_mmap(struct afs_vnode *vnode)
 {
 	if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 3b8641febeac..8eedc62e7ac4 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -159,27 +159,7 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
 		ceph_put_snap_context(snapc);
 	}
 
-	folio_wait_fscache(folio);
-}
-
-static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
-{
-	struct inode *inode = folio->mapping->host;
-	struct ceph_client *cl = ceph_inode_to_client(inode);
-
-	doutc(cl, "%llx.%llx idx %lu (%sdirty)\n", ceph_vinop(inode),
-	      folio->index, folio_test_dirty(folio) ? "" : "not ");
-
-	if (folio_test_private(folio))
-		return false;
-
-	if (folio_test_fscache(folio)) {
-		if (current_is_kswapd() || !(gfp & __GFP_FS))
-			return false;
-		folio_wait_fscache(folio);
-	}
-	ceph_fscache_note_page_release(inode);
-	return true;
+	netfs_invalidate_folio(folio, offset, length);
 }
 
 static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
@@ -1585,7 +1565,7 @@ const struct address_space_operations ceph_aops = {
 	.write_end = ceph_write_end,
 	.dirty_folio = ceph_dirty_folio,
 	.invalidate_folio = ceph_invalidate_folio,
-	.release_folio = ceph_release_folio,
+	.release_folio = netfs_release_folio,
 	.direct_IO = noop_direct_IO,
 };
 
diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h
index 8fc7d828d990..20efac020394 100644
--- a/fs/ceph/cache.h
+++ b/fs/ceph/cache.h
@@ -56,12 +56,6 @@ static inline bool ceph_is_cache_enabled(struct inode *inode)
 	return fscache_cookie_enabled(ceph_fscache_cookie(ceph_inode(inode)));
 }
 
-static inline void ceph_fscache_note_page_release(struct inode *inode)
-{
-	struct ceph_inode_info *ci = ceph_inode(inode);
-
-	fscache_note_page_release(ceph_fscache_cookie(ci));
-}
 #else /* CONFIG_CEPH_FSCACHE */
 static inline int ceph_fscache_register_fs(struct ceph_fs_client* fsc,
 					   struct fs_context *fc)
@@ -118,10 +112,6 @@ static inline bool ceph_is_cache_enabled(struct inode *inode)
 {
 	return false;
 }
-
-static inline void ceph_fscache_note_page_release(struct inode *inode)
-{
-}
 #endif /* CONFIG_CEPH_FSCACHE */
 
 #endif
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 68baf55c47a4..45bb19ec9a63 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -84,3 +84,45 @@ void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
 	}
 }
 EXPORT_SYMBOL(netfs_clear_inode_writeback);
+
+/**
+ * netfs_invalidate_folio - Invalidate or partially invalidate a folio
+ * @folio: Folio proposed for release
+ * @offset: Offset of the invalidated region
+ * @length: Length of the invalidated region
+ *
+ * Invalidate part or all of a folio for a network filesystem.  The folio will
+ * be removed afterwards if the invalidated region covers the entire folio.
+ */
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
+{
+	_enter("{%lx},%zx,%zx", folio_index(folio), offset, length);
+
+	folio_wait_fscache(folio);
+}
+EXPORT_SYMBOL(netfs_invalidate_folio);
+
+/**
+ * netfs_release_folio - Try to release a folio
+ * @folio: Folio proposed for release
+ * @gfp: Flags qualifying the release
+ *
+ * Request release of a folio and clean up its private state if it's not busy.
+ * Returns true if the folio can now be released, false if not
+ */
+bool netfs_release_folio(struct folio *folio, gfp_t gfp)
+{
+	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
+
+	if (folio_test_private(folio))
+		return false;
+	if (folio_test_fscache(folio)) {
+		if (current_is_kswapd() || !(gfp & __GFP_FS))
+			return false;
+		folio_wait_fscache(folio);
+	}
+
+	fscache_note_page_release(netfs_i_cookie(ctx));
+	return true;
+}
+EXPORT_SYMBOL(netfs_release_folio);
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 06f57d9d09f6..8efbfd3b2820 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -293,11 +293,13 @@ struct readahead_control;
 void netfs_readahead(struct readahead_control *);
 int netfs_read_folio(struct file *, struct folio *);
 int netfs_write_begin(struct netfs_inode *, struct file *,
-		struct address_space *, loff_t pos, unsigned int len,
-		struct folio **, void **fsdata);
+		      struct address_space *, loff_t pos, unsigned int len,
+		      struct folio **, void **fsdata);
 bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
 int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
 void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+bool netfs_release_folio(struct folio *folio, gfp_t gfp);
 
 void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
 void netfs_get_subrequest(struct netfs_io_subrequest *subreq,

