From: Andreas Gruenbacher <agruenba@redhat.com>
To: Christoph Hellwig <hch@infradead.org>,
	"Darrick J . Wong" <djwong@kernel.org>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	Matthew Wilcox <willy@infradead.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>,
	linux-xfs@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-ext4@vger.kernel.org, cluster-devel@redhat.com
Subject: [RFC v3 7/7] iomap: Rename page_ops to folio_ops
Date: Fri, 16 Dec 2022 16:06:26 +0100	[thread overview]
Message-ID: <20221216150626.670312-8-agruenba@redhat.com> (raw)
In-Reply-To: <20221216150626.670312-1-agruenba@redhat.com>

The operations in struct iomap_page_ops all operate on folios, so rename
struct iomap_page_ops to struct iomap_folio_ops, ->page_prepare() to
->folio_prepare(), and ->page_done() to ->folio_done().

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
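A note for context (illustrative only, not part of the patch): after this
rename, a filesystem that wants per-folio hooks declares a struct
iomap_folio_ops and points iomap->folio_ops at it from its ->iomap_begin
handler, mirroring the gfs2 and xfs hunks below.  A minimal sketch with
hypothetical "foofs" names, assuming the iomap_folio_prepare() helper added
earlier in this series is usable from filesystem code:

static struct folio *
foofs_folio_prepare(struct iomap_iter *iter, loff_t pos, unsigned len)
{
        /*
         * Filesystem-specific preparation (e.g. starting a transaction)
         * would go here.  Return an ERR_PTR() on failure;
         * iomap_write_begin() checks IS_ERR_OR_NULL() on the result.
         */
        return iomap_folio_prepare(iter, pos);
}

/* Defined in the sketch after the patch. */
static void foofs_folio_done(struct inode *inode, loff_t pos,
                             unsigned copied, struct folio *folio);

static const struct iomap_folio_ops foofs_iomap_folio_ops = {
        .folio_prepare  = foofs_folio_prepare,
        .folio_done     = foofs_folio_done,
};

/* In ->iomap_begin(), once the mapping has been filled in: */
        iomap->folio_ops = &foofs_iomap_folio_ops;

Filesystems that leave iomap->folio_ops NULL keep the generic behaviour:
iomap_write_begin() takes the folio via iomap_folio_prepare(), and
iomap_folio_done() unlocks and puts it, as the fs/iomap/buffered-io.c hunk
below shows.
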
 fs/gfs2/bmap.c         | 16 ++++++++--------
 fs/iomap/buffered-io.c | 12 ++++++------
 fs/xfs/xfs_iomap.c     |  8 ++++----
 include/linux/iomap.h  | 22 +++++++++++-----------
 4 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index cd5984d3ba50..ba8627ddc2bc 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -960,7 +960,7 @@ static int __gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
 }
 
 static struct folio *
-gfs2_iomap_page_prepare(struct iomap_iter *iter, loff_t pos, unsigned len)
+gfs2_iomap_folio_prepare(struct iomap_iter *iter, loff_t pos, unsigned len)
 {
 	struct inode *inode = iter->inode;
 	unsigned int blockmask = i_blocksize(inode) - 1;
@@ -980,8 +980,8 @@ gfs2_iomap_page_prepare(struct iomap_iter *iter, loff_t pos, unsigned len)
 	return folio;
 }
 
-static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
-				 unsigned copied, struct folio *folio)
+static void gfs2_iomap_folio_done(struct inode *inode, loff_t pos,
+				  unsigned copied, struct folio *folio)
 {
 	struct gfs2_trans *tr = current->journal_info;
 	struct gfs2_inode *ip = GFS2_I(inode);
@@ -1005,9 +1005,9 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
 	gfs2_trans_end(sdp);
 }
 
-static const struct iomap_page_ops gfs2_iomap_page_ops = {
-	.page_prepare = gfs2_iomap_page_prepare,
-	.page_done = gfs2_iomap_page_done,
+static const struct iomap_folio_ops gfs2_iomap_folio_ops = {
+	.folio_prepare = gfs2_iomap_folio_prepare,
+	.folio_done = gfs2_iomap_folio_done,
 };
 
 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
@@ -1083,7 +1083,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
 	}
 
 	if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
-		iomap->page_ops = &gfs2_iomap_page_ops;
+		iomap->folio_ops = &gfs2_iomap_folio_ops;
 	return 0;
 
 out_trans_end:
@@ -1299,7 +1299,7 @@ int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
 /*
  * NOTE: Never call gfs2_block_zero_range with an open transaction because it
  * uses iomap write to perform its actions, which begin their own transactions
- * (iomap_begin, page_prepare, etc.)
+ * (iomap_begin, folio_prepare, etc.)
  */
 static int gfs2_block_zero_range(struct inode *inode, loff_t from,
 				 unsigned int length)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index b73ff317da21..da4570d9d1ff 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -598,10 +598,10 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 static void iomap_folio_done(struct iomap_iter *iter, loff_t pos, size_t ret,
 		struct folio *folio)
 {
-	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
+	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
 
-	if (page_ops && page_ops->page_done) {
-		page_ops->page_done(iter->inode, pos, ret, folio);
+	if (folio_ops && folio_ops->folio_done) {
+		folio_ops->folio_done(iter->inode, pos, ret, folio);
 	} else {
 		folio_unlock(folio);
 		folio_put(folio);
@@ -620,7 +620,7 @@ static int iomap_write_begin_inline(const struct iomap_iter *iter,
 static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 		size_t len, struct folio **foliop)
 {
-	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
+	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
 	struct folio *folio;
 	int status;
@@ -635,8 +635,8 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 	if (!mapping_large_folio_support(iter->inode->i_mapping))
 		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
 
-	if (page_ops && page_ops->page_prepare)
-		folio = page_ops->page_prepare(iter, pos, len);
+	if (folio_ops && folio_ops->folio_prepare)
+		folio = folio_ops->folio_prepare(iter, pos, len);
 	else
 		folio = iomap_folio_prepare(iter, pos);
 	if (IS_ERR_OR_NULL(folio)) {
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 2248ce7be2e3..79b3f2d4c8ab 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -63,7 +63,7 @@ xfs_iomap_inode_sequence(
 }
 
 static struct folio *
-xfs_page_prepare(
+xfs_folio_prepare(
 	struct iomap_iter	*iter,
 	loff_t			pos,
 	unsigned		len)
@@ -99,8 +99,8 @@ xfs_page_prepare(
 	return folio;
 }
 
-const struct iomap_page_ops xfs_iomap_page_ops = {
-	.page_prepare		= xfs_page_prepare,
+const struct iomap_folio_ops xfs_iomap_folio_ops = {
+	.folio_prepare		= xfs_folio_prepare,
 };
 
 int
@@ -149,7 +149,7 @@ xfs_bmbt_to_iomap(
 		iomap->flags |= IOMAP_F_DIRTY;
 
 	iomap->validity_cookie = sequence_cookie;
-	iomap->page_ops = &xfs_iomap_page_ops;
+	iomap->folio_ops = &xfs_iomap_folio_ops;
 	return 0;
 }
 
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 1c8b9a04b0bb..85d360881851 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -86,7 +86,7 @@ struct vm_fault;
  */
 #define IOMAP_NULL_ADDR -1ULL	/* addr is not valid */
 
-struct iomap_page_ops;
+struct iomap_folio_ops;
 
 struct iomap {
 	u64			addr; /* disk offset of mapping, bytes */
@@ -98,7 +98,7 @@ struct iomap {
 	struct dax_device	*dax_dev; /* dax_dev for dax operations */
 	void			*inline_data;
 	void			*private; /* filesystem private */
-	const struct iomap_page_ops *page_ops;
+	const struct iomap_folio_ops *folio_ops;
 	u64			validity_cookie; /* used with .iomap_valid() */
 };
 
@@ -126,19 +126,19 @@ static inline bool iomap_inline_data_valid(const struct iomap *iomap)
 }
 
 /*
- * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
- * and page_done will be called for each page written to.  This only applies to
- * buffered writes as unbuffered writes will not typically have pages
- * associated with them.
+ * When a filesystem sets folio_ops in an iomap mapping it returns,
+ * folio_prepare and folio_done will be called for each page written to.  This
+ * only applies to buffered writes as unbuffered writes will not typically have
+ * pages associated with them.
  *
- * When page_prepare succeeds, page_done will always be called to do any
- * cleanup work necessary.  page_done is responsible for unlocking and putting
+ * When folio_prepare succeeds, folio_done will always be called to do any
+ * cleanup work necessary.  folio_done is responsible for unlocking and putting
  * @folio.
  */
-struct iomap_page_ops {
-	struct folio *(*page_prepare)(struct iomap_iter *iter, loff_t pos,
+struct iomap_folio_ops {
+	struct folio *(*folio_prepare)(struct iomap_iter *iter, loff_t pos,
 			unsigned len);
-	void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
+	void (*folio_done)(struct inode *inode, loff_t pos, unsigned copied,
 			struct folio *folio);
 };
 
-- 
2.38.1
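
Also illustrative only (not part of the patch): the ->folio_done counterpart
to the "foofs" sketch above.  Per the comment in include/linux/iomap.h, once
->folio_prepare has succeeded, ->folio_done is always called and is
responsible for unlocking and putting @folio after any filesystem-specific
completion work:

static void foofs_folio_done(struct inode *inode, loff_t pos,
                             unsigned copied, struct folio *folio)
{
        /* Filesystem-specific completion work (e.g. dirty accounting). */
        folio_unlock(folio);
        folio_put(folio);
}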


Thread overview: 63+ messages
2022-12-16 15:06 [RFC v3 0/7] Turn iomap_page_ops into iomap_folio_ops Andreas Gruenbacher
2022-12-16 15:06 ` [RFC v3 1/7] fs: Add folio_may_straddle_isize helper Andreas Gruenbacher
2022-12-23 14:56   ` Christoph Hellwig
2022-12-23 22:04     ` Andreas Grünbacher
2022-12-24  7:21       ` Christoph Hellwig
2022-12-16 15:06 ` [RFC v3 2/7] iomap: Add iomap_folio_done helper Andreas Gruenbacher
2022-12-23 15:02   ` Christoph Hellwig
2022-12-23 20:54     ` Andreas Grünbacher
2022-12-24  7:22       ` Christoph Hellwig
2022-12-16 15:06 ` [RFC v3 3/7] iomap/gfs2: Unlock and put folio in page_done handler Andreas Gruenbacher
2022-12-23 15:03   ` Christoph Hellwig
2022-12-16 15:06 ` [RFC v3 4/7] iomap: Add iomap_folio_prepare helper Andreas Gruenbacher
2022-12-23 15:04   ` Christoph Hellwig
2022-12-23 21:05     ` Andreas Grünbacher
2022-12-24  7:23       ` Christoph Hellwig
2022-12-25  9:12         ` Matthew Wilcox
2022-12-28 15:55           ` Christoph Hellwig
2022-12-16 15:06 ` [RFC v3 5/7] iomap: Get page in page_prepare handler Andreas Gruenbacher
2022-12-16 16:30   ` Matthew Wilcox
2022-12-16 17:15     ` Andreas Gruenbacher
2022-12-23 15:07   ` Christoph Hellwig
2022-12-16 15:06 ` [RFC v3 6/7] iomap/xfs: Eliminate the iomap_valid handler Andreas Gruenbacher
2022-12-23 15:10   ` Christoph Hellwig
2022-12-16 15:06 ` [RFC v3 7/7] iomap: Rename page_ops to folio_ops Andreas Gruenbacher [this message]
2022-12-18 22:10 ` [RFC v4 0/7] Turn iomap_page_ops into iomap_folio_ops Andreas Gruenbacher
2022-12-18 22:10 ` [RFC v4 1/7] fs: Add folio_may_straddle_isize helper Andreas Gruenbacher
2022-12-18 22:10 ` [RFC v4 2/7] iomap: Add iomap_folio_done helper Andreas Gruenbacher
2022-12-18 22:10 ` [RFC v4 3/7] iomap/gfs2: Unlock and put folio in page_done handler Andreas Gruenbacher
2022-12-18 22:10 ` [RFC v4 4/7] iomap: Add iomap_folio_prepare helper Andreas Gruenbacher
2022-12-18 22:10 ` [RFC v4 5/7] iomap/gfs2: Get page in page_prepare handler Andreas Gruenbacher
2022-12-18 22:10 ` [RFC v4 6/7] iomap/xfs: Eliminate the iomap_valid handler Andreas Gruenbacher
2022-12-18 22:10 ` [RFC v4 7/7] iomap: Rename page_ops to folio_ops Andreas Gruenbacher
