From: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
To: linux-xfs@vger.kernel.org
Cc: linux-fsdevel@vger.kernel.org,
	Matthew Wilcox <willy@infradead.org>,
	Dave Chinner <david@fromorbit.com>,
	Brian Foster <bfoster@redhat.com>,
	Ojaswin Mujoo <ojaswin@linux.ibm.com>,
	Disha Goel <disgoel@linux.ibm.com>,
	"Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
Subject: [RFCv5 3/5] iomap: Add iop's uptodate state handling functions
Date: Mon,  8 May 2023 00:57:58 +0530
Message-ID: <5372f29f986052f37b45c368a0eb8eed25eb8fdb.1683485700.git.ritesh.list@gmail.com>
In-Reply-To: <cover.1683485700.git.ritesh.list@gmail.com>

First, this patch renames the iop->uptodate bitmap to iop->state, since
later patches will add per-block dirty state tracking to the same
bitmap.

Second, it adds helpers for handling the uptodate state within
iop->state, i.e. for setting and checking the uptodate bits of
individual blocks or ranges of blocks within a folio.

No functional change in this patch.
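
For readers less familiar with iomap's sub-folio state tracking, below
is a minimal userspace sketch (not part of the patch; folio/block sizes
and names are illustrative assumptions) of the per-block bitmap idea
that iop->state implements: one bit per filesystem block in a folio,
set once that block is uptodate.

/*
 * Illustrative stand-in for the iop->state uptodate bits.  Kernel
 * types, atomics and the state_lock are omitted; sizes are assumed.
 */
#include <stdbool.h>
#include <stdio.h>

#define FOLIO_SIZE	(64 * 1024)	/* assumed 64k folio */
#define BLOCK_SIZE	(4 * 1024)	/* assumed 4k fs block */
#define BLOCKS_PER_FOLIO (FOLIO_SIZE / BLOCK_SIZE)

static bool state[BLOCKS_PER_FOLIO];	/* stands in for iop->state */

/* analogous to iop_set_range(): mark blocks covering [off, off+len) */
static void set_range_uptodate(size_t off, size_t len)
{
	size_t first_blk = off / BLOCK_SIZE;
	size_t last_blk = (off + len - 1) / BLOCK_SIZE;

	for (size_t i = first_blk; i <= last_blk; i++)
		state[i] = true;
}

/* analogous to iop_test_full_uptodate(): is the whole folio uptodate? */
static bool folio_fully_uptodate(void)
{
	for (size_t i = 0; i < BLOCKS_PER_FOLIO; i++)
		if (!state[i])
			return false;
	return true;
}

int main(void)
{
	set_range_uptodate(0, 8192);		/* blocks 0-1 */
	set_range_uptodate(16384, 4096);	/* block 4 */
	printf("block 1 uptodate: %d\n", state[1]);
	printf("folio fully uptodate: %d\n", folio_fully_uptodate());
	return 0;
}

The kernel code uses bitmap_set()/test_bit() on iop->state under
state_lock rather than a plain bool array, but the mapping from a byte
range to block indices is the same idea.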

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
---
 fs/iomap/buffered-io.c | 78 +++++++++++++++++++++++++++++++-----------
 1 file changed, 58 insertions(+), 20 deletions(-)

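Note for reviewers: the kzalloc(struct_size(iop, state,
BITS_TO_LONGS(nr_blocks)), gfp) sizing in the diff below can look
dense.  The following standalone sketch (illustrative only; the
userspace macros mirror the kernel ones and the block count is an
assumption) shows what that allocation works out to.

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	  (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* trimmed stand-in for struct iomap_page; atomics and locking omitted */
struct iop_sketch {
	int read_bytes_pending;
	int write_bytes_pending;
	unsigned long state[];	/* one bit per block in the folio */
};

int main(void)
{
	unsigned int nr_blocks = 16;	/* e.g. 64k folio with 4k blocks */
	/* same result as struct_size(iop, state, BITS_TO_LONGS(nr_blocks)) */
	size_t alloc = sizeof(struct iop_sketch) +
		       BITS_TO_LONGS(nr_blocks) * sizeof(unsigned long);

	printf("longs for %u blocks: %zu, total allocation: %zu bytes\n",
	       nr_blocks, (size_t)BITS_TO_LONGS(nr_blocks), alloc);
	return 0;
}
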
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e732581dc2d4..5103b644e115 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -24,14 +24,14 @@
 #define IOEND_BATCH_SIZE	4096
 
 /*
- * Structure allocated for each folio when block size < folio size
- * to track sub-folio uptodate status and I/O completions.
+ * Structure allocated for each folio to track per-block uptodate state
+ * and I/O completions.
  */
 struct iomap_page {
 	atomic_t		read_bytes_pending;
 	atomic_t		write_bytes_pending;
-	spinlock_t		uptodate_lock;
-	unsigned long		uptodate[];
+	spinlock_t		state_lock;
+	unsigned long		state[];
 };
 
 static inline struct iomap_page *to_iomap_page(struct folio *folio)
@@ -43,6 +43,47 @@ static inline struct iomap_page *to_iomap_page(struct folio *folio)
 
 static struct bio_set iomap_ioend_bioset;
 
+/*
+ * inline helpers for bitmap operations on iop->state
+ */
+static inline void iop_set_range(struct iomap_page *iop, unsigned int start_blk,
+				 unsigned int nr_blks)
+{
+	bitmap_set(iop->state, start_blk, nr_blks);
+}
+
+static inline bool iop_test_block(struct iomap_page *iop, unsigned int block)
+{
+	return test_bit(block, iop->state);
+}
+
+static inline bool iop_bitmap_full(struct iomap_page *iop,
+				   unsigned int blks_per_folio)
+{
+	return bitmap_full(iop->state, blks_per_folio);
+}
+
+/*
+ * iop related helpers for checking uptodate/dirty state of per-block
+ * or range of blocks within a folio
+ */
+static bool iop_test_full_uptodate(struct folio *folio)
+{
+	struct iomap_page *iop = to_iomap_page(folio);
+	struct inode *inode = folio->mapping->host;
+
+	WARN_ON(!iop);
+	return iop_bitmap_full(iop, i_blocks_per_folio(inode, folio));
+}
+
+static bool iop_test_block_uptodate(struct folio *folio, unsigned int block)
+{
+	struct iomap_page *iop = to_iomap_page(folio);
+
+	WARN_ON(!iop);
+	return iop_test_block(iop, block);
+}
+
 static void iop_set_range_uptodate(struct inode *inode, struct folio *folio,
 				   size_t off, size_t len)
 {
@@ -53,12 +94,11 @@ static void iop_set_range_uptodate(struct inode *inode, struct folio *folio,
 	unsigned long flags;
 
 	if (iop) {
-		spin_lock_irqsave(&iop->uptodate_lock, flags);
-		bitmap_set(iop->uptodate, first_blk, nr_blks);
-		if (bitmap_full(iop->uptodate,
-				i_blocks_per_folio(inode, folio)))
+		spin_lock_irqsave(&iop->state_lock, flags);
+		iop_set_range(iop, first_blk, nr_blks);
+		if (iop_test_full_uptodate(folio))
 			folio_mark_uptodate(folio);
-		spin_unlock_irqrestore(&iop->uptodate_lock, flags);
+		spin_unlock_irqrestore(&iop->state_lock, flags);
 	} else {
 		folio_mark_uptodate(folio);
 	}
@@ -79,12 +119,12 @@ static struct iomap_page *iop_alloc(struct inode *inode, struct folio *folio,
 	else
 		gfp = GFP_NOFS | __GFP_NOFAIL;
 
-	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
+	iop = kzalloc(struct_size(iop, state, BITS_TO_LONGS(nr_blocks)),
 		      gfp);
 	if (iop) {
-		spin_lock_init(&iop->uptodate_lock);
+		spin_lock_init(&iop->state_lock);
 		if (folio_test_uptodate(folio))
-			bitmap_fill(iop->uptodate, nr_blocks);
+			iop_set_range(iop, 0, nr_blocks);
 		folio_attach_private(folio, iop);
 	}
 	return iop;
@@ -93,15 +133,13 @@ static struct iomap_page *iop_alloc(struct inode *inode, struct folio *folio,
 static void iop_free(struct folio *folio)
 {
 	struct iomap_page *iop = to_iomap_page(folio);
-	struct inode *inode = folio->mapping->host;
-	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
 
 	if (!iop)
 		return;
 	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
 	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
-	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
-			folio_test_uptodate(folio));
+	WARN_ON_ONCE(iop_test_full_uptodate(folio) !=
+		     folio_test_uptodate(folio));
 	folio_detach_private(folio);
 	kfree(iop);
 }
@@ -132,7 +170,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
 
 		/* move forward for each leading block marked uptodate */
 		for (i = first; i <= last; i++) {
-			if (!test_bit(i, iop->uptodate))
+			if (!iop_test_block_uptodate(folio, i))
 				break;
 			*pos += block_size;
 			poff += block_size;
@@ -142,7 +180,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
 
 		/* truncate len if we find any trailing uptodate block(s) */
 		for ( ; i <= last; i++) {
-			if (test_bit(i, iop->uptodate)) {
+			if (iop_test_block_uptodate(folio, i)) {
 				plen -= (last - i + 1) * block_size;
 				last = i - 1;
 				break;
@@ -450,7 +488,7 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 	last = (from + count - 1) >> inode->i_blkbits;
 
 	for (i = first; i <= last; i++)
-		if (!test_bit(i, iop->uptodate))
+		if (!iop_test_block_uptodate(folio, i))
 			return false;
 	return true;
 }
@@ -1634,7 +1672,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	 * invalid, grab a new one.
 	 */
 	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
-		if (iop && !test_bit(i, iop->uptodate))
+		if (iop && !iop_test_block_uptodate(folio, i))
 			continue;
 
 		error = wpc->ops->map_blocks(wpc, inode, pos);
-- 
2.39.2

