* [PATCH] mm: increase usage of folio_next_index() helper
From: Sidhartha Kumar @ 2023-06-27 17:43 UTC
  To: linux-kernel, linux-mm, linux-fsdevel, linux-ext4
  Cc: akpm, tytso, willy, adilger.kernel, hughd, hch, Sidhartha Kumar

Simplify the open-coded pattern 'folio->index + folio_nr_pages(folio)' by
using the existing helper folio_next_index().
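
For reference, folio_next_index() is a thin wrapper around the same
arithmetic; a rough sketch of the helper (as defined in
include/linux/pagemap.h) is:

	/* Return the index one past the last page covered by this folio. */
	static inline pgoff_t folio_next_index(struct folio *folio)
	{
		return folio->index + folio_nr_pages(folio);
	}

so folio_next_index(folio) - 1 is the index of the folio's last page,
which is the form used in the range checks converted below.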

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Suggested-by: Christoph Hellwig <hch@infradead.org>
---
 fs/ext4/inode.c | 4 ++--
 mm/filemap.c    | 8 ++++----
 mm/memory.c     | 2 +-
 mm/shmem.c      | 2 +-
 mm/truncate.c   | 2 +-
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 31b839a0ce8b8..ded4cbb017a51 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1583,7 +1583,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
 
 			if (folio->index < mpd->first_page)
 				continue;
-			if (folio->index + folio_nr_pages(folio) - 1 > end)
+			if (folio_next_index(folio) - 1 > end)
 				continue;
 			BUG_ON(!folio_test_locked(folio));
 			BUG_ON(folio_test_writeback(folio));
@@ -2481,7 +2481,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 
 			if (mpd->map.m_len == 0)
 				mpd->first_page = folio->index;
-			mpd->next_page = folio->index + folio_nr_pages(folio);
+			mpd->next_page = folio_next_index(folio);
 			/*
 			 * Writeout when we cannot modify metadata is simple.
 			 * Just submit the page. For data=journal mode we
diff --git a/mm/filemap.c b/mm/filemap.c
index 758bbdf300e73..fdac934ce81a6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2075,7 +2075,7 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
 		if (!xa_is_value(folio)) {
 			if (folio->index < *start)
 				goto put;
-			if (folio->index + folio_nr_pages(folio) - 1 > end)
+			if (folio_next_index(folio) - 1 > end)
 				goto put;
 			if (!folio_trylock(folio))
 				goto put;
@@ -2174,7 +2174,7 @@ bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
 		return false;
 	if (index >= max)
 		return false;
-	return index < folio->index + folio_nr_pages(folio) - 1;
+	return index < folio_next_index(folio) - 1;
 }
 
 /**
@@ -2242,7 +2242,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
 		if (folio_test_hugetlb(folio))
 			*start = folio->index + 1;
 		else
-			*start = folio->index + folio_nr_pages(folio);
+			*start = folio_next_index(folio);
 	}
 out:
 	rcu_read_unlock();
@@ -2359,7 +2359,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
 			break;
 		if (folio_test_readahead(folio))
 			break;
-		xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
+		xas_advance(&xas, folio_next_index(folio) - 1);
 		continue;
 put_folio:
 		folio_put(folio);
diff --git a/mm/memory.c b/mm/memory.c
index 21fab27272092..0cabfe9d501d9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3497,7 +3497,7 @@ void unmap_mapping_folio(struct folio *folio)
 	VM_BUG_ON(!folio_test_locked(folio));
 
 	first_index = folio->index;
-	last_index = folio->index + folio_nr_pages(folio) - 1;
+	last_index = folio_next_index(folio) - 1;
 
 	details.even_cows = false;
 	details.single_folio = folio;
diff --git a/mm/shmem.c b/mm/shmem.c
index 85a0f8751a147..d4ca8bc7ab94f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -970,7 +970,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		same_folio = lend < folio_pos(folio) + folio_size(folio);
 		folio_mark_dirty(folio);
 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
-			start = folio->index + folio_nr_pages(folio);
+			start = folio_next_index(folio);
 			if (same_folio)
 				end = folio->index;
 		}
diff --git a/mm/truncate.c b/mm/truncate.c
index 95d1291d269b5..2f28cc0e12ef1 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -378,7 +378,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	if (!IS_ERR(folio)) {
 		same_folio = lend < folio_pos(folio) + folio_size(folio);
 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
-			start = folio->index + folio_nr_pages(folio);
+			start = folio_next_index(folio);
 			if (same_folio)
 				end = folio->index;
 		}
-- 
2.41.0



* Re: [PATCH] mm: increase usage of folio_next_index() helper
From: Christoph Hellwig @ 2023-06-28  4:54 UTC
  To: Sidhartha Kumar
  Cc: linux-kernel, linux-mm, linux-fsdevel, linux-ext4, akpm, tytso,
	willy, adilger.kernel, hughd, hch

Looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

