From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
To: linux-fsdevel@vger.kernel.org
Cc: linux-cifs@vger.kernel.org, linux-nilfs@vger.kernel.org,
	"Vishal Moola \(Oracle\)" <vishal.moola@gmail.com>,
	linux-kernel@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net, cluster-devel@redhat.com,
	linux-mm@kvack.org, ceph-devel@vger.kernel.org,
	linux-ext4@vger.kernel.org, linux-afs@lists.infradead.org,
	linux-btrfs@vger.kernel.org
Subject: [f2fs-dev] [PATCH v5 17/23] gfs2: Convert gfs2_write_cache_jdata() to use filemap_get_folios_tag()
Date: Wed,  4 Jan 2023 13:14:42 -0800	[thread overview]
Message-ID: <20230104211448.4804-18-vishal.moola@gmail.com> (raw)
In-Reply-To: <20230104211448.4804-1-vishal.moola@gmail.com>

Convert the function to use folios throughout. This is in preparation
for the removal of find_get_pages_range_tag(). This change removes 8
calls to compound_head().
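
For illustration: each page-flag test such as PageDirty() resolves
compound_head() internally, while the folio helpers act on the folio
directly. A simplified before/after contrast (sketch only, not part of
the diff below):

	/* before: PageDirty(page) implies a compound_head(page) lookup */
	if (!PageDirty(page))
		goto continue_unlock;

	/* after: a folio is never a tail page, so no head lookup is needed */
	if (!folio_test_dirty(folio))
		goto continue_unlock;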

Also modify and rename gfs2_write_jdata_pagevec() to take and use a
folio_batch rather than a pagevec, and to operate on folios rather
than pages. gfs2_write_jdata_batch() now supports large folios.
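
For context, the resulting writeback loop follows the usual tagged
lookup pattern. A minimal sketch with simplified locking and error
handling (illustrative only; "start" and "end" stand in for the range
bounds derived from wbc, and data-integrity sync would look up
PAGECACHE_TAG_TOWRITE instead of the dirty tag):

	struct folio_batch fbatch;
	pgoff_t index = start;
	unsigned int i, nr;

	folio_batch_init(&fbatch);
	while ((nr = filemap_get_folios_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, &fbatch))) {
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			/* lock the folio, clear its dirty bit, write it back */
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

Note that the transaction reservation now scales with folio size: the
block count is summed per folio, so a batch holding folios of 1, 4,
and 8 pages gives nr_pages = 13, and with 4K pages and 1K blocks
(inode->i_blkbits == 10) nrblocks = 13 * (4096 >> 10) = 52.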

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 fs/gfs2/aops.c | 64 +++++++++++++++++++++++++++-----------------------
 1 file changed, 35 insertions(+), 29 deletions(-)

diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index e782b4f1d104..0a47068f9acc 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -195,67 +195,71 @@ static int gfs2_writepages(struct address_space *mapping,
 }
 
 /**
- * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
+ * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
  * @mapping: The mapping
  * @wbc: The writeback control
- * @pvec: The vector of pages
- * @nr_pages: The number of pages to write
+ * @fbatch: The batch of folios
  * @done_index: Page index
  *
  * Returns: non-zero if loop should terminate, zero otherwise
  */
 
-static int gfs2_write_jdata_pagevec(struct address_space *mapping,
+static int gfs2_write_jdata_batch(struct address_space *mapping,
 				    struct writeback_control *wbc,
-				    struct pagevec *pvec,
-				    int nr_pages,
+				    struct folio_batch *fbatch,
 				    pgoff_t *done_index)
 {
 	struct inode *inode = mapping->host;
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
+	unsigned nrblocks;
 	int i;
 	int ret;
+	int nr_pages = 0;
+	int nr_folios = folio_batch_count(fbatch);
+
+	for (i = 0; i < nr_folios; i++)
+		nr_pages += folio_nr_pages(fbatch->folios[i]);
+	nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
 
 	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
 	if (ret < 0)
 		return ret;
 
-	for(i = 0; i < nr_pages; i++) {
-		struct page *page = pvec->pages[i];
+	for (i = 0; i < nr_folios; i++) {
+		struct folio *folio = fbatch->folios[i];
 
-		*done_index = page->index;
+		*done_index = folio->index;
 
-		lock_page(page);
+		folio_lock(folio);
 
-		if (unlikely(page->mapping != mapping)) {
+		if (unlikely(folio->mapping != mapping)) {
 continue_unlock:
-			unlock_page(page);
+			folio_unlock(folio);
 			continue;
 		}
 
-		if (!PageDirty(page)) {
+		if (!folio_test_dirty(folio)) {
 			/* someone wrote it for us */
 			goto continue_unlock;
 		}
 
-		if (PageWriteback(page)) {
+		if (folio_test_writeback(folio)) {
 			if (wbc->sync_mode != WB_SYNC_NONE)
-				wait_on_page_writeback(page);
+				folio_wait_writeback(folio);
 			else
 				goto continue_unlock;
 		}
 
-		BUG_ON(PageWriteback(page));
-		if (!clear_page_dirty_for_io(page))
+		BUG_ON(folio_test_writeback(folio));
+		if (!folio_clear_dirty_for_io(folio))
 			goto continue_unlock;
 
 		trace_wbc_writepage(wbc, inode_to_bdi(inode));
 
-		ret = __gfs2_jdata_writepage(page, wbc);
+		ret = __gfs2_jdata_writepage(&folio->page, wbc);
 		if (unlikely(ret)) {
 			if (ret == AOP_WRITEPAGE_ACTIVATE) {
-				unlock_page(page);
+				folio_unlock(folio);
 				ret = 0;
 			} else {
 
@@ -268,7 +272,8 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
 				 * not be suitable for data integrity
 				 * writeout).
 				 */
-				*done_index = page->index + 1;
+				*done_index = folio->index +
+					folio_nr_pages(folio);
 				ret = 1;
 				break;
 			}
@@ -305,8 +310,8 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
 {
 	int ret = 0;
 	int done = 0;
-	struct pagevec pvec;
-	int nr_pages;
+	struct folio_batch fbatch;
+	int nr_folios;
 	pgoff_t writeback_index;
 	pgoff_t index;
 	pgoff_t end;
@@ -315,7 +320,7 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
 	int range_whole = 0;
 	xa_mark_t tag;
 
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 	if (wbc->range_cyclic) {
 		writeback_index = mapping->writeback_index; /* prev offset */
 		index = writeback_index;
@@ -341,17 +346,18 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
 		tag_pages_for_writeback(mapping, index, end);
 	done_index = index;
 	while (!done && (index <= end)) {
-		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
-				tag);
-		if (nr_pages == 0)
+		nr_folios = filemap_get_folios_tag(mapping, &index, end,
+				tag, &fbatch);
+		if (nr_folios == 0)
 			break;
 
-		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
+		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
+				&done_index);
 		if (ret)
 			done = 1;
 		if (ret > 0)
 			ret = 0;
-		pagevec_release(&pvec);
+		folio_batch_release(&fbatch);
 		cond_resched();
 	}
 
-- 
2.38.1



Thread overview: 121+ messages

2023-01-04 21:14 [PATCH v5 00/23] Convert to filemap_get_folios_tag() Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 01/23] pagemap: Add filemap_grab_folio() Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 02/23] filemap: Added filemap_get_folios_tag() Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 03/23] filemap: Convert __filemap_fdatawait_range() to use filemap_get_folios_tag() Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 04/23] page-writeback: Convert write_cache_pages() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 05/23] afs: Convert afs_writepages_region() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 06/23] btrfs: Convert btree_write_cache_pages() to use filemap_get_folio_tag() Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 07/23] btrfs: Convert extent_write_cache_pages() to use filemap_get_folios_tag() Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 08/23] ceph: Convert ceph_writepages_start() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 09/23] cifs: Convert wdata_alloc_and_fillpages() " Vishal Moola (Oracle)
2023-01-12 17:19   ` Vishal Moola
2023-01-13  3:03     ` Tom Talpey
2023-01-12 19:23   ` Paulo Alcantara
2023-01-04 21:14 ` [PATCH v5 10/23] ext4: Convert mpage_prepare_extent_to_map() " Vishal Moola (Oracle)
2023-01-12 17:16   ` Vishal Moola
2023-01-04 21:14 ` [PATCH v5 11/23] f2fs: Convert f2fs_fsync_node_pages() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 12/23] f2fs: Convert f2fs_flush_inline_data() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 13/23] f2fs: Convert f2fs_sync_node_pages() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 14/23] f2fs: Convert f2fs_write_cache_pages() " Vishal Moola (Oracle)
2023-01-12 10:17   ` Chao Yu
2023-01-04 21:14 ` [PATCH v5 15/23] f2fs: Convert last_fsync_dnode() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 16/23] f2fs: Convert f2fs_sync_meta_pages() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 17/23] gfs2: Convert gfs2_write_cache_jdata() " Vishal Moola (Oracle) [this message]
2023-01-06  7:57   ` Andreas Gruenbacher
2023-01-04 21:14 ` [PATCH v5 18/23] nilfs2: Convert nilfs_lookup_dirty_data_buffers() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 19/23] nilfs2: Convert nilfs_lookup_dirty_node_buffers() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 20/23] nilfs2: Convert nilfs_btree_lookup_dirty_buffers() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 21/23] nilfs2: Convert nilfs_copy_dirty_pages() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 22/23] nilfs2: Convert nilfs_clear_dirty_pages() " Vishal Moola (Oracle)
2023-01-04 21:14 ` [PATCH v5 23/23] filemap: Remove find_get_pages_range_tag() Vishal Moola (Oracle)
2023-02-28  1:01 ` [f2fs-dev] [PATCH v5 00/23] Convert to filemap_get_folios_tag() patchwork-bot+f2fs
