From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-arch@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, Mike Rapoport <rppt@kernel.org>,
	Dinh Nguyen <dinguyen@kernel.org>
Subject: [PATCH v6 18/38] nios2: Implement the new page table range API
Date: Wed,  2 Aug 2023 16:13:46 +0100
Message-ID: <20230802151406.3735276-19-willy@infradead.org>
In-Reply-To: <20230802151406.3735276-1-willy@infradead.org>

Add set_ptes(), update_mmu_cache_range(), flush_icache_pages() and
flush_dcache_folio().  Change the PG_arch_1 (aka PG_dcache_clean) flag
from being per-page to per-folio.
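
For illustration only (this sketch is not part of the patch; the
generic batched caller, set_pte_range(), arrives later in this
series), a fault path can now install every PTE for a folio and do
one batched cache/TLB update instead of one per page:

	pte_t pte = mk_pte(folio_page(folio, 0), vma->vm_page_prot);

	/* Install @nr consecutive PTEs with a single call ... */
	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
	/* ... then flush/reload once for the whole range. */
	update_mmu_cache_range(vmf, vma, addr, ptep, nr);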

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Dinh Nguyen <dinguyen@kernel.org>
---
 arch/nios2/include/asm/cacheflush.h |  6 ++-
 arch/nios2/include/asm/pgtable.h    | 28 ++++++----
 arch/nios2/mm/cacheflush.c          | 79 ++++++++++++++++-------------
 3 files changed, 67 insertions(+), 46 deletions(-)
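
A note for reviewers (commentary, not part of the change): call sites
that still operate on a single page need no conversion, because the
old entry points survive as one-page wrappers.  A minimal sketch of
an unconverted call site that keeps compiling against the new
headers:

	/* set_pte_at() == set_ptes(mm, addr, ptep, pte, 1) generically */
	set_pte_at(vma->vm_mm, addr, ptep, pte);
	/* update_mmu_cache() == update_mmu_cache_range(NULL, vma, addr, ptep, 1) */
	update_mmu_cache(vma, addr, ptep);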

diff --git a/arch/nios2/include/asm/cacheflush.h b/arch/nios2/include/asm/cacheflush.h
index d0b71dd71287..8624ca83cffe 100644
--- a/arch/nios2/include/asm/cacheflush.h
+++ b/arch/nios2/include/asm/cacheflush.h
@@ -29,9 +29,13 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 	unsigned long pfn);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
 
 extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+		unsigned int nr);
+#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
 
 #define flush_cache_vmap(start, end)		flush_dcache_range(start, end)
 #define flush_cache_vunmap(start, end)		flush_dcache_range(start, end)
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 0f5c2564e9f5..be6bf3e0bd7a 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -178,14 +178,21 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 	*ptep = pteval;
 }
 
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pteval)
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte, unsigned int nr)
 {
-	unsigned long paddr = (unsigned long)page_to_virt(pte_page(pteval));
-
-	flush_dcache_range(paddr, paddr + PAGE_SIZE);
-	set_pte(ptep, pteval);
+	unsigned long paddr = (unsigned long)page_to_virt(pte_page(pte));
+
+	flush_dcache_range(paddr, paddr + nr * PAGE_SIZE);
+	for (;;) {
+		set_pte(ptep, pte);
+		if (--nr == 0)
+			break;
+		ptep++;
+		pte_val(pte) += 1;
+	}
 }
+#define set_ptes set_ptes
 
 static inline int pmd_none(pmd_t pmd)
 {
@@ -202,7 +209,7 @@ static inline void pte_clear(struct mm_struct *mm,
 
 	pte_val(null) = (addr >> PAGE_SHIFT) & 0xf;
 
-	set_pte_at(mm, addr, ptep, null);
+	set_pte(ptep, null);
 }
 
 /*
@@ -273,7 +280,10 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 extern void __init paging_init(void);
 extern void __init mmu_init(void);
 
-extern void update_mmu_cache(struct vm_area_struct *vma,
-			     unsigned long address, pte_t *pte);
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep, unsigned int nr);
+
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 
 #endif /* _ASM_NIOS2_PGTABLE_H */
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 6aa9257c3ede..28b805f465a8 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -71,26 +71,26 @@ static void __flush_icache(unsigned long start, unsigned long end)
 	__asm__ __volatile(" flushp\n");
 }
 
-static void flush_aliases(struct address_space *mapping, struct page *page)
+static void flush_aliases(struct address_space *mapping, struct folio *folio)
 {
 	struct mm_struct *mm = current->active_mm;
-	struct vm_area_struct *mpnt;
+	struct vm_area_struct *vma;
 	pgoff_t pgoff;
+	unsigned long nr = folio_nr_pages(folio);
 
-	pgoff = page->index;
+	pgoff = folio->index;
 
 	flush_dcache_mmap_lock(mapping);
-	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
-		unsigned long offset;
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
+		unsigned long start;
 
-		if (mpnt->vm_mm != mm)
+		if (vma->vm_mm != mm)
 			continue;
-		if (!(mpnt->vm_flags & VM_MAYSHARE))
+		if (!(vma->vm_flags & VM_MAYSHARE))
 			continue;
 
-		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-		flush_cache_page(mpnt, mpnt->vm_start + offset,
-			page_to_pfn(page));
+		start = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+		flush_cache_range(vma, start, start + nr * PAGE_SIZE);
 	}
 	flush_dcache_mmap_unlock(mapping);
 }
@@ -138,10 +138,11 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 		__flush_icache(start, end);
 }
 
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+		unsigned int nr)
 {
 	unsigned long start = (unsigned long) page_address(page);
-	unsigned long end = start + PAGE_SIZE;
+	unsigned long end = start + nr * PAGE_SIZE;
 
 	__flush_dcache(start, end);
 	__flush_icache(start, end);
@@ -158,19 +159,19 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 		__flush_icache(start, end);
 }
 
-void __flush_dcache_page(struct address_space *mapping, struct page *page)
+static void __flush_dcache_folio(struct folio *folio)
 {
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
 	 * coherent with the kernels mapping.
 	 */
-	unsigned long start = (unsigned long)page_address(page);
+	unsigned long start = (unsigned long)folio_address(folio);
 
-	__flush_dcache(start, start + PAGE_SIZE);
+	__flush_dcache(start, start + folio_size(folio));
 }
 
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
 {
 	struct address_space *mapping;
 
@@ -178,32 +179,38 @@ void flush_dcache_page(struct page *page)
 	 * The zero page is never written to, so never has any dirty
 	 * cache lines, and therefore never needs to be flushed.
 	 */
-	if (page == ZERO_PAGE(0))
+	if (is_zero_pfn(folio_pfn(folio)))
 		return;
 
-	mapping = page_mapping_file(page);
+	mapping = folio_flush_mapping(folio);
 
 	/* Flush this page if there are aliases. */
 	if (mapping && !mapping_mapped(mapping)) {
-		clear_bit(PG_dcache_clean, &page->flags);
+		clear_bit(PG_dcache_clean, &folio->flags);
 	} else {
-		__flush_dcache_page(mapping, page);
+		__flush_dcache_folio(folio);
 		if (mapping) {
-			unsigned long start = (unsigned long)page_address(page);
-			flush_aliases(mapping,  page);
-			flush_icache_range(start, start + PAGE_SIZE);
+			unsigned long start = (unsigned long)folio_address(folio);
+			flush_aliases(mapping, folio);
+			flush_icache_range(start, start + folio_size(folio));
 		}
-		set_bit(PG_dcache_clean, &page->flags);
+		set_bit(PG_dcache_clean, &folio->flags);
 	}
 }
+EXPORT_SYMBOL(flush_dcache_folio);
+
+void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 EXPORT_SYMBOL(flush_dcache_page);
 
-void update_mmu_cache(struct vm_area_struct *vma,
-		      unsigned long address, pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep, unsigned int nr)
 {
 	pte_t pte = *ptep;
 	unsigned long pfn = pte_pfn(pte);
-	struct page *page;
+	struct folio *folio;
 	struct address_space *mapping;
 
 	reload_tlb_page(vma, address, pte);
@@ -215,19 +222,19 @@ void update_mmu_cache(struct vm_area_struct *vma,
 	* The zero page is never written to, so never has any dirty
 	* cache lines, and therefore never needs to be flushed.
 	*/
-	page = pfn_to_page(pfn);
-	if (page == ZERO_PAGE(0))
+	if (is_zero_pfn(pfn))
 		return;
 
-	mapping = page_mapping_file(page);
-	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		__flush_dcache_page(mapping, page);
+	folio = page_folio(pfn_to_page(pfn));
+	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+		__flush_dcache_folio(folio);
 
-	if(mapping)
-	{
-		flush_aliases(mapping, page);
+	mapping = folio_flush_mapping(folio);
+	if (mapping) {
+		flush_aliases(mapping, folio);
 		if (vma->vm_flags & VM_EXEC)
-			flush_icache_page(vma, page);
+			flush_icache_pages(vma, &folio->page,
+					folio_nr_pages(folio));
 	}
 }
 
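(Commentary, not part of the patch.)  update_mmu_cache_range() above
reads only *ptep, the first PTE of the batch.  That is sufficient
because the range API's contract is that all @nr pages belong to the
same folio, so a single PG_dcache_clean test on that folio covers the
whole range.  Stated as a hypothetical sanity check:

	/* All @nr PTEs must map consecutive pages of one folio. */
	VM_WARN_ON(page_folio(pfn_to_page(pte_pfn(ptep[nr - 1]))) != folio);
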
-- 
2.40.1

