From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org, linux-arch@vger.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-kernel@vger.kernel.org,
	"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
	Helge Deller <deller@gmx.de>,
	linux-parisc@vger.kernel.org
Subject: [PATCH v2 17/30] parisc: Implement the new page table range API
Date: Mon, 27 Feb 2023 17:57:28 +0000
Message-ID: <20230227175741.71216-18-willy@infradead.org>
In-Reply-To: <20230227175741.71216-1-willy@infradead.org>

Add set_ptes(), update_mmu_cache_range(), flush_dcache_folio()
and flush_icache_pages().  Change the PG_arch_1 (aka PG_dcache_dirty) flag
from being per-page to per-folio.
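
For illustration only (not part of this patch), here is roughly how a
caller maps a multi-page folio with the new API.  This is a sketch: the
real caller is the generic set_pte_range() added later in this series,
and map_folio_range() is a made-up name for this example.

	static void map_folio_range(struct vm_area_struct *vma,
			unsigned long addr, struct folio *folio,
			unsigned long idx, unsigned int nr, pte_t *ptep)
	{
		struct page *page = folio_page(folio, idx);
		pte_t pte = mk_pte(page, vma->vm_page_prot);

		/* Batched replacement for per-page flush_icache_page() */
		flush_icache_pages(vma, page, nr);
		/* Writes nr consecutive PTEs, bumping the PFN for each */
		set_ptes(vma->vm_mm, addr, ptep, pte, nr);
		update_mmu_cache_range(vma, addr, ptep, nr);
	}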

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Helge Deller <deller@gmx.de>
Cc: linux-parisc@vger.kernel.org
---
 arch/parisc/include/asm/cacheflush.h |  14 ++--
 arch/parisc/include/asm/pgtable.h    |  28 +++++---
 arch/parisc/kernel/cache.c           | 101 +++++++++++++++++++--------
 3 files changed, 99 insertions(+), 44 deletions(-)

diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index ff07c509e04b..0bf8b69d086b 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -46,16 +46,20 @@ void invalidate_kernel_vmap_range(void *vaddr, int size);
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-void flush_dcache_page(struct page *page);
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 
 #define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
 
-#define flush_icache_page(vma,page)	do { 		\
-	flush_kernel_dcache_page_addr(page_address(page)); \
-	flush_kernel_icache_page(page_address(page)); 	\
-} while (0)
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+		unsigned int nr);
+#define flush_icache_page(vma, page)	flush_icache_pages(vma, page, 1)
 
 #define flush_icache_range(s,e)		do { 		\
 	flush_kernel_dcache_range_asm(s,e); 		\
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index e2950f5db7c9..78ee9816f423 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -73,14 +73,7 @@ extern void __update_cache(pte_t pte);
 		mb();				\
 	} while(0)
 
-#define set_pte_at(mm, addr, pteptr, pteval)	\
-	do {					\
-		if (pte_present(pteval) &&	\
-		    pte_user(pteval))		\
-			__update_cache(pteval);	\
-		*(pteptr) = (pteval);		\
-		purge_tlb_entries(mm, addr);	\
-	} while (0)
+#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
 
 #endif /* !__ASSEMBLY__ */
 
@@ -391,11 +384,28 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 
 extern void paging_init (void);
 
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte, unsigned int nr)
+{
+	if (pte_present(pte) && pte_user(pte))
+		__update_cache(pte);
+	for (;;) {
+		*ptep = pte;
+		purge_tlb_entries(mm, addr);
+		if (--nr == 0)
+			break;
+		ptep++;
+		pte_val(pte) += 1 << PFN_PTE_SHIFT;
+		addr += PAGE_SIZE;
+	}
+}
+
 /* Used for deferring calls to flush_dcache_page() */
 
 #define PG_dcache_dirty         PG_arch_1
 
-#define update_mmu_cache(vms,addr,ptep) __update_cache(*ptep)
+#define update_mmu_cache_range(vma, addr, ptep, nr) __update_cache(*ptep)
+#define update_mmu_cache(vma, addr, ptep) __update_cache(*ptep)
 
 /*
  * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 984d3a1b3828..16057812103b 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -92,11 +92,11 @@ static inline void flush_data_cache(void)
 /* Kernel virtual address of pfn.  */
 #define pfn_va(pfn)	__va(PFN_PHYS(pfn))
 
-void
-__update_cache(pte_t pte)
+void __update_cache(pte_t pte)
 {
 	unsigned long pfn = pte_pfn(pte);
-	struct page *page;
+	struct folio *folio;
+	unsigned int nr;
 
 	/* We don't have pte special.  As a result, we can be called with
 	   an invalid pfn and we don't need to flush the kernel dcache page.
@@ -104,13 +104,17 @@ __update_cache(pte_t pte)
 	if (!pfn_valid(pfn))
 		return;
 
-	page = pfn_to_page(pfn);
-	if (page_mapping_file(page) &&
-	    test_bit(PG_dcache_dirty, &page->flags)) {
-		flush_kernel_dcache_page_addr(pfn_va(pfn));
-		clear_bit(PG_dcache_dirty, &page->flags);
+	folio = page_folio(pfn_to_page(pfn));
+	pfn = folio_pfn(folio);
+	nr = folio_nr_pages(folio);
+	if (folio_flush_mapping(folio) &&
+	    test_bit(PG_dcache_dirty, &folio->flags)) {
+		while (nr--)
+			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
+		clear_bit(PG_dcache_dirty, &folio->flags);
 	} else if (parisc_requires_coherency())
-		flush_kernel_dcache_page_addr(pfn_va(pfn));
+		while (nr--)
+			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
 }
 
 void
@@ -365,6 +369,20 @@ static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmad
 	preempt_enable();
 }
 
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+		unsigned int nr)
+{
+	void *kaddr = page_address(page);
+
+	for (;;) {
+		flush_kernel_dcache_page_addr(kaddr);
+		flush_kernel_icache_page(kaddr);
+		if (--nr == 0)
+			break;
+		kaddr += PAGE_SIZE;
+	}
+}
+
 static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
 {
 	pte_t *ptep = NULL;
@@ -393,26 +411,30 @@ static inline bool pte_needs_flush(pte_t pte)
 		== (_PAGE_PRESENT | _PAGE_ACCESSED);
 }
 
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping_file(page);
-	struct vm_area_struct *mpnt;
-	unsigned long offset;
+	struct address_space *mapping = folio_flush_mapping(folio);
+	struct vm_area_struct *vma;
 	unsigned long addr, old_addr = 0;
+	void *kaddr;
 	unsigned long count = 0;
+	unsigned long i, nr;
 	pgoff_t pgoff;
 
 	if (mapping && !mapping_mapped(mapping)) {
-		set_bit(PG_dcache_dirty, &page->flags);
+		set_bit(PG_dcache_dirty, &folio->flags);
 		return;
 	}
 
-	flush_kernel_dcache_page_addr(page_address(page));
+	nr = folio_nr_pages(folio);
+	kaddr = folio_address(folio);
+	for (i = 0; i < nr; i++)
+		flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);
 
 	if (!mapping)
 		return;
 
-	pgoff = page->index;
+	pgoff = folio->index;
 
 	/*
 	 * We have carefully arranged in arch_get_unmapped_area() that
@@ -422,15 +444,29 @@ void flush_dcache_page(struct page *page)
 	 * on machines that support equivalent aliasing
 	 */
 	flush_dcache_mmap_lock(mapping);
-	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
-		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-		addr = mpnt->vm_start + offset;
-		if (parisc_requires_coherency()) {
-			pte_t *ptep;
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
+		unsigned long offset = pgoff - vma->vm_pgoff;
+		unsigned long pfn = folio_pfn(folio);
+
+		addr = vma->vm_start;
+		nr = folio_nr_pages(folio);
+		if (offset > -nr) {
+			pfn -= offset;
+			nr += offset;
+		} else {
+			addr += offset * PAGE_SIZE;
+		}
+		if (addr + nr * PAGE_SIZE > vma->vm_end)
+			nr = (vma->vm_end - addr) / PAGE_SIZE;
 
-			ptep = get_ptep(mpnt->vm_mm, addr);
-			if (ptep && pte_needs_flush(*ptep))
-				flush_user_cache_page(mpnt, addr);
+		if (parisc_requires_coherency()) {
+			for (i = 0; i < nr; i++) {
+				pte_t *ptep = get_ptep(vma->vm_mm,
+							addr + i * PAGE_SIZE);
+				if (ptep && pte_needs_flush(*ptep))
+					flush_user_cache_page(vma,
+							addr + i * PAGE_SIZE);
+			}
 		} else {
 			/*
 			 * The TLB is the engine of coherence on parisc:
@@ -443,27 +479,32 @@ void flush_dcache_page(struct page *page)
 			 * in (until the user or kernel specifically
 			 * accesses it, of course)
 			 */
-			flush_tlb_page(mpnt, addr);
+			for (i = 0; i < nr; i++)
+				flush_tlb_page(vma, addr + i * PAGE_SIZE);
 			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
 					!= (addr & (SHM_COLOUR - 1))) {
-				__flush_cache_page(mpnt, addr, page_to_phys(page));
+				for (i = 0; i < nr; i++)
+					__flush_cache_page(vma,
+						addr + i * PAGE_SIZE,
+						(pfn + i) * PAGE_SIZE);
 				/*
 				 * Software is allowed to have any number
 				 * of private mappings to a page.
 				 */
-				if (!(mpnt->vm_flags & VM_SHARED))
+				if (!(vma->vm_flags & VM_SHARED))
 					continue;
 				if (old_addr)
 					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
-						old_addr, addr, mpnt->vm_file);
-				old_addr = addr;
+						old_addr, addr, vma->vm_file);
+				if (nr == folio_nr_pages(folio))
+					old_addr = addr;
 			}
 		}
 		WARN_ON(++count == 4096);
 	}
 	flush_dcache_mmap_unlock(mapping);
 }
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(flush_dcache_folio);
 
 /* Defined in arch/parisc/kernel/pacache.S */
 EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
-- 
2.39.1

