All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] Revert "MIPS: cache: Provide cache flush operations for XFS"
@ 2012-04-09 16:13 Steven J. Hill
  2012-04-10  2:44 ` Kevin Cernekee
  0 siblings, 1 reply; 6+ messages in thread
From: Steven J. Hill @ 2012-04-09 16:13 UTC (permalink / raw)
  To: linux-mips, ralf; +Cc: Steven J. Hill, Leonid Yegoshin

From: "Steven J. Hill" <sjhill@mips.com>

This reverts commit d759e6bf49a41edd66de7c6480b34cceb88ee29a. The
flush_kernel_vmap_range() and invalidate_kernel_vmap_range() are DMA
cache flushing operations and should be done via _dma_cache_wback_inv()
and _dma_cache_inv().

Signed-off-by: Leonid Yegoshin <yegoshin@mips.com>
Signed-off-by: Steven J. Hill <sjhill@mips.com>
---
 arch/mips/include/asm/cacheflush.h |   43 ++++++++++++++++--------------------
 arch/mips/mm/c-octeon.c            |    6 -----
 arch/mips/mm/c-r3k.c               |    7 ------
 arch/mips/mm/c-r4k.c               |   35 -----------------------------
 arch/mips/mm/c-tx39.c              |    7 ------
 arch/mips/mm/cache.c               |    5 -----
 6 files changed, 19 insertions(+), 84 deletions(-)

diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 69468de..106b61a 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -58,9 +58,28 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 		__flush_anon_page(page, vmaddr);
 }
 
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
+		__flush_dcache_page(page);
+}
+
+static inline void flush_kernel_vmap_range(void *addr, unsigned long size)
+{
+	_dma_cache_wback_inv((unsigned long)addr, size);
+}
+
+static inline void invalidate_kernel_vmap_range(void *addr, unsigned long size)
+{
+	_dma_cache_inv((unsigned long)addr, size);
+}
+
 static inline void flush_icache_page(struct vm_area_struct *vma,
 	struct page *page)
 {
+	if (cpu_has_dc_aliases || ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc))
+		__flush_dcache_page(page);
 }
 
 extern void (*flush_icache_range)(unsigned long start, unsigned long end);
@@ -114,28 +133,4 @@ unsigned long run_uncached(void *func);
 extern void *kmap_coherent(struct page *page, unsigned long addr);
 extern void kunmap_coherent(void);
 
-#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
-}
-
-/*
- * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
- * cache writeback and invalidate operation.
- */
-extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
-
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-	if (cpu_has_dc_aliases)
-		__flush_kernel_vmap_range((unsigned long) vaddr, size);
-}
-
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-	if (cpu_has_dc_aliases)
-		__flush_kernel_vmap_range((unsigned long) vaddr, size);
-}
-
 #endif /* _ASM_CACHEFLUSH_H */
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 1f9ca07..109d707 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -168,10 +168,6 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma,
 		octeon_flush_icache_all_cores(vma);
 }
 
-static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
-{
-	BUG();
-}
 
 /**
  * Probe Octeon's caches
@@ -276,8 +272,6 @@ void __cpuinit octeon_cache_init(void)
 	flush_icache_range		= octeon_flush_icache_range;
 	local_flush_icache_range	= local_octeon_flush_icache_range;
 
-	__flush_kernel_vmap_range	= octeon_flush_kernel_vmap_range;
-
 	build_clear_page();
 	build_copy_page();
 }
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 031c4c2..89f1ca2 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -298,11 +298,6 @@ static void r3k_flush_cache_sigtramp(unsigned long addr)
 	write_c0_status(flags);
 }
 
-static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
-{
-	BUG();
-}
-
 static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
 {
 	/* Catch bad driver code */
@@ -327,8 +322,6 @@ void __cpuinit r3k_cache_init(void)
 	flush_icache_range = r3k_flush_icache_range;
 	local_flush_icache_range = r3k_flush_icache_range;
 
-	__flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;
-
 	flush_cache_sigtramp = r3k_flush_cache_sigtramp;
 	local_flush_data_cache_page = local_r3k_flush_data_cache_page;
 	flush_data_cache_page = r3k_flush_data_cache_page;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 821a8bd..d3adcb6 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -721,39 +721,6 @@ static void r4k_flush_icache_all(void)
 		r4k_blast_icache();
 }
 
-struct flush_kernel_vmap_range_args {
-	unsigned long	vaddr;
-	int		size;
-};
-
-static inline void local_r4k_flush_kernel_vmap_range(void *args)
-{
-	struct flush_kernel_vmap_range_args *vmra = args;
-	unsigned long vaddr = vmra->vaddr;
-	int size = vmra->size;
-
-	/*
-	 * Aliases only affect the primary caches so don't bother with
-	 * S-caches or T-caches.
-	 */
-	if (cpu_has_safe_index_cacheops && size >= dcache_size)
-		r4k_blast_dcache();
-	else {
-		R4600_HIT_CACHEOP_WAR_IMPL;
-		blast_dcache_range(vaddr, vaddr + size);
-	}
-}
-
-static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
-{
-	struct flush_kernel_vmap_range_args args;
-
-	args.vaddr = (unsigned long) vaddr;
-	args.size = size;
-
-	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
-}
-
 static inline void rm7k_erratum31(void)
 {
 	const unsigned long ic_lsize = 32;
@@ -1449,8 +1416,6 @@ void __cpuinit r4k_cache_init(void)
 	flush_cache_page	= r4k_flush_cache_page;
 	flush_cache_range	= r4k_flush_cache_range;
 
-	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
-
 	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
 	flush_icache_all	= r4k_flush_icache_all;
 	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 87d23ca..b305187 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -252,11 +252,6 @@ static void tx39_flush_icache_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
-{
-	BUG();
-}
-
 static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 {
 	unsigned long end;
@@ -398,8 +393,6 @@ void __cpuinit tx39_cache_init(void)
 		flush_icache_range = tx39_flush_icache_range;
 		local_flush_icache_range = tx39_flush_icache_range;
 
-		__flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;
-
 		flush_cache_sigtramp = tx39_flush_cache_sigtramp;
 		local_flush_data_cache_page = local_tx39_flush_data_cache_page;
 		flush_data_cache_page = tx39_flush_data_cache_page;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 829320c..12af739 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -35,11 +35,6 @@ void (*local_flush_icache_range)(unsigned long start, unsigned long end);
 void (*__flush_cache_vmap)(void);
 void (*__flush_cache_vunmap)(void);
 
-void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
-void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);
-
-EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
-
 /* MIPS specific cache operations */
 void (*flush_cache_sigtramp)(unsigned long addr);
 void (*local_flush_data_cache_page)(void * addr);
-- 
1.7.9.6

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH] Revert "MIPS: cache: Provide cache flush operations for XFS"
  2012-04-09 16:13 [PATCH] Revert "MIPS: cache: Provide cache flush operations for XFS" Steven J. Hill
@ 2012-04-10  2:44 ` Kevin Cernekee
  2012-04-10 18:03     ` Leonid Yegoshin
  0 siblings, 1 reply; 6+ messages in thread
From: Kevin Cernekee @ 2012-04-10  2:44 UTC (permalink / raw)
  To: Steven J. Hill; +Cc: linux-mips, ralf, Leonid Yegoshin

On Mon, Apr 9, 2012 at 9:13 AM, Steven J. Hill <sjhill@mips.com> wrote:
> This reverts commit d759e6bf49a41edd66de7c6480b34cceb88ee29a.

It may be clearer to say "refactors" instead of "reverts," as this
patch reimplements the same feature in a different way.

If the change was completely reverting the MIPS *_kernel_vmap_range
functions, that would make me very uneasy as XFS will not run reliably
on my systems without them.

Also, the commit hash is d9cdc901af0f92da7f90c750d8c187f5500be067 in
Linus' tree.

> The
> flush_kernel_vmap_range() and invalidate_kernel_vmap_range() are DMA
> cache flushing operations and should be done via _dma_cache_wback_inv()
> and _dma_cache_inv().

I think there is some ambiguity here.

Documentation/cachetlb.txt says:

"Since kernel I/O goes via physical pages, the I/O subsystem assumes
that the user mapping and kernel offset mapping are the only aliases.
This isn't true for vmap aliases, so anything in the kernel trying to
do I/O to vmap areas must manually manage coherency.  It must do this
by flushing the vmap range before doing I/O and invalidating it after
the I/O returns."

"The design is to make this area safe to perform I/O on."

This appears to support your statement.

However, actual usage suggests that the *_kernel_vmap_range functions
are used way up at the filesystem layer just to get the data out of
the "wrong colored" D$ lines on systems with cache aliases:

		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);

Then eventually, something like dma_map_sg() is called from a lower
level driver (ala ata_sg_setup()), to perform the full L1+L2
cacheflush on each page involved in the transaction.

I would also note that on ARM these operations turn into NOPs on VIPT
non-aliasing CPUs, even if their caches are noncoherent:

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
	  __cpuc_flush_dcache_area(addr, (size_t)size);
}

I suspect that reimplementing the *_kernel_vmap_range functions using
_dma_cache_* would result in a double L2 flush on the same memory
regions on systems with cache aliases, and an unnecessary L1+L2 flush
on systems without aliases.

I don't see any way that the *_kernel_vmap_range functions could be
called in lieu of the dma_map_* functions, as they are missing
critical elements such as "returning the mapped PA" and calls to the
plat_* functions.  So, any memory getting flushed through
*_kernel_vmap_range will still need to use dma_map_* for DMA.

Is there a reason why Ralf's original approach was not workable?

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] Revert "MIPS: cache: Provide cache flush operations for XFS"
@ 2012-04-10 18:03     ` Leonid Yegoshin
  0 siblings, 0 replies; 6+ messages in thread
From: Leonid Yegoshin @ 2012-04-10 18:03 UTC (permalink / raw)
  To: Kevin Cernekee; +Cc: Steven J. Hill, linux-mips, ralf

On 04/09/2012 07:44 PM, Kevin Cernekee wrote:
> Is there a reason why Ralf's original approach was not workable?

It doesn't work with HIGHMEM + cache aliasing. It also uses a cache flush 
(blast_dcache) on the buffer instead of a cache invalidate after read I/O 
is completed.


> I suspect that reimplementing the *_kernel_vmap_range functions using
> _dma_cache_* would result in a double L2 flush on the same memory
> regions on systems with cache aliases, and an unnecessary L1+L2 flush
> on systems without aliases.

Good point. I implemented it that way to get it working. Now, after your 
comment, I think it makes sense to try flushing L1 only.

- Leonid.

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] Revert "MIPS: cache: Provide cache flush operations for XFS"
@ 2012-04-10 18:03     ` Leonid Yegoshin
  0 siblings, 0 replies; 6+ messages in thread
From: Leonid Yegoshin @ 2012-04-10 18:03 UTC (permalink / raw)
  To: Kevin Cernekee; +Cc: Steven J. Hill, linux-mips, ralf

On 04/09/2012 07:44 PM, Kevin Cernekee wrote:
> Is there a reason why Ralf's original approach was not workable?

It doesn't work with HIGHMEM + cache aliasing. It also uses a cache flush 
(blast_dcache) on the buffer instead of a cache invalidate after read I/O 
is completed.


> I suspect that reimplementing the *_kernel_vmap_range functions using
> _dma_cache_* would result in a double L2 flush on the same memory
> regions on systems with cache aliases, and an unnecessary L1+L2 flush
> on systems without aliases.

Good point. I implemented it that way to get it working. Now, after your 
comment, I think it makes sense to try flushing L1 only.

- Leonid.

^ permalink raw reply	[flat|nested] 6+ messages in thread

* RE: [PATCH] Revert "MIPS: cache: Provide cache flush operations for XFS"
  2012-04-10 18:03     ` Leonid Yegoshin
  (?)
@ 2012-05-07 19:21     ` Hill, Steven
  2012-05-07 19:28       ` Leonid Yegoshin
  -1 siblings, 1 reply; 6+ messages in thread
From: Hill, Steven @ 2012-05-07 19:21 UTC (permalink / raw)
  To: Yegoshin, Leonid, Kevin Cernekee; +Cc: linux-mips, ralf

Leonid,

I am dropping this patch for the 3.5 release too.

-Steve

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] Revert "MIPS: cache: Provide cache flush operations for XFS"
  2012-05-07 19:21     ` Hill, Steven
@ 2012-05-07 19:28       ` Leonid Yegoshin
  0 siblings, 0 replies; 6+ messages in thread
From: Leonid Yegoshin @ 2012-05-07 19:28 UTC (permalink / raw)
  To: Hill, Steven; +Cc: Kevin Cernekee, linux-mips, ralf

Steven,

On 05/07/2012 12:21 PM, Hill, Steven wrote:
> Leonid,
>
> I am dropping this patch for the 3.5 release too.
>
> -Steve

I don't care - we still have it in internal repo, right?

Sometime this/next month I will create a second patch for L1 only 
flushing optimization in this case and we can repeat it in future 
release. Unfortunately, right these weeks I have no time for that 
optimization, sorry for that.

Thank you,

- Leonid.

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2012-05-07 19:28 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-04-09 16:13 [PATCH] Revert "MIPS: cache: Provide cache flush operations for XFS" Steven J. Hill
2012-04-10  2:44 ` Kevin Cernekee
2012-04-10 18:03   ` Leonid Yegoshin
2012-04-10 18:03     ` Leonid Yegoshin
2012-05-07 19:21     ` Hill, Steven
2012-05-07 19:28       ` Leonid Yegoshin

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.