All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] MIPS: Flush cache after DMA_FROM_DEVICE for aggressively speculative CPUs
@ 2015-05-14  1:49 ` Leonid Yegoshin
  0 siblings, 0 replies; 4+ messages in thread
From: Leonid Yegoshin @ 2015-05-14  1:49 UTC (permalink / raw)
  To: mina86, linux-mips, Zubair.Kakakhel, ralf, linux-kernel

Some MIPS CPUs have an aggressive speculative load and may erroneously load
some cache line in the middle of a DMA transaction. The CPU discards the result
but the cache doesn't. If DMA happens from a device then an additional cache
invalidation is needed on those CPUs after DMA.

Found in test.

Signed-off-by: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
---
 arch/mips/mm/dma-default.c |   10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 609d1241b0c4..ccf49ecfbf8c 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -67,11 +67,13 @@ static inline struct page *dma_addr_to_page(struct device *dev,
  * systems and only the R10000 and R12000 are used in such systems, the
  * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
  */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
+static inline int cpu_needs_post_dma_flush(struct device *dev,
+					   enum dma_data_direction direction)
 {
 	return !plat_device_is_coherent(dev) &&
 	       (boot_cpu_type() == CPU_R10000 ||
 		boot_cpu_type() == CPU_R12000 ||
+		(cpu_has_maar && (direction != DMA_TO_DEVICE)) ||
 		boot_cpu_type() == CPU_BMIPS5000);
 }
 
@@ -255,7 +257,7 @@ static inline void __dma_sync(struct page *page,
 static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
-	if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush(dev, direction))
 		__dma_sync(dma_addr_to_page(dev, dma_addr),
 			   dma_addr & ~PAGE_MASK, size, direction);
 	plat_post_dma_flush(dev);
@@ -309,7 +311,7 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 static void mips_dma_sync_single_for_cpu(struct device *dev,
 	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-	if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush(dev, direction))
 		__dma_sync(dma_addr_to_page(dev, dma_handle),
 			   dma_handle & ~PAGE_MASK, size, direction);
 	plat_post_dma_flush(dev);
@@ -328,7 +330,7 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
 {
 	int i;
 
-	if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush(dev, direction))
 		for (i = 0; i < nelems; i++, sg++)
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH] MIPS: Flush cache after DMA_FROM_DEVICE for aggressively speculative CPUs
@ 2015-05-14  1:49 ` Leonid Yegoshin
  0 siblings, 0 replies; 4+ messages in thread
From: Leonid Yegoshin @ 2015-05-14  1:49 UTC (permalink / raw)
  To: mina86, linux-mips, Zubair.Kakakhel, ralf, linux-kernel

Some MIPS CPUs have an aggressive speculative load and may erroneously load
some cache line in the middle of a DMA transaction. The CPU discards the result
but the cache doesn't. If DMA happens from a device then an additional cache
invalidation is needed on those CPUs after DMA.

Found in test.

Signed-off-by: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
---
 arch/mips/mm/dma-default.c |   10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 609d1241b0c4..ccf49ecfbf8c 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -67,11 +67,13 @@ static inline struct page *dma_addr_to_page(struct device *dev,
  * systems and only the R10000 and R12000 are used in such systems, the
  * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
  */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
+static inline int cpu_needs_post_dma_flush(struct device *dev,
+					   enum dma_data_direction direction)
 {
 	return !plat_device_is_coherent(dev) &&
 	       (boot_cpu_type() == CPU_R10000 ||
 		boot_cpu_type() == CPU_R12000 ||
+		(cpu_has_maar && (direction != DMA_TO_DEVICE)) ||
 		boot_cpu_type() == CPU_BMIPS5000);
 }
 
@@ -255,7 +257,7 @@ static inline void __dma_sync(struct page *page,
 static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
-	if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush(dev, direction))
 		__dma_sync(dma_addr_to_page(dev, dma_addr),
 			   dma_addr & ~PAGE_MASK, size, direction);
 	plat_post_dma_flush(dev);
@@ -309,7 +311,7 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 static void mips_dma_sync_single_for_cpu(struct device *dev,
 	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-	if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush(dev, direction))
 		__dma_sync(dma_addr_to_page(dev, dma_handle),
 			   dma_handle & ~PAGE_MASK, size, direction);
 	plat_post_dma_flush(dev);
@@ -328,7 +330,7 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
 {
 	int i;
 
-	if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush(dev, direction))
 		for (i = 0; i < nelems; i++, sg++)
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH] MIPS: Flush cache after DMA_FROM_DEVICE for aggressively speculative CPUs
  2015-05-14  1:49 ` Leonid Yegoshin
  (?)
@ 2015-05-14  2:49 ` Kevin Cernekee
  2015-05-14 22:42   ` Florian Fainelli
  -1 siblings, 1 reply; 4+ messages in thread
From: Kevin Cernekee @ 2015-05-14  2:49 UTC (permalink / raw)
  To: Leonid Yegoshin
  Cc: mina86, Linux MIPS Mailing List, Zubair.Kakakhel, Ralf Baechle,
	linux-kernel, Florian Fainelli

On Wed, May 13, 2015 at 6:49 PM, Leonid Yegoshin
<Leonid.Yegoshin@imgtec.com> wrote:
> Some MIPS CPUs have an aggressive speculative load and may erroneuosly load
> some cache line in the middle of DMA transaction. CPU discards result but cache
> doesn't. If DMA happens from device then additional cache invalidation is needed
> on that CPU's after DMA.
>
> Found in test.
>
> Signed-off-by: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
> ---
>  arch/mips/mm/dma-default.c |   10 ++++++----
>  1 file changed, 6 insertions(+), 4 deletions(-)
>
> diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
> index 609d1241b0c4..ccf49ecfbf8c 100644
> --- a/arch/mips/mm/dma-default.c
> +++ b/arch/mips/mm/dma-default.c
> @@ -67,11 +67,13 @@ static inline struct page *dma_addr_to_page(struct device *dev,
>   * systems and only the R10000 and R12000 are used in such systems, the
>   * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
>   */
> -static inline int cpu_needs_post_dma_flush(struct device *dev)
> +static inline int cpu_needs_post_dma_flush(struct device *dev,
> +                                          enum dma_data_direction direction)
>  {
>         return !plat_device_is_coherent(dev) &&
>                (boot_cpu_type() == CPU_R10000 ||
>                 boot_cpu_type() == CPU_R12000 ||
> +               (cpu_has_maar && (direction != DMA_TO_DEVICE)) ||
>                 boot_cpu_type() == CPU_BMIPS5000);

Can all of these CPUs safely skip the post_dma_flush on DMA_TO_DEVICE
(not just maar)?

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH] MIPS: Flush cache after DMA_FROM_DEVICE for aggressively speculative CPUs
  2015-05-14  2:49 ` Kevin Cernekee
@ 2015-05-14 22:42   ` Florian Fainelli
  0 siblings, 0 replies; 4+ messages in thread
From: Florian Fainelli @ 2015-05-14 22:42 UTC (permalink / raw)
  To: Kevin Cernekee, Leonid Yegoshin
  Cc: mina86, Linux MIPS Mailing List, Zubair.Kakakhel, Ralf Baechle,
	linux-kernel

On 13/05/15 19:49, Kevin Cernekee wrote:
> On Wed, May 13, 2015 at 6:49 PM, Leonid Yegoshin
> <Leonid.Yegoshin@imgtec.com> wrote:
>> Some MIPS CPUs have an aggressive speculative load and may erroneuosly load
>> some cache line in the middle of DMA transaction. CPU discards result but cache
>> doesn't. If DMA happens from device then additional cache invalidation is needed
>> on that CPU's after DMA.
>>
>> Found in test.
>>
>> Signed-off-by: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
>> ---
>>  arch/mips/mm/dma-default.c |   10 ++++++----
>>  1 file changed, 6 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
>> index 609d1241b0c4..ccf49ecfbf8c 100644
>> --- a/arch/mips/mm/dma-default.c
>> +++ b/arch/mips/mm/dma-default.c
>> @@ -67,11 +67,13 @@ static inline struct page *dma_addr_to_page(struct device *dev,
>>   * systems and only the R10000 and R12000 are used in such systems, the
>>   * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
>>   */
>> -static inline int cpu_needs_post_dma_flush(struct device *dev)
>> +static inline int cpu_needs_post_dma_flush(struct device *dev,
>> +                                          enum dma_data_direction direction)
>>  {
>>         return !plat_device_is_coherent(dev) &&
>>                (boot_cpu_type() == CPU_R10000 ||
>>                 boot_cpu_type() == CPU_R12000 ||
>> +               (cpu_has_maar && (direction != DMA_TO_DEVICE)) ||
>>                 boot_cpu_type() == CPU_BMIPS5000);
> 
> Can all of these CPUs safely skip the post_dma_flush on DMA_TO_DEVICE
> (not just maar)?

Agreed that would seem like the logical thing to do. You could then just
skip the call to cpu_needs_post_dma_flush() and move the direction test
in arch/mips/mm/dma-default.c for instance?
-- 
Florian

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2015-05-14 22:43 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-05-14  1:49 [PATCH] MIPS: Flush cache after DMA_FROM_DEVICE for aggressively speculative CPUs Leonid Yegoshin
2015-05-14  1:49 ` Leonid Yegoshin
2015-05-14  2:49 ` Kevin Cernekee
2015-05-14 22:42   ` Florian Fainelli

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.