* [PATCH v5 2/2] OMAP:IOMMU:flush L1 and L2 caches
From: Gupta, Ramesh @ 2012-09-12 15:19 UTC
  To: linux-arm-kernel, linux-kernel, linux-omap; +Cc: Russell King - ARM Linux, tony

From d78ddb5b0dffed3fd77e6e010735e869ea41b02f Mon Sep 17 00:00:00 2001
From: Ramesh Gupta G <grgupta@ti.com>
Date: Wed, 12 Sep 2012 19:05:29 +0530
Subject: [PATCH v5 2/2] OMAP:IOMMU:flush L1 and L2 caches

The OMAP IOMMU needs to make sure that data in the L1 and L2
caches is visible to the MMU hardware whenever the page tables
are updated. The current code takes care of only the L1 cache,
using inline assembly. Add code to handle both cache levels,
using a new L1 cache maintenance function together with the
existing outer cache function.
Thanks to RMK for the suggestions.

Signed-off-by: Ramesh Gupta G <grgupta@ti.com>
---
 drivers/iommu/omap-iommu.c |   41 +++++++++++++++++++----------------------
 1 files changed, 19 insertions(+), 22 deletions(-)

diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index d0b1234..8f61ef9 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -469,24 +469,21 @@ EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
 /*
  *	H/W pagetable operations
  */
-static void flush_iopgd_range(u32 *first, u32 *last)
+static void flush_iopgd_range(u32 *first, size_t size)
 {
-	/* FIXME: L2 cache should be taken care of if it exists */
-	do {
-		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
-		    : : "r" (first));
-		first += L1_CACHE_BYTES / sizeof(*first);
-	} while (first <= last);
+	phys_addr_t phys = virt_to_phys(first);
+
+	iommu_flush_area(first, size);
+	outer_flush_range(phys, phys + size);
 }

-static void flush_iopte_range(u32 *first, u32 *last)
+static void flush_iopte_range(u32 *first, size_t size)
+
 {
-	/* FIXME: L2 cache should be taken care of if it exists */
-	do {
-		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
-		    : : "r" (first));
-		first += L1_CACHE_BYTES / sizeof(*first);
-	} while (first <= last);
+	phys_addr_t phys = virt_to_phys(first);
+
+	iommu_flush_area(first, size);
+	outer_flush_range(phys, phys + size);
 }

 static void iopte_free(u32 *iopte)
@@ -515,7 +512,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
 			return ERR_PTR(-ENOMEM);

 		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
-		flush_iopgd_range(iopgd, iopgd);
+		flush_iopgd_range(iopgd, sizeof(*iopgd));

 		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
 	} else {
@@ -544,7 +541,7 @@ static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 	}

 	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
-	flush_iopgd_range(iopgd, iopgd);
+	flush_iopgd_range(iopgd, sizeof(*iopgd));
 	return 0;
 }

@@ -561,7 +558,7 @@ static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)

 	for (i = 0; i < 16; i++)
 		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
-	flush_iopgd_range(iopgd, iopgd + 15);
+	flush_iopgd_range(iopgd, sizeof(*iopgd) * 16);
 	return 0;
 }

@@ -574,7 +571,7 @@ static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 		return PTR_ERR(iopte);

 	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
-	flush_iopte_range(iopte, iopte);
+	flush_iopte_range(iopte, sizeof(*iopte));

 	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
 		 __func__, da, pa, iopte, *iopte);
@@ -599,7 +596,7 @@ static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)

 	for (i = 0; i < 16; i++)
 		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
-	flush_iopte_range(iopte, iopte + 15);
+	flush_iopte_range(iopte, sizeof(*iopte) * 16);
 	return 0;
 }

@@ -702,7 +699,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
 		}
 		bytes *= nent;
 		memset(iopte, 0, nent * sizeof(*iopte));
-		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));
+		flush_iopte_range(iopte, iopte + (nent) * sizeof(*iopte));

 		/*
 		 * do table walk to check if this table is necessary or not
@@ -724,7 +721,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
 		bytes *= nent;
 	}
 	memset(iopgd, 0, nent * sizeof(*iopgd));
-	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
+	flush_iopgd_range(iopgd, iopgd + (nent) * sizeof(*iopgd));
 out:
 	return bytes;
 }
@@ -768,7 +765,7 @@ static void iopgtable_clear_entry_all(struct omap_iommu *obj)
 			iopte_free(iopte_offset(iopgd, 0));

 		*iopgd = 0;
-		flush_iopgd_range(iopgd, iopgd);
+		flush_iopgd_range(iopgd, sizeof(*iopgd));
 	}

 	flush_iotlb_all(obj);
-- 
1.7.0.4


-- 
regards
Ramesh Gupta G
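
The new L1 cache maintenance function named above, iommu_flush_area(), is
added by patch 1/2, which is not included in this thread.  Purely as an
assumed sketch (the real patch 1/2 may differ), such a helper could wrap
the existing __cpuc_flush_dcache_area() clean-and-invalidate hook, leaving
the outer (L2) cache to the caller:

#include <linux/types.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical sketch only, not the actual patch 1/2: clean and
 * invalidate the inner (L1) D-cache lines covering a kernel-virtual
 * buffer, so that the IOMMU's non-coherent table walker sees freshly
 * written page table entries.
 */
static inline void iommu_flush_area(void *start, size_t size)
{
	__cpuc_flush_dcache_area(start, size);
}

The outer cache half stays with the caller because outer_flush_range()
operates on physical addresses (hence the virt_to_phys() in the patch);
it is a no-op when no outer cache is configured, so calling it
unconditionally is safe on L2-less OMAPs as well.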

* Re: [PATCH v5 2/2] OMAP:IOMMU:flush L1 and L2 caches
From: Russell King - ARM Linux @ 2012-09-12 16:41 UTC
  To: Gupta, Ramesh; +Cc: linux-arm-kernel, linux-kernel, linux-omap, tony

On Wed, Sep 12, 2012 at 08:49:16PM +0530, Gupta, Ramesh wrote:
> Thanks to RMK for the suggestions.

I should've made clear the distinction between _range and _area.
A _range function takes start and end.  An _area function takes a start
and size.  So...

> -static void flush_iopgd_range(u32 *first, u32 *last)
> +static void flush_iopgd_range(u32 *first, size_t size)

This should change to flush_iopgd_area().

>  {
> -	/* FIXME: L2 cache should be taken care of if it exists */
> -	do {
> -		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
> -		    : : "r" (first));
> -		first += L1_CACHE_BYTES / sizeof(*first);
> -	} while (first <= last);
> +	phys_addr_t phys = virt_to_phys(first);
> +
> +	iommu_flush_area(first, size);
> +	outer_flush_range(phys, phys + size);
>  }
> 
> -static void flush_iopte_range(u32 *first, u32 *last)
> +static void flush_iopte_range(u32 *first, size_t size)
> +

flush_iopte_area().  (And there shouldn't be a blank line between this
and the opening curly brace.)

Otherwise, looks fine.
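
Spelled out as signatures (illustrative names only, these helpers do not
exist in the kernel):

/* _range: start and end pointers */
void flush_foo_range(u32 *first, u32 *last);

/* _area: start pointer plus a size in bytes */
void flush_foo_area(u32 *first, size_t size);

/*
 * The same 16 consecutive page table entries, expressed both ways:
 *	flush_foo_range(iopte, iopte + 15);          last entry, inclusive (old style)
 *	flush_foo_area(iopte, 16 * sizeof(*iopte));  size in bytes (this patch's style)
 */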

* Re: [PATCH v5 2/2] OMAP:IOMMU:flush L1 and L2 caches
From: Gupta, Ramesh @ 2012-09-13  4:34 UTC
  To: Russell King - ARM Linux; +Cc: linux-arm-kernel, linux-kernel, linux-omap, tony

Hi Russell,


On Wed, Sep 12, 2012 at 10:11 PM, Russell King - ARM Linux
<linux@arm.linux.org.uk> wrote:
> On Wed, Sep 12, 2012 at 08:49:16PM +0530, Gupta, Ramesh wrote:
>> Thanks to RMK for the suggestions.
>
> I should've made clear the distinction between _range and _area.
> A _range function takes start and end.  An _area function takes a start
> and size.  So...
>
>> -static void flush_iopgd_range(u32 *first, u32 *last)
>> +static void flush_iopgd_range(u32 *first, size_t size)
>
> This should change to flush_iopgd_area().

Looks like I missed this; I will fix it and send an updated patch.

>
>>  {
>> -     /* FIXME: L2 cache should be taken care of if it exists */
>> -     do {
>> -             asm("mcr        p15, 0, %0, c7, c10, 1 @ flush_pgd"
>> -                 : : "r" (first));
>> -             first += L1_CACHE_BYTES / sizeof(*first);
>> -     } while (first <= last);
>> +     phys_addr_t phys = virt_to_phys(first);
>> +
>> +     iommu_flush_area(first, size);
>> +     outer_flush_range(phys, phys + size);
>>  }
>>
>> -static void flush_iopte_range(u32 *first, u32 *last)
>> +static void flush_iopte_range(u32 *first, size_t size)
>> +
>
> flush_iopte_area().  (And there shouldn't be a blank line between this
> and the opening curly brace.)

I will fix it.

> Otherwise, looks fine.

Thank you.


Best regards
Ramesh Gupta G

* [PATCH v6 2/2] OMAP:IOMMU:flush L1 and L2 caches
From: Gupta, Ramesh @ 2012-09-13  7:12 UTC
  To: linux-arm-kernel, linux-kernel, linux-omap; +Cc: Russell King - ARM Linux, tony

From a00cbfadc0053a3c21812593997a1b7338234a9f Mon Sep 17 00:00:00 2001
From: Ramesh Gupta G <grgupta@ti.com>
Date: Wed, 12 Sep 2012 19:05:29 +0530
Subject: [PATCH v6 2/2] OMAP:IOMMU:flush L1 and L2 caches

The OMAP IOMMU needs to make sure that data in the L1 and L2
caches is visible to the MMU hardware whenever the page tables
are updated. The current code takes care of only the L1 cache,
using inline assembly. Add code to handle both cache levels,
using a new L1 cache maintenance function together with the
existing outer cache function.
Thanks to RMK for the suggestions.

Signed-off-by: Ramesh Gupta G <grgupta@ti.com>
---
 drivers/iommu/omap-iommu.c |   40 ++++++++++++++++++----------------------
 1 files changed, 18 insertions(+), 22 deletions(-)

diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index d0b1234..d399493 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -469,24 +469,20 @@ EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
 /*
  *	H/W pagetable operations
  */
-static void flush_iopgd_range(u32 *first, u32 *last)
+static void flush_iopgd_area(u32 *first, size_t size)
 {
-	/* FIXME: L2 cache should be taken care of if it exists */
-	do {
-		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
-		    : : "r" (first));
-		first += L1_CACHE_BYTES / sizeof(*first);
-	} while (first <= last);
+	phys_addr_t phys = virt_to_phys(first);
+
+	iommu_flush_area(first, size);
+	outer_flush_range(phys, phys + size);
 }

-static void flush_iopte_range(u32 *first, u32 *last)
+static void flush_iopte_area(u32 *first, size_t size)
 {
-	/* FIXME: L2 cache should be taken care of if it exists */
-	do {
-		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
-		    : : "r" (first));
-		first += L1_CACHE_BYTES / sizeof(*first);
-	} while (first <= last);
+	phys_addr_t phys = virt_to_phys(first);
+
+	iommu_flush_area(first, size);
+	outer_flush_range(phys, phys + size);
 }

 static void iopte_free(u32 *iopte)
@@ -515,7 +511,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
 			return ERR_PTR(-ENOMEM);

 		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
-		flush_iopgd_range(iopgd, iopgd);
+		flush_iopgd_area(iopgd, sizeof(*iopgd));

 		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
 	} else {
@@ -544,7 +540,7 @@ static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 	}

 	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
-	flush_iopgd_range(iopgd, iopgd);
+	flush_iopgd_area(iopgd, sizeof(*iopgd));
 	return 0;
 }

@@ -561,7 +557,7 @@ static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)

 	for (i = 0; i < 16; i++)
 		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
-	flush_iopgd_range(iopgd, iopgd + 15);
+	flush_iopgd_area(iopgd, sizeof(*iopgd) * 16);
 	return 0;
 }

@@ -574,7 +570,7 @@ static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 		return PTR_ERR(iopte);

 	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
-	flush_iopte_range(iopte, iopte);
+	flush_iopte_area(iopte, sizeof(*iopte));

 	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
 		 __func__, da, pa, iopte, *iopte);
@@ -599,7 +595,7 @@ static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)

 	for (i = 0; i < 16; i++)
 		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
-	flush_iopte_range(iopte, iopte + 15);
+	flush_iopte_area(iopte, sizeof(*iopte) * 16);
 	return 0;
 }

@@ -702,7 +698,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
 		}
 		bytes *= nent;
 		memset(iopte, 0, nent * sizeof(*iopte));
-		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));
+		flush_iopte_area(iopte, (nent) * sizeof(*iopte));

 		/*
 		 * do table walk to check if this table is necessary or not
@@ -724,7 +720,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
 		bytes *= nent;
 	}
 	memset(iopgd, 0, nent * sizeof(*iopgd));
-	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
+	flush_iopgd_area(iopgd, (nent) * sizeof(*iopgd));
 out:
 	return bytes;
 }
@@ -768,7 +764,7 @@ static void iopgtable_clear_entry_all(struct omap_iommu *obj)
 			iopte_free(iopte_offset(iopgd, 0));

 		*iopgd = 0;
-		flush_iopgd_range(iopgd, iopgd);
+		flush_iopgd_area(iopgd, sizeof(*iopgd));
 	}

 	flush_iotlb_all(obj);
-- 
1.7.0.4


-- 
regards
Ramesh Gupta G
