All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] hw/arm/smmuv3: Another range invalidation fix
@ 2021-04-21 17:29 Eric Auger
  2021-05-10 11:31 ` Peter Maydell
  0 siblings, 1 reply; 3+ messages in thread
From: Eric Auger @ 2021-04-21 17:29 UTC (permalink / raw)
  To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell
  Cc: jiangkunkun, peterx, shameerali.kolothum.thodi

6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range")
failed to completely fix misalignment issues with range
invalidation. For instance, invalidation patterns like "invalidate 32
4kB pages starting from 0xff395000" are not correctly handled, due
to the fact that the previous fix only made sure the number of invalidated
pages was a power of 2, but did not properly handle the case where the start
address was not aligned with the range. This can be noticed when
booting a Fedora 33 with protected virtio-blk-pci.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Fixes: 6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range")

---

This bug was found with SMMU RIL avocado-qemu acceptance tests
---
 hw/arm/smmuv3.c | 49 +++++++++++++++++++++++++------------------------
 1 file changed, 25 insertions(+), 24 deletions(-)

diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 8705612535..16f285a566 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -856,43 +856,44 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
 
 static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
 {
-    uint8_t scale = 0, num = 0, ttl = 0;
-    dma_addr_t addr = CMD_ADDR(cmd);
+    dma_addr_t end, addr = CMD_ADDR(cmd);
     uint8_t type = CMD_TYPE(cmd);
     uint16_t vmid = CMD_VMID(cmd);
+    uint8_t scale = CMD_SCALE(cmd);
+    uint8_t num = CMD_NUM(cmd);
+    uint8_t ttl = CMD_TTL(cmd);
     bool leaf = CMD_LEAF(cmd);
     uint8_t tg = CMD_TG(cmd);
-    uint64_t first_page = 0, last_page;
-    uint64_t num_pages = 1;
+    uint64_t num_pages;
+    uint8_t granule;
     int asid = -1;
 
-    if (tg) {
-        scale = CMD_SCALE(cmd);
-        num = CMD_NUM(cmd);
-        ttl = CMD_TTL(cmd);
-        num_pages = (num + 1) * BIT_ULL(scale);
-    }
-
     if (type == SMMU_CMD_TLBI_NH_VA) {
         asid = CMD_ASID(cmd);
     }
 
+    if (!tg) {
+        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
+        smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
+        smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
+    }
+
+    /* RIL in use */
+
+    num_pages = (num + 1) * BIT_ULL(scale);
+    granule = tg * 2 + 10;
+
     /* Split invalidations into ^2 range invalidations */
-    last_page = num_pages - 1;
-    while (num_pages) {
-        uint8_t granule = tg * 2 + 10;
-        uint64_t mask, count;
+    end = addr + (num_pages << granule) - 1;
 
-        mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
-        count = mask + 1;
+    while (addr != end + 1) {
+        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
 
-        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
-        smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
-        smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
-
-        num_pages -= count;
-        first_page += count;
-        addr += count * BIT_ULL(granule);
+        num_pages = (mask + 1) >> granule;
+        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
+        smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
+        smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
+        addr += mask + 1;
     }
 }
 
-- 
2.26.3



^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] hw/arm/smmuv3: Another range invalidation fix
  2021-04-21 17:29 [PATCH] hw/arm/smmuv3: Another range invalidation fix Eric Auger
@ 2021-05-10 11:31 ` Peter Maydell
  2021-05-10 11:44   ` Auger Eric
  0 siblings, 1 reply; 3+ messages in thread
From: Peter Maydell @ 2021-05-10 11:31 UTC (permalink / raw)
  To: Eric Auger
  Cc: Kunkun Jiang, QEMU Developers, Peter Xu, qemu-arm,
	Shameerali Kolothum Thodi, Eric Auger

On Wed, 21 Apr 2021 at 18:29, Eric Auger <eric.auger@redhat.com> wrote:
>
> 6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range")
> failed to completely fix misalignment issues with range
> invalidation. For instance invalidations patterns like "invalidate 32
> 4kB pages starting from 0xff395000 are not correctly handled" due
> to the fact the previous fix only made sure the number of invalidated
> pages were a power of 2 but did not properly handle the start
> address was not aligned with the range. This can be noticed when
> boothing a fedora 33 with protected virtio-blk-pci.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
> Fixes: 6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range")
>
> ---
>
> This bug was found with SMMU RIL avocado-qemu acceptance tests
> ---
>  hw/arm/smmuv3.c | 49 +++++++++++++++++++++++++------------------------
>  1 file changed, 25 insertions(+), 24 deletions(-)
>
> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
> index 8705612535..16f285a566 100644
> --- a/hw/arm/smmuv3.c
> +++ b/hw/arm/smmuv3.c
> @@ -856,43 +856,44 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
>
>  static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
>  {
> -    uint8_t scale = 0, num = 0, ttl = 0;
> -    dma_addr_t addr = CMD_ADDR(cmd);
> +    dma_addr_t end, addr = CMD_ADDR(cmd);
>      uint8_t type = CMD_TYPE(cmd);
>      uint16_t vmid = CMD_VMID(cmd);
> +    uint8_t scale = CMD_SCALE(cmd);
> +    uint8_t num = CMD_NUM(cmd);
> +    uint8_t ttl = CMD_TTL(cmd);
>      bool leaf = CMD_LEAF(cmd);
>      uint8_t tg = CMD_TG(cmd);
> -    uint64_t first_page = 0, last_page;
> -    uint64_t num_pages = 1;
> +    uint64_t num_pages;
> +    uint8_t granule;
>      int asid = -1;
>
> -    if (tg) {
> -        scale = CMD_SCALE(cmd);
> -        num = CMD_NUM(cmd);
> -        ttl = CMD_TTL(cmd);
> -        num_pages = (num + 1) * BIT_ULL(scale);
> -    }
> -
>      if (type == SMMU_CMD_TLBI_NH_VA) {
>          asid = CMD_ASID(cmd);
>      }
>
> +    if (!tg) {
> +        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
> +        smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
> +        smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
> +    }

Is this intended to fall through ?

> +
> +    /* RIL in use */
> +
> +    num_pages = (num + 1) * BIT_ULL(scale);
> +    granule = tg * 2 + 10;
> +
>      /* Split invalidations into ^2 range invalidations */
> -    last_page = num_pages - 1;
> -    while (num_pages) {
> -        uint8_t granule = tg * 2 + 10;
> -        uint64_t mask, count;
> +    end = addr + (num_pages << granule) - 1;
>
> -        mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
> -        count = mask + 1;
> +    while (addr != end + 1) {
> +        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
>
> -        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
> -        smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
> -        smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
> -
> -        num_pages -= count;
> -        first_page += count;
> -        addr += count * BIT_ULL(granule);
> +        num_pages = (mask + 1) >> granule;
> +        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
> +        smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
> +        smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
> +        addr += mask + 1;
>      }
>  }

thanks
-- PMM


^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] hw/arm/smmuv3: Another range invalidation fix
  2021-05-10 11:31 ` Peter Maydell
@ 2021-05-10 11:44   ` Auger Eric
  0 siblings, 0 replies; 3+ messages in thread
From: Auger Eric @ 2021-05-10 11:44 UTC (permalink / raw)
  To: Peter Maydell
  Cc: Kunkun Jiang, QEMU Developers, Peter Xu, qemu-arm,
	Shameerali Kolothum Thodi, Eric Auger

Hi Peter,

On 5/10/21 1:31 PM, Peter Maydell wrote:
> On Wed, 21 Apr 2021 at 18:29, Eric Auger <eric.auger@redhat.com> wrote:
>>
>> 6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range")
>> failed to completely fix misalignment issues with range
>> invalidation. For instance invalidations patterns like "invalidate 32
>> 4kB pages starting from 0xff395000 are not correctly handled" due
>> to the fact the previous fix only made sure the number of invalidated
>> pages were a power of 2 but did not properly handle the start
>> address was not aligned with the range. This can be noticed when
>> boothing a fedora 33 with protected virtio-blk-pci.
>>
>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>> Fixes: 6d9cd115b9 ("hw/arm/smmuv3: Enforce invalidation on a power of two range")
>>
>> ---
>>
>> This bug was found with SMMU RIL avocado-qemu acceptance tests
>> ---
>>  hw/arm/smmuv3.c | 49 +++++++++++++++++++++++++------------------------
>>  1 file changed, 25 insertions(+), 24 deletions(-)
>>
>> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
>> index 8705612535..16f285a566 100644
>> --- a/hw/arm/smmuv3.c
>> +++ b/hw/arm/smmuv3.c
>> @@ -856,43 +856,44 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
>>
>>  static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
>>  {
>> -    uint8_t scale = 0, num = 0, ttl = 0;
>> -    dma_addr_t addr = CMD_ADDR(cmd);
>> +    dma_addr_t end, addr = CMD_ADDR(cmd);
>>      uint8_t type = CMD_TYPE(cmd);
>>      uint16_t vmid = CMD_VMID(cmd);
>> +    uint8_t scale = CMD_SCALE(cmd);
>> +    uint8_t num = CMD_NUM(cmd);
>> +    uint8_t ttl = CMD_TTL(cmd);
>>      bool leaf = CMD_LEAF(cmd);
>>      uint8_t tg = CMD_TG(cmd);
>> -    uint64_t first_page = 0, last_page;
>> -    uint64_t num_pages = 1;
>> +    uint64_t num_pages;
>> +    uint8_t granule;
>>      int asid = -1;
>>
>> -    if (tg) {
>> -        scale = CMD_SCALE(cmd);
>> -        num = CMD_NUM(cmd);
>> -        ttl = CMD_TTL(cmd);
>> -        num_pages = (num + 1) * BIT_ULL(scale);
>> -    }
>> -
>>      if (type == SMMU_CMD_TLBI_NH_VA) {
>>          asid = CMD_ASID(cmd);
>>      }
>>
>> +    if (!tg) {
>> +        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
>> +        smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
>> +        smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
>> +    }
> 
> Is this intended to fall through ?
hum no it isn't. I will fix that.

Thanks

Eric
> 
>> +
>> +    /* RIL in use */
>> +
>> +    num_pages = (num + 1) * BIT_ULL(scale);
>> +    granule = tg * 2 + 10;
>> +
>>      /* Split invalidations into ^2 range invalidations */
>> -    last_page = num_pages - 1;
>> -    while (num_pages) {
>> -        uint8_t granule = tg * 2 + 10;
>> -        uint64_t mask, count;
>> +    end = addr + (num_pages << granule) - 1;
>>
>> -        mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
>> -        count = mask + 1;
>> +    while (addr != end + 1) {
>> +        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
>>
>> -        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
>> -        smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
>> -        smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
>> -
>> -        num_pages -= count;
>> -        first_page += count;
>> -        addr += count * BIT_ULL(granule);
>> +        num_pages = (mask + 1) >> granule;
>> +        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
>> +        smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
>> +        smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
>> +        addr += mask + 1;
>>      }
>>  }
> 
> thanks
> -- PMM
> 



^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2021-05-10 11:55 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-04-21 17:29 [PATCH] hw/arm/smmuv3: Another range invalidation fix Eric Auger
2021-05-10 11:31 ` Peter Maydell
2021-05-10 11:44   ` Auger Eric

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.