* [PATCH 0/3] VT-d: macro definition / use tidying
@ 2021-12-09 15:51 Jan Beulich
  2021-12-09 15:52 ` [PATCH 1/3] VT-d: properly parenthesize a number of macros Jan Beulich
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Jan Beulich @ 2021-12-09 15:51 UTC (permalink / raw)
  To: xen-devel; +Cc: Kevin Tian

While putting together patch 1, I've noticed two further aspects to
clean up a little.

1: properly parenthesize a number of macros
2: use DMA_TLB_IVA_ADDR()
3: shorten vtd_flush_{context,iotlb}_reg()

Jan




* [PATCH 1/3] VT-d: properly parenthesize a number of macros
  2021-12-09 15:51 [PATCH 0/3] VT-d: macro definition / use tidying Jan Beulich
@ 2021-12-09 15:52 ` Jan Beulich
  2021-12-09 15:53 ` [PATCH 2/3] VT-d: use DMA_TLB_IVA_ADDR() Jan Beulich
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Jan Beulich @ 2021-12-09 15:52 UTC (permalink / raw)
  To: xen-devel; +Cc: Kevin Tian

Let's eliminate the risk of any of these macros getting used with more
complex expressions as arguments.
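
For illustration only (not part of the patch): a macro expanding its argument
unparenthesized mis-evaluates compound arguments. With the old form of
agaw_to_width()

    #define agaw_to_width(val) (30 + val * LEVEL_STRIDE)

an invocation like agaw_to_width(x + 1) expands to

    (30 + x + 1 * LEVEL_STRIDE)

rather than the intended (30 + (x + 1) * LEVEL_STRIDE); with the argument
parenthesized, as done below, the expansion comes out right.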

Where touching lines anyway, also
- switch from u64 to uint64_t,
- drop unnecessary parentheses,
- drop pointless 0x prefixes.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -90,14 +90,14 @@
 
 #define ecap_niotlb_iunits(e)    ((((e) >> 24) & 0xff) + 1)
 #define ecap_iotlb_offset(e)     ((((e) >> 8) & 0x3ff) * 16)
-#define ecap_coherent(e)         ((e >> 0) & 0x1)
-#define ecap_queued_inval(e)     ((e >> 1) & 0x1)
-#define ecap_dev_iotlb(e)        ((e >> 2) & 0x1)
-#define ecap_intr_remap(e)       ((e >> 3) & 0x1)
-#define ecap_eim(e)              ((e >> 4) & 0x1)
-#define ecap_cache_hints(e)      ((e >> 5) & 0x1)
-#define ecap_pass_thru(e)        ((e >> 6) & 0x1)
-#define ecap_snp_ctl(e)          ((e >> 7) & 0x1)
+#define ecap_coherent(e)         (((e) >> 0) & 1)
+#define ecap_queued_inval(e)     (((e) >> 1) & 1)
+#define ecap_dev_iotlb(e)        (((e) >> 2) & 1)
+#define ecap_intr_remap(e)       (((e) >> 3) & 1)
+#define ecap_eim(e)              (((e) >> 4) & 1)
+#define ecap_cache_hints(e)      (((e) >> 5) & 1)
+#define ecap_pass_thru(e)        (((e) >> 6) & 1)
+#define ecap_snp_ctl(e)          (((e) >> 7) & 1)
 
 /* IOTLB_REG */
 #define DMA_TLB_FLUSH_GRANU_OFFSET  60
@@ -106,14 +106,14 @@
 #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
 #define DMA_TLB_IIRG(x) (((x) >> 60) & 7) 
 #define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
-#define DMA_TLB_DID(x) (((u64)(x & 0xffff)) << 32)
+#define DMA_TLB_DID(x) (((uint64_t)((x) & 0xffff)) << 32)
 
 #define DMA_TLB_READ_DRAIN (((u64)1) << 49)
 #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
 #define DMA_TLB_IVT (((u64)1) << 63)
 
-#define DMA_TLB_IVA_ADDR(x) ((((u64)x) >> 12) << 12)
-#define DMA_TLB_IVA_HINT(x) ((((u64)x) & 1) << 6)
+#define DMA_TLB_IVA_ADDR(x) (((uint64_t)(x) >> 12) << 12)
+#define DMA_TLB_IVA_HINT(x) (((uint64_t)(x) & 1) << 6)
 
 /* GCMD_REG */
 #define DMA_GCMD_TE     (1u << 31)
@@ -144,11 +144,11 @@
 /* CCMD_REG */
 #define DMA_CCMD_INVL_GRANU_OFFSET  61
 #define DMA_CCMD_ICC   (((u64)1) << 63)
-#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
-#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
-#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
+#define DMA_CCMD_GLOBAL_INVL ((uint64_t)1 << DMA_CCMD_INVL_GRANU_OFFSET)
+#define DMA_CCMD_DOMAIN_INVL ((uint64_t)2 << DMA_CCMD_INVL_GRANU_OFFSET)
+#define DMA_CCMD_DEVICE_INVL ((uint64_t)3 << DMA_CCMD_INVL_GRANU_OFFSET)
+#define DMA_CCMD_CIRG(x) (((uint64_t)3 << DMA_CCMD_INVL_GRANU_OFFSET) & (x))
 #define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
-#define DMA_CCMD_CIRG(x) ((((u64)3) << 61) & x)
 #define DMA_CCMD_MASK_NOBIT 0
 #define DMA_CCMD_MASK_1BIT 1
 #define DMA_CCMD_MASK_2BIT 2
@@ -156,7 +156,7 @@
 #define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
 #define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
 
-#define DMA_CCMD_CAIG_MASK(x) (((u64)x) & ((u64) 0x3 << 59))
+#define DMA_CCMD_CAIG_MASK(x) ((uint64_t)(x) & ((uint64_t)3 << 59))
 
 /* FECTL_REG */
 #define DMA_FECTL_IM (1u << 31)
@@ -175,10 +175,10 @@
 
 /* FRCD_REG, 32 bits access */
 #define DMA_FRCD_F (1u << 31)
-#define dma_frcd_type(d) ((d >> 30) & 1)
-#define dma_frcd_fault_reason(c) (c & 0xff)
-#define dma_frcd_source_id(c) (c & 0xffff)
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
+#define dma_frcd_type(d) (((d) >> 30) & 1)
+#define dma_frcd_fault_reason(c) ((c) & 0xff)
+#define dma_frcd_source_id(c) ((c) & 0xffff)
+#define dma_frcd_page_addr(d) ((d) & ((uint64_t)-1 << 12)) /* low 64 bit */
 
 /*
  * 0: Present
@@ -233,16 +233,16 @@ struct context_entry {
 #define PTE_NUM            (1 << LEVEL_STRIDE)
 #define level_to_agaw(val) ((val) - 2)
 #define agaw_to_level(val) ((val) + 2)
-#define agaw_to_width(val) (30 + val * LEVEL_STRIDE)
-#define width_to_agaw(w)   ((w - 30)/LEVEL_STRIDE)
-#define level_to_offset_bits(l) (12 + (l - 1) * LEVEL_STRIDE)
+#define agaw_to_width(val) (30 + (val) * LEVEL_STRIDE)
+#define width_to_agaw(w)   (((w) - 30)/LEVEL_STRIDE)
+#define level_to_offset_bits(l) (12 + ((l) - 1) * LEVEL_STRIDE)
 #define address_level_offset(addr, level) \
-            ((addr >> level_to_offset_bits(level)) & LEVEL_MASK)
+            (((addr) >> level_to_offset_bits(level)) & LEVEL_MASK)
 #define offset_level_address(offset, level) \
             ((u64)(offset) << level_to_offset_bits(level))
 #define level_mask(l) (((u64)(-1)) << level_to_offset_bits(l))
 #define level_size(l) (1 << level_to_offset_bits(l))
-#define align_to_level(addr, l) ((addr + level_size(l) - 1) & level_mask(l))
+#define align_to_level(addr, l) (((addr) + level_size(l) - 1) & level_mask(l))
 
 /*
  * 0: readable




* [PATCH 2/3] VT-d: use DMA_TLB_IVA_ADDR()
  2021-12-09 15:51 [PATCH 0/3] VT-d: macro definition / use tidying Jan Beulich
  2021-12-09 15:52 ` [PATCH 1/3] VT-d: properly parenthesize a number of macros Jan Beulich
@ 2021-12-09 15:53 ` Jan Beulich
  2021-12-09 15:53 ` [PATCH 3/3] VT-d: shorten vtd_flush_{context,iotlb}_reg() Jan Beulich
  2021-12-24  7:42 ` [PATCH 0/3] VT-d: macro definition / use tidying Tian, Kevin
  3 siblings, 0 replies; 5+ messages in thread
From: Jan Beulich @ 2021-12-09 15:53 UTC (permalink / raw)
  To: xen-devel; +Cc: Kevin Tian

Let's use the macro in the one place it's supposed to be used, and drop the
then unnecessary manipulations of the address in iommu_flush_iotlb_psi():
all leaf functions already deal correctly with the supplied address.

There also has never been a need to require (i.e. assert for) the
passing in of 4k-aligned addresses - it'll always be the order-sized
range containing the address which gets flushed.
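
For illustration only (not part of the patch): for order == 0 the removed
open-coded rounding

    addr >>= PAGE_SHIFT_4K + order;
    addr <<= PAGE_SHIFT_4K + order;

clears the low 12 bits, which is exactly what the macro now used in
vtd_flush_iotlb_reg() does:

    #define DMA_TLB_IVA_ADDR(x) (((uint64_t)(x) >> 12) << 12)

For order > 0 the extra rounding was never needed, as it's the order-sized
range containing the address which gets flushed anyway.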

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -546,7 +546,8 @@ int vtd_flush_iotlb_reg(struct vtd_iommu
     if ( type == DMA_TLB_PSI_FLUSH )
     {
         /* Note: always flush non-leaf currently. */
-        dmar_writeq(iommu->reg, tlb_offset, size_order | addr);
+        dmar_writeq(iommu->reg, tlb_offset,
+                    size_order | DMA_TLB_IVA_ADDR(addr));
     }
     dmar_writeq(iommu->reg, tlb_offset + 8, val);
 
@@ -606,8 +607,6 @@ static int __must_check iommu_flush_iotl
 {
     int status;
 
-    ASSERT(!(addr & (~PAGE_MASK_4K)));
-
     /* Fallback to domain selective flush if no PSI support */
     if ( !cap_pgsel_inv(iommu->cap) )
         return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
@@ -618,9 +617,6 @@ static int __must_check iommu_flush_iotl
         return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
                                      flush_dev_iotlb);
 
-    addr >>= PAGE_SHIFT_4K + order;
-    addr <<= PAGE_SHIFT_4K + order;
-
     /* apply platform specific errata workarounds */
     vtd_ops_preamble_quirk(iommu);
 




* [PATCH 3/3] VT-d: shorten vtd_flush_{context,iotlb}_reg()
  2021-12-09 15:51 [PATCH 0/3] VT-d: macro definition / use tidying Jan Beulich
  2021-12-09 15:52 ` [PATCH 1/3] VT-d: properly parenthesize a number of macros Jan Beulich
  2021-12-09 15:53 ` [PATCH 2/3] VT-d: use DMA_TLB_IVA_ADDR() Jan Beulich
@ 2021-12-09 15:53 ` Jan Beulich
  2021-12-24  7:42 ` [PATCH 0/3] VT-d: macro definition / use tidying Tian, Kevin
  3 siblings, 0 replies; 5+ messages in thread
From: Jan Beulich @ 2021-12-09 15:53 UTC (permalink / raw)
  To: xen-devel; +Cc: Kevin Tian

Their calculations of the value to write to the respective command
register can be partly folded, resulting in almost 100 bytes less code
for these two relatively short functions.
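
For illustration only (mirroring the diff below): callers pass "type" as
exactly one of the granularity constants (the switch's default case is
BUG()), so e.g. in vtd_flush_context_reg() the old per-case construction

    val = DMA_CCMD_DOMAIN_INVL | DMA_CCMD_DID(did);

can become an OR of just the extra fields onto the incoming value, whose
granularity bits are already in place:

    type |= DMA_CCMD_DID(did);    /* type == DMA_CCMD_DOMAIN_INVL here */

which is what lets the separate local variable and the duplicated
granularity constants go away.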

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -441,7 +441,6 @@ int vtd_flush_context_reg(struct vtd_iom
                           uint16_t source_id, uint8_t function_mask,
                           uint64_t type, bool flush_non_present_entry)
 {
-    u64 val = 0;
     unsigned long flags;
 
     /*
@@ -462,26 +461,26 @@ int vtd_flush_context_reg(struct vtd_iom
     switch ( type )
     {
     case DMA_CCMD_GLOBAL_INVL:
-        val = DMA_CCMD_GLOBAL_INVL;
-        break;
-    case DMA_CCMD_DOMAIN_INVL:
-        val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
         break;
+
     case DMA_CCMD_DEVICE_INVL:
-        val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
-            |DMA_CCMD_SID(source_id)|DMA_CCMD_FM(function_mask);
+        type |= DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
+        fallthrough;
+    case DMA_CCMD_DOMAIN_INVL:
+        type |= DMA_CCMD_DID(did);
         break;
+
     default:
         BUG();
     }
-    val |= DMA_CCMD_ICC;
+    type |= DMA_CCMD_ICC;
 
     spin_lock_irqsave(&iommu->register_lock, flags);
-    dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);
+    dmar_writeq(iommu->reg, DMAR_CCMD_REG, type);
 
     /* Make sure hardware complete it */
     IOMMU_FLUSH_WAIT("context", iommu, DMAR_CCMD_REG, dmar_readq,
-                     !(val & DMA_CCMD_ICC), val);
+                     !(type & DMA_CCMD_ICC), type);
 
     spin_unlock_irqrestore(&iommu->register_lock, flags);
     /* flush context entry will implicitly flush write buffer */
@@ -510,7 +509,7 @@ int vtd_flush_iotlb_reg(struct vtd_iommu
                         bool flush_non_present_entry, bool flush_dev_iotlb)
 {
     int tlb_offset = ecap_iotlb_offset(iommu->ecap);
-    u64 val = 0;
+    uint64_t val = type | DMA_TLB_IVT;
     unsigned long flags;
 
     /*
@@ -524,14 +523,13 @@ int vtd_flush_iotlb_reg(struct vtd_iommu
     switch ( type )
     {
     case DMA_TLB_GLOBAL_FLUSH:
-        val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
         break;
+
     case DMA_TLB_DSI_FLUSH:
-        val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
-        break;
     case DMA_TLB_PSI_FLUSH:
-        val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
+        val |= DMA_TLB_DID(did);
         break;
+
     default:
         BUG();
     }




* RE: [PATCH 0/3] VT-d: macro definition / use tidying
  2021-12-09 15:51 [PATCH 0/3] VT-d: macro definition / use tidying Jan Beulich
                   ` (2 preceding siblings ...)
  2021-12-09 15:53 ` [PATCH 3/3] VT-d: shorten vtd_flush_{context,iotlb}_reg() Jan Beulich
@ 2021-12-24  7:42 ` Tian, Kevin
  3 siblings, 0 replies; 5+ messages in thread
From: Tian, Kevin @ 2021-12-24  7:42 UTC (permalink / raw)
  To: Beulich, Jan, xen-devel

> From: Jan Beulich <jbeulich@suse.com>
> Sent: Thursday, December 9, 2021 11:52 PM
> 
> While putting together patch 1, I've noticed two further aspects to
> clean up a little.
> 
> 1: properly parenthesize a number of macros
> 2: use DMA_TLB_IVA_ADDR()
> 3: shorten vtd_flush_{context,iotlb}_reg()
> 

Reviewed-by: Kevin Tian <kevin.tian@intel.com> for the series.


