From: Quan Xu <quan.xu@intel.com>
To: xen-devel@lists.xen.org
Cc: Quan Xu <quan.xu@intel.com>,
kevin.tian@intel.com, feng.wu@intel.com,
dario.faggioli@citrix.com, jbeulich@suse.com
Subject: [PATCH v8 2/3] VT-d: Wrap a _sync version for all VT-d flush interfaces
Date: Thu, 24 Mar 2016 13:57:57 +0800 [thread overview]
Message-ID: <1458799079-79825-2-git-send-email-quan.xu@intel.com> (raw)
In-Reply-To: <1458799079-79825-1-git-send-email-quan.xu@intel.com>
For consistency, we wrap a _sync version for all VT-d flush interfaces.
This simplifies the callers' logic and makes the code more readable as well.
Signed-off-by: Quan Xu <quan.xu@intel.com>
---
xen/drivers/passthrough/vtd/extern.h | 2 +
xen/drivers/passthrough/vtd/qinval.c | 173 ++++++++++++++++++++--------------
xen/drivers/passthrough/vtd/x86/ats.c | 12 +--
3 files changed, 106 insertions(+), 81 deletions(-)
diff --git a/xen/drivers/passthrough/vtd/extern.h b/xen/drivers/passthrough/vtd/extern.h
index d4d37c3..6d3187d 100644
--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -61,6 +61,8 @@ int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
int qinval_device_iotlb(struct iommu *iommu,
u32 max_invs_pend, u16 sid, u16 size, u64 addr);
+int qinval_device_iotlb_sync(struct iommu *iommu, u32 max_invs_pend,
+ u16 sid, u16 size, u64 addr);
unsigned int get_cache_line_size(void);
void cacheline_flush(char *);
diff --git a/xen/drivers/passthrough/vtd/qinval.c b/xen/drivers/passthrough/vtd/qinval.c
index 52ba2c2..ad9e265 100644
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -72,6 +72,70 @@ static void qinval_update_qtail(struct iommu *iommu, unsigned int index)
dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << QINVAL_INDEX_SHIFT));
}
+static int __must_check queue_invalidate_wait(struct iommu *iommu,
+ u8 iflag, u8 sw, u8 fn)
+{
+ s_time_t timeout;
+ volatile u32 poll_slot = QINVAL_STAT_INIT;
+ unsigned int index;
+ unsigned long flags;
+ u64 entry_base;
+ struct qinval_entry *qinval_entry, *qinval_entries;
+
+ spin_lock_irqsave(&iommu->register_lock, flags);
+ index = qinval_next_index(iommu);
+ entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
+ ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
+ qinval_entries = map_vtd_domain_page(entry_base);
+ qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+
+ qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
+ qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
+ qinval_entry->q.inv_wait_dsc.lo.sw = sw;
+ qinval_entry->q.inv_wait_dsc.lo.fn = fn;
+ qinval_entry->q.inv_wait_dsc.lo.res_1 = 0;
+ qinval_entry->q.inv_wait_dsc.lo.sdata = QINVAL_STAT_DONE;
+ qinval_entry->q.inv_wait_dsc.hi.res_1 = 0;
+ qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(&poll_slot) >> 2;
+
+ unmap_vtd_domain_page(qinval_entries);
+ qinval_update_qtail(iommu, index);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+ /* Now we don't support interrupt method */
+ if ( sw )
+ {
+ /* In case all wait descriptor writes to same addr with same data */
+ timeout = NOW() + IOMMU_QI_TIMEOUT;
+ while ( poll_slot != QINVAL_STAT_DONE )
+ {
+ if ( NOW() > timeout )
+ {
+ print_qi_regs(iommu);
+ printk(XENLOG_WARNING VTDPREFIX
+ "Queue invalidate wait descriptor timed out.\n");
+ return -ETIMEDOUT;
+ }
+
+ cpu_relax();
+ }
+
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int invalidate_sync(struct iommu *iommu)
+{
+ struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
+
+ if ( qi_ctrl->qinval_maddr )
+ return queue_invalidate_wait(iommu, 0, 1, 1);
+
+ return 0;
+}
+
static void queue_invalidate_context(struct iommu *iommu,
u16 did, u16 source_id, u8 function_mask, u8 granu)
{
@@ -102,6 +166,15 @@ static void queue_invalidate_context(struct iommu *iommu,
unmap_vtd_domain_page(qinval_entries);
}
+static int queue_invalidate_context_sync(struct iommu *iommu,
+ u16 did, u16 source_id, u8 function_mask, u8 granu)
+{
+ queue_invalidate_context(iommu, did, source_id,
+ function_mask, granu);
+
+ return invalidate_sync(iommu);
+}
+
static void queue_invalidate_iotlb(struct iommu *iommu,
u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
{
@@ -135,65 +208,12 @@ static void queue_invalidate_iotlb(struct iommu *iommu,
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-static int __must_check queue_invalidate_wait(struct iommu *iommu,
- u8 iflag, u8 sw, u8 fn)
-{
- s_time_t timeout;
- volatile u32 poll_slot = QINVAL_STAT_INIT;
- unsigned int index;
- unsigned long flags;
- u64 entry_base;
- struct qinval_entry *qinval_entry, *qinval_entries;
-
- spin_lock_irqsave(&iommu->register_lock, flags);
- index = qinval_next_index(iommu);
- entry_base = iommu_qi_ctrl(iommu)->qinval_maddr +
- ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
- qinval_entries = map_vtd_domain_page(entry_base);
- qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
-
- qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
- qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
- qinval_entry->q.inv_wait_dsc.lo.sw = sw;
- qinval_entry->q.inv_wait_dsc.lo.fn = fn;
- qinval_entry->q.inv_wait_dsc.lo.res_1 = 0;
- qinval_entry->q.inv_wait_dsc.lo.sdata = QINVAL_STAT_DONE;
- qinval_entry->q.inv_wait_dsc.hi.res_1 = 0;
- qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(&poll_slot) >> 2;
-
- unmap_vtd_domain_page(qinval_entries);
- qinval_update_qtail(iommu, index);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
-
- /* Now we don't support interrupt method */
- if ( sw )
- {
- /* In case all wait descriptor writes to same addr with same data */
- timeout = NOW() + IOMMU_QI_TIMEOUT;
- while ( poll_slot != QINVAL_STAT_DONE )
- {
- if ( NOW() > timeout )
- {
- print_qi_regs(iommu);
- printk(XENLOG_WARNING VTDPREFIX
- "Queue invalidate wait descriptor timed out.\n");
- return -ETIMEDOUT;
- }
- cpu_relax();
- }
- return 0;
- }
-
- return -EOPNOTSUPP;
-}
-
-static int invalidate_sync(struct iommu *iommu)
+static int queue_invalidate_iotlb_sync(struct iommu *iommu,
+ u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
{
- struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
+ queue_invalidate_iotlb(iommu, granu, dr, dw, did, am, ih, addr);
- if ( qi_ctrl->qinval_maddr )
- return queue_invalidate_wait(iommu, 0, 1, 1);
- return 0;
+ return invalidate_sync(iommu);
}
int qinval_device_iotlb(struct iommu *iommu,
@@ -229,6 +249,14 @@ int qinval_device_iotlb(struct iommu *iommu,
return 0;
}
+int qinval_device_iotlb_sync(struct iommu *iommu,
+ u32 max_invs_pend, u16 sid, u16 size, u64 addr)
+{
+ qinval_device_iotlb(iommu, max_invs_pend, sid, size, addr);
+
+ return invalidate_sync(iommu);
+}
+
static void queue_invalidate_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
{
unsigned long flags;
@@ -256,7 +284,7 @@ static void queue_invalidate_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-static int __iommu_flush_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
+static int queue_invalidate_iec_sync(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
{
int ret;
@@ -273,12 +301,12 @@ static int __iommu_flush_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
int iommu_flush_iec_global(struct iommu *iommu)
{
- return __iommu_flush_iec(iommu, IEC_GLOBAL_INVL, 0, 0);
+ return queue_invalidate_iec_sync(iommu, IEC_GLOBAL_INVL, 0, 0);
}
int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx)
{
- return __iommu_flush_iec(iommu, IEC_INDEX_INVL, im, iidx);
+ return queue_invalidate_iec_sync(iommu, IEC_INDEX_INVL, im, iidx);
}
static int flush_context_qi(
@@ -304,11 +332,9 @@ static int flush_context_qi(
}
if ( qi_ctrl->qinval_maddr != 0 )
- {
- queue_invalidate_context(iommu, did, sid, fm,
- type >> DMA_CCMD_INVL_GRANU_OFFSET);
- ret = invalidate_sync(iommu);
- }
+ ret = queue_invalidate_context_sync(iommu, did, sid, fm,
+ type >> DMA_CCMD_INVL_GRANU_OFFSET);
+
return ret;
}
@@ -338,23 +364,24 @@ static int flush_iotlb_qi(
if ( qi_ctrl->qinval_maddr != 0 )
{
- int rc;
-
/* use queued invalidation */
if (cap_write_drain(iommu->cap))
dw = 1;
if (cap_read_drain(iommu->cap))
dr = 1;
/* Need to conside the ih bit later */
- queue_invalidate_iotlb(iommu,
- type >> DMA_TLB_FLUSH_GRANU_OFFSET, dr,
- dw, did, size_order, 0, addr);
+ ret = queue_invalidate_iotlb_sync(iommu,
+ type >> DMA_TLB_FLUSH_GRANU_OFFSET, dr, dw, did,
+ size_order, 0, addr);
+
+ /* TODO: Timeout error handling to be added later */
+ if ( ret )
+ return ret;
+
if ( flush_dev_iotlb )
ret = dev_invalidate_iotlb(iommu, did, addr, size_order, type);
- rc = invalidate_sync(iommu);
- if ( !ret )
- ret = rc;
}
+
return ret;
}
diff --git a/xen/drivers/passthrough/vtd/x86/ats.c b/xen/drivers/passthrough/vtd/x86/ats.c
index 334b9c1..7b1c07b 100644
--- a/xen/drivers/passthrough/vtd/x86/ats.c
+++ b/xen/drivers/passthrough/vtd/x86/ats.c
@@ -118,7 +118,6 @@ int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
{
u16 sid = PCI_BDF2(pdev->bus, pdev->devfn);
bool_t sbit;
- int rc = 0;
/* Only invalidate devices that belong to this IOMMU */
if ( pdev->iommu != iommu )
@@ -134,8 +133,8 @@ int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
/* invalidate all translations: sbit=1,bit_63=0,bit[62:12]=1 */
sbit = 1;
addr = (~0UL << PAGE_SHIFT_4K) & 0x7FFFFFFFFFFFFFFF;
- rc = qinval_device_iotlb(iommu, pdev->ats_queue_depth,
- sid, sbit, addr);
+ ret = qinval_device_iotlb_sync(iommu, pdev->ats_queue_depth,
+ sid, sbit, addr);
break;
case DMA_TLB_PSI_FLUSH:
if ( !device_in_domain(iommu, pdev, did) )
@@ -154,16 +153,13 @@ int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
addr |= (((u64)1 << (size_order - 1)) - 1) << PAGE_SHIFT_4K;
}
- rc = qinval_device_iotlb(iommu, pdev->ats_queue_depth,
- sid, sbit, addr);
+ ret = qinval_device_iotlb_sync(iommu, pdev->ats_queue_depth,
+ sid, sbit, addr);
break;
default:
dprintk(XENLOG_WARNING VTDPREFIX, "invalid vt-d flush type\n");
return -EOPNOTSUPP;
}
-
- if ( !ret )
- ret = rc;
}
return ret;
--
1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
next prev parent reply other threads:[~2016-03-24 5:57 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-03-24 5:57 [PATCH v8 0/3] VT-d Device-TLB flush issue Quan Xu
2016-03-24 5:57 ` Quan Xu [this message]
2016-03-24 13:56 ` [PATCH v8 2/3] VT-d: Wrap a _sync version for all VT-d flush interfaces Dario Faggioli
2016-03-24 15:06 ` Dario Faggioli
2016-03-25 3:11 ` Xu, Quan
2016-03-24 5:57 ` [PATCH v8 1/3] VT-d: Reduce spin timeout to 1ms, which can be boot-time changed Quan Xu
2016-03-24 11:04 ` Dario Faggioli
2016-03-24 11:28 ` Xu, Quan
2016-03-25 20:06 ` Konrad Rzeszutek Wilk
2016-03-28 6:27 ` Xu, Quan
2016-03-28 13:31 ` Konrad Rzeszutek Wilk
2016-04-01 15:03 ` Xu, Quan
2016-03-24 5:57 ` [PATCH v8 3/3] VT-d: Fix vt-d Device-TLB flush timeout issue Quan Xu
2016-03-24 15:38 ` Dario Faggioli
2016-03-25 3:43 ` Xu, Quan
2016-03-25 20:40 ` Konrad Rzeszutek Wilk
2016-03-28 3:44 ` Xu, Quan
2016-03-28 7:45 ` Xu, Quan
2016-03-25 20:31 ` Konrad Rzeszutek Wilk
2016-03-28 3:56 ` Xu, Quan
2016-03-28 14:11 ` Konrad Rzeszutek Wilk
2016-03-29 1:32 ` Xu, Quan
2016-03-29 14:20 ` Konrad Rzeszutek Wilk
2016-03-29 14:32 ` Xu, Quan
2016-03-24 10:33 ` [PATCH v8 0/3] VT-d Device-TLB flush issue Jan Beulich
2016-03-24 11:11 ` Xu, Quan
2016-04-01 14:47 ` Xu, Quan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1458799079-79825-2-git-send-email-quan.xu@intel.com \
--to=quan.xu@intel.com \
--cc=dario.faggioli@citrix.com \
--cc=feng.wu@intel.com \
--cc=jbeulich@suse.com \
--cc=kevin.tian@intel.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).