From: "Tian, Kevin" <kevin.tian@intel.com>
To: Jan Beulich <jbeulich@suse.com>,
	"xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: Paul Durrant <paul@xen.org>,
	"Cooper, Andrew" <andrew.cooper3@citrix.com>
Subject: RE: [PATCH 7/9] VT-d: centralize mapping of QI entries
Date: Thu, 24 Jun 2021 05:31:55 +0000
Message-ID: <MWHPR11MB18865ECDDB47B086ECC377A38C079@MWHPR11MB1886.namprd11.prod.outlook.com>
In-Reply-To: <b1aba243-e05e-1f50-d85d-00f60703b62b@suse.com>

> From: Jan Beulich <jbeulich@suse.com>
> Sent: Wednesday, June 9, 2021 5:30 PM
> 
> Introduce a helper function to reduce redundancy. Take the opportunity
> to express the logic without using the somewhat odd QINVAL_ENTRY_ORDER.
> Also take the opportunity to uniformly unmap after updating the queue tail
> and dropping the lock (as was previously done only by
> queue_invalidate_context_sync()).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

> ---
> I wonder though whether we wouldn't be better off permanently mapping
> the queue(s).
> 
> --- a/xen/drivers/passthrough/vtd/qinval.c
> +++ b/xen/drivers/passthrough/vtd/qinval.c
> @@ -69,6 +69,16 @@ static void qinval_update_qtail(struct v
>      dmar_writel(iommu->reg, DMAR_IQT_REG, val << QINVAL_INDEX_SHIFT);
>  }
> 
> +static struct qinval_entry *qi_map_entry(const struct vtd_iommu *iommu,
> +                                         unsigned int index)
> +{
> +    paddr_t base = iommu->qinval_maddr +
> +                   ((index * sizeof(struct qinval_entry)) & PAGE_MASK);
> +    struct qinval_entry *entries = map_vtd_domain_page(base);
> +
> +    return &entries[index % (PAGE_SIZE / sizeof(*entries))];
> +}
> +
>  static int __must_check queue_invalidate_context_sync(struct vtd_iommu *iommu,
>                                                        u16 did, u16 source_id,
>                                                        u8 function_mask,
> @@ -76,15 +86,11 @@ static int __must_check queue_invalidate
>  {
>      unsigned long flags;
>      unsigned int index;
> -    u64 entry_base;
> -    struct qinval_entry *qinval_entry, *qinval_entries;
> +    struct qinval_entry *qinval_entry;
> 
>      spin_lock_irqsave(&iommu->register_lock, flags);
>      index = qinval_next_index(iommu);
> -    entry_base = iommu->qinval_maddr +
> -                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> -    qinval_entries = map_vtd_domain_page(entry_base);
> -    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> +    qinval_entry = qi_map_entry(iommu, index);
> 
>      qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT;
>      qinval_entry->q.cc_inv_dsc.lo.granu = granu;
> @@ -98,7 +104,7 @@ static int __must_check queue_invalidate
>      qinval_update_qtail(iommu, index);
>      spin_unlock_irqrestore(&iommu->register_lock, flags);
> 
> -    unmap_vtd_domain_page(qinval_entries);
> +    unmap_vtd_domain_page(qinval_entry);
> 
>      return invalidate_sync(iommu);
>  }
> @@ -110,15 +116,11 @@ static int __must_check queue_invalidate
>  {
>      unsigned long flags;
>      unsigned int index;
> -    u64 entry_base;
> -    struct qinval_entry *qinval_entry, *qinval_entries;
> +    struct qinval_entry *qinval_entry;
> 
>      spin_lock_irqsave(&iommu->register_lock, flags);
>      index = qinval_next_index(iommu);
> -    entry_base = iommu->qinval_maddr +
> -                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> -    qinval_entries = map_vtd_domain_page(entry_base);
> -    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> +    qinval_entry = qi_map_entry(iommu, index);
> 
>      qinval_entry->q.iotlb_inv_dsc.lo.type = TYPE_INVAL_IOTLB;
>      qinval_entry->q.iotlb_inv_dsc.lo.granu = granu;
> @@ -133,10 +135,11 @@ static int __must_check queue_invalidate
>      qinval_entry->q.iotlb_inv_dsc.hi.res_1 = 0;
>      qinval_entry->q.iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;
> 
> -    unmap_vtd_domain_page(qinval_entries);
>      qinval_update_qtail(iommu, index);
>      spin_unlock_irqrestore(&iommu->register_lock, flags);
> 
> +    unmap_vtd_domain_page(qinval_entry);
> +
>      return invalidate_sync(iommu);
>  }
> 
> @@ -147,17 +150,13 @@ static int __must_check queue_invalidate
>      static DEFINE_PER_CPU(uint32_t, poll_slot);
>      unsigned int index;
>      unsigned long flags;
> -    u64 entry_base;
> -    struct qinval_entry *qinval_entry, *qinval_entries;
> +    struct qinval_entry *qinval_entry;
>      uint32_t *this_poll_slot = &this_cpu(poll_slot);
> 
>      spin_lock_irqsave(&iommu->register_lock, flags);
>      ACCESS_ONCE(*this_poll_slot) = QINVAL_STAT_INIT;
>      index = qinval_next_index(iommu);
> -    entry_base = iommu->qinval_maddr +
> -                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> -    qinval_entries = map_vtd_domain_page(entry_base);
> -    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> +    qinval_entry = qi_map_entry(iommu, index);
> 
>      qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
>      qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
> @@ -167,10 +166,11 @@ static int __must_check queue_invalidate
>      qinval_entry->q.inv_wait_dsc.lo.sdata = QINVAL_STAT_DONE;
>      qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(this_poll_slot);
> 
> -    unmap_vtd_domain_page(qinval_entries);
>      qinval_update_qtail(iommu, index);
>      spin_unlock_irqrestore(&iommu->register_lock, flags);
> 
> +    unmap_vtd_domain_page(qinval_entry);
> +
>      /* Now we don't support interrupt method */
>      if ( sw )
>      {
> @@ -246,16 +246,12 @@ int qinval_device_iotlb_sync(struct vtd_
>  {
>      unsigned long flags;
>      unsigned int index;
> -    u64 entry_base;
> -    struct qinval_entry *qinval_entry, *qinval_entries;
> +    struct qinval_entry *qinval_entry;
> 
>      ASSERT(pdev);
>      spin_lock_irqsave(&iommu->register_lock, flags);
>      index = qinval_next_index(iommu);
> -    entry_base = iommu->qinval_maddr +
> -                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> -    qinval_entries = map_vtd_domain_page(entry_base);
> -    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> +    qinval_entry = qi_map_entry(iommu, index);
> 
>      qinval_entry->q.dev_iotlb_inv_dsc.lo.type = TYPE_INVAL_DEVICE_IOTLB;
>      qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0;
> @@ -268,10 +264,11 @@ int qinval_device_iotlb_sync(struct vtd_
>      qinval_entry->q.dev_iotlb_inv_dsc.hi.res_1 = 0;
>      qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;
> 
> -    unmap_vtd_domain_page(qinval_entries);
>      qinval_update_qtail(iommu, index);
>      spin_unlock_irqrestore(&iommu->register_lock, flags);
> 
> +    unmap_vtd_domain_page(qinval_entry);
> +
>      return dev_invalidate_sync(iommu, pdev, did);
>  }
> 
> @@ -280,16 +277,12 @@ static int __must_check queue_invalidate
>  {
>      unsigned long flags;
>      unsigned int index;
> -    u64 entry_base;
> -    struct qinval_entry *qinval_entry, *qinval_entries;
> +    struct qinval_entry *qinval_entry;
>      int ret;
> 
>      spin_lock_irqsave(&iommu->register_lock, flags);
>      index = qinval_next_index(iommu);
> -    entry_base = iommu->qinval_maddr +
> -                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> -    qinval_entries = map_vtd_domain_page(entry_base);
> -    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> +    qinval_entry = qi_map_entry(iommu, index);
> 
>      qinval_entry->q.iec_inv_dsc.lo.type = TYPE_INVAL_IEC;
>      qinval_entry->q.iec_inv_dsc.lo.granu = granu;
> @@ -299,10 +292,11 @@ static int __must_check queue_invalidate
>      qinval_entry->q.iec_inv_dsc.lo.res_2 = 0;
>      qinval_entry->q.iec_inv_dsc.hi.res = 0;
> 
> -    unmap_vtd_domain_page(qinval_entries);
>      qinval_update_qtail(iommu, index);
>      spin_unlock_irqrestore(&iommu->register_lock, flags);
> 
> +    unmap_vtd_domain_page(qinval_entry);
> +
>      ret = invalidate_sync(iommu);
> 
>      /*
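
For reference, the new address computation in qi_map_entry() and the open-coded
logic it replaces select the same queue entry. The following minimal, standalone
sketch (not part of the patch) demonstrates that equivalence; the 16-byte
descriptor size and the QINVAL_ENTRY_ORDER value are illustrative assumptions,
chosen so that QINVAL_ENTRY_ORDER equals log2(PAGE_SIZE / entry size), which is
what the old index arithmetic implies.

    /* Standalone equivalence check; constants are illustrative assumptions. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT         12
    #define PAGE_SIZE          (1ull << PAGE_SHIFT)
    #define PAGE_MASK          (~(PAGE_SIZE - 1))
    #define QINVAL_ENTRY_SIZE  16ull /* assumed sizeof(struct qinval_entry) */
    #define QINVAL_ENTRY_ORDER 8     /* log2(PAGE_SIZE / QINVAL_ENTRY_SIZE) */

    int main(void)
    {
        unsigned int index;

        for ( index = 0; index < 4 * PAGE_SIZE / QINVAL_ENTRY_SIZE; ++index )
        {
            /* Old scheme: page selected via QINVAL_ENTRY_ORDER, then in-page slot. */
            uint64_t old_base = (uint64_t)(index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT;
            unsigned int old_slot = index % (1u << QINVAL_ENTRY_ORDER);

            /* New scheme (qi_map_entry): byte offset masked down to a page boundary. */
            uint64_t new_base = (index * QINVAL_ENTRY_SIZE) & PAGE_MASK;
            unsigned int new_slot = index % (PAGE_SIZE / QINVAL_ENTRY_SIZE);

            assert(old_base == new_base && old_slot == new_slot);
        }

        printf("old and new QI entry addressing agree\n");

        return 0;
    }

Compiled and run as plain C, the assertions confirm both schemes agree for
indexes spanning several queue pages.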

