From: "Tian, Kevin" <kevin.tian@intel.com>
To: Jan Beulich <jbeulich@suse.com>,
	"xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Subject: RE: [PATCH 1/2] VT-d: install sync_cache hook on demand
Date: Fri, 17 Jul 2020 02:48:29 +0000
Message-ID: <MWHPR11MB1645176C28AD7F37615C79378C7C0@MWHPR11MB1645.namprd11.prod.outlook.com>
In-Reply-To: <0036b69f-0d56-9ac4-1afa-06640c9007de@suse.com>

> From: Jan Beulich <jbeulich@suse.com>
> Sent: Wednesday, July 15, 2020 6:04 PM
> 
> Instead of checking inside the hook whether any non-coherent IOMMUs are
> present, simply install the hook only when this is the case.
> 
> To prove that there are no other references to the now dynamically
> updated ops structure (and hence that its updating happens early
> enough), make it static and rename it at the same time.
> 
> Note that this change implies that sync_cache() shouldn't be called
> directly unless there are unusual circumstances, as is the case in
> alloc_pgtable_maddr(), which gets invoked too early for iommu_ops to
> be set already (and therefore we also need to be careful there to
> avoid accessing vtd_ops later on, as it lives in .init).
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

> 
> --- a/xen/drivers/passthrough/vtd/extern.h
> +++ b/xen/drivers/passthrough/vtd/extern.h
> @@ -28,7 +28,6 @@
>  struct pci_ats_dev;
>  extern bool_t rwbf_quirk;
>  extern const struct iommu_init_ops intel_iommu_init_ops;
> -extern const struct iommu_ops intel_iommu_ops;
> 
>  void print_iommu_regs(struct acpi_drhd_unit *drhd);
>  void print_vtd_entries(struct vtd_iommu *iommu, int bus, int devfn, u64 gmfn);
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -59,6 +59,7 @@ bool __read_mostly iommu_snoop = true;
> 
>  int nr_iommus;
> 
> +static struct iommu_ops vtd_ops;
>  static struct tasklet vtd_fault_tasklet;
> 
>  static int setup_hwdom_device(u8 devfn, struct pci_dev *);
> @@ -146,16 +147,11 @@ static int context_get_domain_id(struct
>      return domid;
>  }
> 
> -static int iommus_incoherent;
> -
>  static void sync_cache(const void *addr, unsigned int size)
>  {
>      static unsigned long clflush_size = 0;
>      const void *end = addr + size;
> 
> -    if ( !iommus_incoherent )
> -        return;
> -
>      if ( clflush_size == 0 )
>          clflush_size = get_cache_line_size();
> 
> @@ -217,7 +213,8 @@ uint64_t alloc_pgtable_maddr(unsigned lo
>          vaddr = __map_domain_page(cur_pg);
>          memset(vaddr, 0, PAGE_SIZE);
> 
> -        sync_cache(vaddr, PAGE_SIZE);
> +        if ( (iommu_ops.init ? &iommu_ops : &vtd_ops)->sync_cache )
> +            sync_cache(vaddr, PAGE_SIZE);
>          unmap_domain_page(vaddr);
>          cur_pg++;
>      }
> @@ -1227,7 +1224,7 @@ int __init iommu_alloc(struct acpi_drhd_
>      iommu->nr_pt_levels = agaw_to_level(agaw);
> 
>      if ( !ecap_coherent(iommu->ecap) )
> -        iommus_incoherent = 1;
> +        vtd_ops.sync_cache = sync_cache;
> 
>      /* allocate domain id bitmap */
>      nr_dom = cap_ndoms(iommu->cap);
> @@ -2737,7 +2734,7 @@ static int __init intel_iommu_quarantine
>      return level ? -ENOMEM : rc;
>  }
> 
> -const struct iommu_ops __initconstrel intel_iommu_ops = {
> +static struct iommu_ops __initdata vtd_ops = {
>      .init = intel_iommu_domain_init,
>      .hwdom_init = intel_iommu_hwdom_init,
>      .quarantine_init = intel_iommu_quarantine_init,
> @@ -2768,11 +2765,10 @@ const struct iommu_ops __initconstrel in
>      .iotlb_flush_all = iommu_flush_iotlb_all,
>      .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
>      .dump_p2m_table = vtd_dump_p2m_table,
> -    .sync_cache = sync_cache,
>  };
> 
>  const struct iommu_init_ops __initconstrel intel_iommu_init_ops = {
> -    .ops = &intel_iommu_ops,
> +    .ops = &vtd_ops,
>      .setup = vtd_setup,
>      .supports_x2apic = intel_iommu_supports_eim,
>  };
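
For context, a minimal standalone sketch of the "install the hook only on demand"
pattern the patch applies. The names below (cache_ops, probe_device, touch_memory)
are invented purely for illustration and are not Xen interfaces; only the shape of
the idea matches the diff above.

    #include <stdbool.h>
    #include <stdio.h>

    struct cache_ops {
        /* Left NULL when no flushing is needed; callers test before calling. */
        void (*sync)(const void *addr, unsigned int size);
    };

    static void sync_cache(const void *addr, unsigned int size)
    {
        printf("flushing %u bytes at %p\n", size, addr);
    }

    static struct cache_ops ops; /* .sync starts out NULL */

    static void probe_device(bool coherent)
    {
        /* Install the hook only if at least one device actually needs it. */
        if ( !coherent )
            ops.sync = sync_cache;
    }

    static void touch_memory(const void *buf, unsigned int size)
    {
        /* Call sites pay only a NULL check when every device is coherent. */
        if ( ops.sync )
            ops.sync(buf, size);
    }

    int main(void)
    {
        char buf[64] = { 0 };

        probe_device(true);           /* coherent device: hook stays NULL */
        touch_memory(buf, sizeof(buf));

        probe_device(false);          /* non-coherent device: hook installed */
        touch_memory(buf, sizeof(buf));

        return 0;
    }

The consequence mirrored from the patch: once the hook may legitimately be NULL,
an early caller such as alloc_pgtable_maddr() has to test the pointer itself,
which is what the (iommu_ops.init ? &iommu_ops : &vtd_ops)->sync_cache check in
the hunk above does.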



Thread overview: 9+ messages
2020-07-15 10:02 [PATCH 0/2] VT-d: XSA-321 follow-up Jan Beulich
2020-07-15 10:03 ` [PATCH 1/2] VT-d: install sync_cache hook on demand Jan Beulich
2020-07-16 10:14   ` Roger Pau Monné
2020-07-16 10:25     ` Jan Beulich
2020-07-16 10:34       ` Roger Pau Monné
2020-07-17  2:48   ` Tian, Kevin [this message]
2020-07-15 10:04 ` [PATCH 2/2] VT-d: use clear_page() in alloc_pgtable_maddr() Jan Beulich
2020-07-16 10:15   ` Roger Pau Monné
2020-07-17  2:49   ` Tian, Kevin
