From: Oleksandr Tyshchenko <olekstysh@gmail.com>
To: Kevin Tian <kevin.tian@intel.com>
Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>,
	Jan Beulich <jbeulich@suse.com>,
	xen-devel@lists.xenproject.org
Subject: Re: [PATCH v2 12/13] [RFC] iommu: VT-d: Squash map_pages/unmap_pages with map_page/unmap_page
Date: Tue, 12 Sep 2017 17:44:19 +0300	[thread overview]
Message-ID: <CAPD2p-=W4-gWvTsJJZ+QE7gzwF-_hkzmAxD3wh7BE5LYAzL_Kw@mail.gmail.com> (raw)
In-Reply-To: <CAPD2p-k0eOsbS+=hdJ7_t2h9My_jobnEQqajoGiownac_wzztg@mail.gmail.com>

Hi.

Gentle reminder.

On Mon, Aug 21, 2017 at 7:44 PM, Oleksandr Tyshchenko
<olekstysh@gmail.com> wrote:
> Hi, all.
>
> Any comments?
>
> On Tue, Jul 25, 2017 at 8:26 PM, Oleksandr Tyshchenko
> <olekstysh@gmail.com> wrote:
>> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>>
>> Reduce the scope of the TODO by squashing the single-page handlers into
>> the multi-page ones. The next target is to use large pages whenever
>> possible, in case the hardware supports them.
>>
>> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>> CC: Jan Beulich <jbeulich@suse.com>
>> CC: Kevin Tian <kevin.tian@intel.com>
>>
>> ---
>>    Changes in v1:
>>       -
>>
>>    Changes in v2:
>>       -
>> ---
>>  xen/drivers/passthrough/vtd/iommu.c | 138 +++++++++++++++++-------------------
>>  1 file changed, 67 insertions(+), 71 deletions(-)
>>
>> diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
>> index 45d1f36..d20b2f9 100644
>> --- a/xen/drivers/passthrough/vtd/iommu.c
>> +++ b/xen/drivers/passthrough/vtd/iommu.c
>> @@ -1750,15 +1750,24 @@ static void iommu_domain_teardown(struct domain *d)
>>      spin_unlock(&hd->arch.mapping_lock);
>>  }
>>
>> -static int __must_check intel_iommu_map_page(struct domain *d,
>> -                                             unsigned long gfn,
>> -                                             unsigned long mfn,
>> -                                             unsigned int flags)
>> +static int __must_check intel_iommu_unmap_pages(struct domain *d,
>> +                                                unsigned long gfn,
>> +                                                unsigned int order);
>> +
>> +/*
>> + * TODO: Optimize by using large pages whenever possible in the case
>> + * that hardware supports them.
>> + */
>> +static int __must_check intel_iommu_map_pages(struct domain *d,
>> +                                              unsigned long gfn,
>> +                                              unsigned long mfn,
>> +                                              unsigned int order,
>> +                                              unsigned int flags)
>>  {
>>      struct domain_iommu *hd = dom_iommu(d);
>> -    struct dma_pte *page = NULL, *pte = NULL, old, new = { 0 };
>> -    u64 pg_maddr;
>>      int rc = 0;
>> +    unsigned long orig_gfn = gfn;
>> +    unsigned long i;
>>
>>      /* Do nothing if VT-d shares EPT page table */
>>      if ( iommu_use_hap_pt(d) )
>> @@ -1768,78 +1777,60 @@ static int __must_check intel_iommu_map_page(struct domain *d,
>>      if ( iommu_passthrough && is_hardware_domain(d) )
>>          return 0;
>>
>> -    spin_lock(&hd->arch.mapping_lock);
>> -
>> -    pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
>> -    if ( pg_maddr == 0 )
>> +    for ( i = 0; i < (1UL << order); i++, gfn++, mfn++ )
>>      {
>> -        spin_unlock(&hd->arch.mapping_lock);
>> -        return -ENOMEM;
>> -    }
>> -    page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
>> -    pte = page + (gfn & LEVEL_MASK);
>> -    old = *pte;
>> -    dma_set_pte_addr(new, (paddr_t)mfn << PAGE_SHIFT_4K);
>> -    dma_set_pte_prot(new,
>> -                     ((flags & IOMMUF_readable) ? DMA_PTE_READ  : 0) |
>> -                     ((flags & IOMMUF_writable) ? DMA_PTE_WRITE : 0));
>> +        struct dma_pte *page = NULL, *pte = NULL, old, new = { 0 };
>> +        u64 pg_maddr;
>>
>> -    /* Set the SNP on leaf page table if Snoop Control available */
>> -    if ( iommu_snoop )
>> -        dma_set_pte_snp(new);
>> +        spin_lock(&hd->arch.mapping_lock);
>>
>> -    if ( old.val == new.val )
>> -    {
>> +        pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
>> +        if ( pg_maddr == 0 )
>> +        {
>> +            spin_unlock(&hd->arch.mapping_lock);
>> +            rc = -ENOMEM;
>> +            goto err;
>> +        }
>> +        page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
>> +        pte = page + (gfn & LEVEL_MASK);
>> +        old = *pte;
>> +        dma_set_pte_addr(new, (paddr_t)mfn << PAGE_SHIFT_4K);
>> +        dma_set_pte_prot(new,
>> +                         ((flags & IOMMUF_readable) ? DMA_PTE_READ  : 0) |
>> +                         ((flags & IOMMUF_writable) ? DMA_PTE_WRITE : 0));
>> +
>> +        /* Set the SNP on leaf page table if Snoop Control available */
>> +        if ( iommu_snoop )
>> +            dma_set_pte_snp(new);
>> +
>> +        if ( old.val == new.val )
>> +        {
>> +            spin_unlock(&hd->arch.mapping_lock);
>> +            unmap_vtd_domain_page(page);
>> +            continue;
>> +        }
>> +        *pte = new;
>> +
>> +        iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
>>          spin_unlock(&hd->arch.mapping_lock);
>>          unmap_vtd_domain_page(page);
>> -        return 0;
>> -    }
>> -    *pte = new;
>> -
>> -    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
>> -    spin_unlock(&hd->arch.mapping_lock);
>> -    unmap_vtd_domain_page(page);
>>
>> -    if ( !this_cpu(iommu_dont_flush_iotlb) )
>> -        rc = iommu_flush_iotlb(d, gfn, dma_pte_present(old), 1);
>> -
>> -    return rc;
>> -}
>> -
>> -static int __must_check intel_iommu_unmap_page(struct domain *d,
>> -                                               unsigned long gfn)
>> -{
>> -    /* Do nothing if hardware domain and iommu supports pass thru. */
>> -    if ( iommu_passthrough && is_hardware_domain(d) )
>> -        return 0;
>> -
>> -    return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
>> -}
>> -
>> -/* TODO: Optimize by squashing map_pages/unmap_pages with map_page/unmap_page */
>> -static int __must_check intel_iommu_map_pages(struct domain *d,
>> -                                              unsigned long gfn,
>> -                                              unsigned long mfn,
>> -                                              unsigned int order,
>> -                                              unsigned int flags)
>> -{
>> -    unsigned long i;
>> -    int rc = 0;
>> -
>> -    for ( i = 0; i < (1UL << order); i++ )
>> -    {
>> -        rc = intel_iommu_map_page(d, gfn + i, mfn + i, flags);
>> -        if ( unlikely(rc) )
>> +        if ( !this_cpu(iommu_dont_flush_iotlb) )
>>          {
>> -            while ( i-- )
>> -                /* If statement to satisfy __must_check. */
>> -                if ( intel_iommu_unmap_page(d, gfn + i) )
>> -                    continue;
>> -
>> -            break;
>> +            rc = iommu_flush_iotlb(d, gfn, dma_pte_present(old), 1);
>> +            if ( rc )
>> +                goto err;
>>          }
>>      }
>>
>> +    return 0;
>> +
>> +err:
>> +    while ( i-- )
>> +        /* If statement to satisfy __must_check. */
>> +        if ( intel_iommu_unmap_pages(d, orig_gfn + i, 0) )
>> +            continue;
>> +
>>      return rc;
>>  }
>>
>> @@ -1847,12 +1838,17 @@ static int __must_check intel_iommu_unmap_pages(struct domain *d,
>>                                                  unsigned long gfn,
>>                                                  unsigned int order)
>>  {
>> -    unsigned long i;
>>      int rc = 0;
>> +    unsigned long i;
>> +
>> +    /* Do nothing if hardware domain and iommu supports pass thru. */
>> +    if ( iommu_passthrough && is_hardware_domain(d) )
>> +        return 0;
>>
>> -    for ( i = 0; i < (1UL << order); i++ )
>> +    for ( i = 0; i < (1UL << order); i++, gfn++ )
>>      {
>> -        int ret = intel_iommu_unmap_page(d, gfn + i);
>> +        int ret = dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
>> +
>>          if ( !rc )
>>              rc = ret;
>>      }
>> --
>> 2.7.4
>>
>
>
>
> --
> Regards,
>
> Oleksandr Tyshchenko
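
The TODO in the quoted patch defers the real optimization: installing one
superpage PTE instead of 2^order individual 4K entries whenever the hardware
supports it. As a rough illustration of where that is headed, here is a
minimal standalone sketch of the greedy order selection such a loop would
need; all names (mapping_order, map_one, map_region) are hypothetical and
not the actual Xen or VT-d interfaces:

    struct domain;

    /* Hypothetical single-entry mapper: installs one PTE covering
     * 2^order base pages. */
    int map_one(struct domain *d, unsigned long gfn, unsigned long mfn,
                unsigned int order, unsigned int flags);

    /*
     * Pick the largest usable order at this position: bounded by the
     * remaining range, the hardware's superpage limit, and the common
     * alignment of gfn and mfn.
     */
    static unsigned int mapping_order(unsigned long gfn, unsigned long mfn,
                                      unsigned long remaining,
                                      unsigned int max_order)
    {
        unsigned int order = 0;

        while ( order < max_order &&
                (1UL << (order + 1)) <= remaining &&
                !((gfn | mfn) & ((1UL << (order + 1)) - 1)) )
            order++;

        return order;
    }

    static int map_region(struct domain *d, unsigned long gfn,
                          unsigned long mfn, unsigned long nr,
                          unsigned int flags, unsigned int max_order)
    {
        while ( nr )
        {
            unsigned int order = mapping_order(gfn, mfn, nr, max_order);
            int rc = map_one(d, gfn, mfn, order, flags);

            /* Real code would also unwind prior mappings here, as the
             * patch does in its err path. */
            if ( rc )
                return rc;

            gfn += 1UL << order;
            mfn += 1UL << order;
            nr  -= 1UL << order;
        }

        return 0;
    }

For VT-d, max_order would come from the supported superpage sizes the
hardware advertises in its capability register (e.g. order 9 for 2MiB
pages); that lookup is omitted here.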



-- 
Regards,

Oleksandr Tyshchenko
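
The err path in the patch relies on a small idiom worth calling out: undo
exactly the entries established before the failing iteration, in reverse,
while making sure a failing cleanup call can neither trip the __must_check
annotation nor overwrite the original error code. A minimal standalone
sketch of that pattern, with hypothetical do_step/undo_step helpers:

    int do_step(unsigned long i);   /* hypothetical __must_check setup step */
    int undo_step(unsigned long i); /* hypothetical cleanup step */

    static int setup_all(unsigned long n)
    {
        unsigned long i;
        int rc = 0;

        for ( i = 0; i < n; i++ )
        {
            rc = do_step(i);
            if ( rc )
                break;
        }

        if ( rc )
            while ( i-- )
                /* If statement to satisfy __must_check; a cleanup
                 * failure must not replace the original rc. */
                if ( undo_step(i) )
                    continue;

        return rc;
    }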

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

Thread overview: 62+ messages
2017-07-25 17:26 [PATCH v2 00/13] "Non-shared" IOMMU support on ARM Oleksandr Tyshchenko
2017-07-25 17:26 ` [PATCH v2 01/13] xen/device-tree: Add dt_count_phandle_with_args helper Oleksandr Tyshchenko
2017-07-25 17:26 ` [PATCH v2 02/13] iommu: Add extra order argument to the IOMMU APIs and platform callbacks Oleksandr Tyshchenko
2017-08-03 11:21   ` Julien Grall
2017-08-03 12:32     ` Oleksandr Tyshchenko
2017-08-21 16:20       ` Oleksandr Tyshchenko
2017-08-22  7:21         ` Jan Beulich
2017-08-22 10:28           ` Oleksandr Tyshchenko
2017-07-25 17:26 ` [PATCH v2 03/13] xen/arm: p2m: Add helper to convert p2m type to IOMMU flags Oleksandr Tyshchenko
2017-07-25 17:26 ` [PATCH v2 04/13] xen/arm: p2m: Update IOMMU mapping whenever possible if page table is not shared Oleksandr Tyshchenko
2017-07-25 17:26 ` [PATCH v2 05/13] iommu/arm: Re-define iommu_use_hap_pt(d) as iommu_hap_pt_share Oleksandr Tyshchenko
2017-08-03 11:23   ` Julien Grall
2017-08-03 12:33     ` Oleksandr Tyshchenko
2017-07-25 17:26 ` [PATCH v2 06/13] iommu: Add extra use_iommu argument to iommu_domain_init() Oleksandr Tyshchenko
2017-08-21 16:29   ` Oleksandr Tyshchenko
2017-12-06 16:51   ` Jan Beulich
2017-12-06 19:53     ` Oleksandr Tyshchenko
2017-12-06 22:49       ` Julien Grall
2017-12-07 12:08         ` Oleksandr Tyshchenko
2017-12-07 12:51           ` Jan Beulich
2017-07-25 17:26 ` [PATCH v2 07/13] iommu: Make decision about needing IOMMU for hardware domains in advance Oleksandr Tyshchenko
2017-08-21 16:30   ` Oleksandr Tyshchenko
2017-12-06 17:01   ` Jan Beulich
2017-12-06 19:23     ` Oleksandr Tyshchenko
2017-12-07  8:57       ` Jan Beulich
2017-12-07 13:50         ` Oleksandr Tyshchenko
2017-12-07 13:57           ` Jan Beulich
2017-12-08 12:28             ` Oleksandr Tyshchenko
2018-01-18 12:09   ` Roger Pau Monné
2018-01-18 14:50     ` Oleksandr Tyshchenko
2017-07-25 17:26 ` [PATCH v2 08/13] iommu/arm: Misc fixes for arch specific part Oleksandr Tyshchenko
2017-08-03 11:31   ` Julien Grall
2017-08-03 12:34     ` Oleksandr Tyshchenko
2017-07-25 17:26 ` [PATCH v2 09/13] xen/arm: Add use_iommu flag to xen_arch_domainconfig Oleksandr Tyshchenko
2017-07-28 16:16   ` Wei Liu
2017-07-28 16:30     ` Oleksandr Tyshchenko
2017-08-03 11:33   ` Julien Grall
2017-08-03 12:31     ` Oleksandr Tyshchenko
2017-08-03 12:35       ` Julien Grall
2017-07-25 17:26 ` [PATCH v2 10/13] xen/arm: domain_build: Don't expose IOMMU specific properties to the guest Oleksandr Tyshchenko
2017-08-03 11:37   ` Julien Grall
2017-08-03 13:24     ` Oleksandr Tyshchenko
2017-07-25 17:26 ` [PATCH v2 11/13] iommu/arm: smmu: Squash map_pages/unmap_pages with map_page/unmap_page Oleksandr Tyshchenko
2017-08-03 12:36   ` Julien Grall
2017-08-03 13:26     ` Oleksandr Tyshchenko
2017-07-25 17:26 ` [PATCH v2 12/13] [RFC] iommu: VT-d: " Oleksandr Tyshchenko
2017-08-21 16:44   ` Oleksandr Tyshchenko
2017-09-12 14:44     ` Oleksandr Tyshchenko [this message]
2017-09-20  8:54       ` Tian, Kevin
2017-09-20 18:23         ` Oleksandr Tyshchenko
2017-07-25 17:26 ` [PATCH v2 13/13] [RFC] iommu: AMD-Vi: " Oleksandr Tyshchenko
2017-08-21 16:44   ` Oleksandr Tyshchenko
2017-09-12 14:45     ` Oleksandr Tyshchenko
2017-07-31  5:57 ` [PATCH v2 00/13] "Non-shared" IOMMU support on ARM Tian, Kevin
2017-07-31 11:57   ` Oleksandr Tyshchenko
2017-08-01  3:06     ` Tian, Kevin
2017-08-01 11:08       ` Oleksandr Tyshchenko
2017-08-02  6:12         ` Tian, Kevin
2017-08-02 17:47           ` Oleksandr Tyshchenko
2017-08-01 18:09       ` Julien Grall
2017-08-01 18:20         ` Oleksandr Tyshchenko
2017-08-01 17:56   ` Julien Grall
