From: Stefano Stabellini <sstabellini@kernel.org>
To: Oleksandr <olekstysh@gmail.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>,
	 xen-devel@lists.xenproject.org, linux-kernel@vger.kernel.org,
	 Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>,
	 Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	 Juergen Gross <jgross@suse.com>, Julien Grall <julien@xen.org>
Subject: Re: [RFC PATCH 1/2] xen/unpopulated-alloc: Introduce helpers for DMA allocations
Date: Fri, 10 Jun 2022 17:12:12 -0700 (PDT)
Message-ID: <alpine.DEB.2.22.394.2206101709210.756493@ubuntu-linux-20-04-desktop>
In-Reply-To: <00c14b91-4cf2-179c-749d-593db853e42e@gmail.com>


On Wed, 8 Jun 2022, Oleksandr wrote:
> 2. Drop the "page_list" entirely and use "dma_pool" for all (contiguous and
> non-contiguous) allocations. After all, all pages are initially contiguous in
> fill_list() as they are built from the resource. This changes behavior for all
> users of xen_alloc_unpopulated_pages()
> 
> Below the diff for unpopulated-alloc.c. The patch is also available at:
> 
> https://github.com/otyshchenko1/linux/commit/7be569f113a4acbdc4bcb9b20cb3995b3151387a
> 
> 
> diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
> index a39f2d3..ab5c7bd 100644
> --- a/drivers/xen/unpopulated-alloc.c
> +++ b/drivers/xen/unpopulated-alloc.c
> @@ -1,5 +1,7 @@
>  // SPDX-License-Identifier: GPL-2.0
> +#include <linux/dma-mapping.h>
>  #include <linux/errno.h>
> +#include <linux/genalloc.h>
>  #include <linux/gfp.h>
>  #include <linux/kernel.h>
>  #include <linux/mm.h>
> @@ -13,8 +15,8 @@
>  #include <xen/xen.h>
> 
>  static DEFINE_MUTEX(list_lock);
> -static struct page *page_list;
> -static unsigned int list_count;
> +
> +static struct gen_pool *dma_pool;
> 
>  static struct resource *target_resource;
> 
> @@ -36,7 +38,7 @@ static int fill_list(unsigned int nr_pages)
>         struct dev_pagemap *pgmap;
>         struct resource *res, *tmp_res = NULL;
>         void *vaddr;
> -       unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
> +       unsigned int alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
>         struct range mhp_range;
>         int ret;
> 
> @@ -106,6 +108,7 @@ static int fill_list(unsigned int nr_pages)
>           * conflict with any devices.
>           */
>         if (!xen_feature(XENFEAT_auto_translated_physmap)) {
> +               unsigned int i;
>                 xen_pfn_t pfn = PFN_DOWN(res->start);
> 
>                 for (i = 0; i < alloc_pages; i++) {
> @@ -125,16 +128,17 @@ static int fill_list(unsigned int nr_pages)
>                 goto err_memremap;
>         }
> 
> -       for (i = 0; i < alloc_pages; i++) {
> -               struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
> -
> -               pg->zone_device_data = page_list;
> -               page_list = pg;
> -               list_count++;
> +       ret = gen_pool_add_virt(dma_pool, (unsigned long)vaddr, res->start,
> +                       alloc_pages * PAGE_SIZE, NUMA_NO_NODE);
> +       if (ret) {
> +               pr_err("Cannot add memory range to the pool\n");
> +               goto err_pool;
>         }
> 
>         return 0;
> 
> +err_pool:
> +       memunmap_pages(pgmap);
>  err_memremap:
>         kfree(pgmap);
>  err_pgmap:
> @@ -149,51 +153,49 @@ static int fill_list(unsigned int nr_pages)
>         return ret;
>  }
> 
> -/**
> - * xen_alloc_unpopulated_pages - alloc unpopulated pages
> - * @nr_pages: Number of pages
> - * @pages: pages returned
> - * @return 0 on success, error otherwise
> - */
> -int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
> +static int alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages,
> +               bool contiguous)
>  {
>         unsigned int i;
>         int ret = 0;
> +       void *vaddr;
> +       bool filled = false;
> 
>         /*
>          * Fallback to default behavior if we do not have any suitable resource
>          * to allocate required region from and as the result we won't be able to
>          * construct pages.
>          */
> -       if (!target_resource)
> +       if (!target_resource) {
> +               if (contiguous)
> +                       return -ENODEV;
> +
>                 return xen_alloc_ballooned_pages(nr_pages, pages);
> +       }
> 
>         mutex_lock(&list_lock);
> -       if (list_count < nr_pages) {
> -               ret = fill_list(nr_pages - list_count);
> +
> +       while (!(vaddr = (void *)gen_pool_alloc(dma_pool, nr_pages * PAGE_SIZE))) {
> +               if (filled)
> +                       ret = -ENOMEM;
> +               else {
> +                       ret = fill_list(nr_pages);
> +                       filled = true;
> +               }
>                 if (ret)
>                         goto out;
>         }
> 
>         for (i = 0; i < nr_pages; i++) {
> -               struct page *pg = page_list;
> -
> -               BUG_ON(!pg);
> -               page_list = pg->zone_device_data;
> -               list_count--;
> -               pages[i] = pg;
> +               pages[i] = virt_to_page(vaddr + PAGE_SIZE * i);
> 
>  #ifdef CONFIG_XEN_HAVE_PVMMU
>                 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
> -                       ret = xen_alloc_p2m_entry(page_to_pfn(pg));
> +                       ret = xen_alloc_p2m_entry(page_to_pfn(pages[i]));
>                         if (ret < 0) {
> -                               unsigned int j;
> -
> -                               for (j = 0; j <= i; j++) {
> -                                       pages[j]->zone_device_data = page_list;
> -                                       page_list = pages[j];
> -                                       list_count++;
> -                               }
> +                               /* XXX Do we need to zero pages[i]? */
> +                               gen_pool_free(dma_pool, (unsigned long)vaddr,
> +                                               nr_pages * PAGE_SIZE);
>                                 goto out;
>                         }
>                 }
> @@ -204,32 +206,89 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
>         mutex_unlock(&list_lock);
>         return ret;
>  }
> -EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
> 
> -/**
> - * xen_free_unpopulated_pages - return unpopulated pages
> - * @nr_pages: Number of pages
> - * @pages: pages to return
> - */
> -void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
> +static void free_unpopulated_pages(unsigned int nr_pages, struct page **pages,
> +               bool contiguous)
>  {
> -       unsigned int i;
> -
>         if (!target_resource) {
> +               if (contiguous)
> +                       return;
> +
>                 xen_free_ballooned_pages(nr_pages, pages);
>                 return;
>         }
> 
>         mutex_lock(&list_lock);
> -       for (i = 0; i < nr_pages; i++) {
> -               pages[i]->zone_device_data = page_list;
> -               page_list = pages[i];
> -               list_count++;
> +
> +       /* XXX Do we need to check the range (gen_pool_has_addr)? */
> +       if (contiguous)
> +               gen_pool_free(dma_pool, (unsigned long)page_to_virt(pages[0]),
> +                               nr_pages * PAGE_SIZE);
> +       else {
> +               unsigned int i;
> +
> +               for (i = 0; i < nr_pages; i++)
> +                       gen_pool_free(dma_pool, (unsigned long)page_to_virt(pages[i]),
> +                                       PAGE_SIZE);
>         }
> +
>         mutex_unlock(&list_lock);
>  }
> +
> +/**
> + * xen_alloc_unpopulated_pages - alloc unpopulated pages
> + * @nr_pages: Number of pages
> + * @pages: pages returned
> + * @return 0 on success, error otherwise
> + */
> +int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
> +{
> +       return alloc_unpopulated_pages(nr_pages, pages, false);
> +}
> +EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
> +
> +/**
> + * xen_free_unpopulated_pages - return unpopulated pages
> + * @nr_pages: Number of pages
> + * @pages: pages to return
> + */
> +void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
> +{
> +       free_unpopulated_pages(nr_pages, pages, false);
> +}
>  EXPORT_SYMBOL(xen_free_unpopulated_pages);
> 
> +/**
> + * xen_alloc_unpopulated_dma_pages - alloc unpopulated DMAable pages
> + * @dev: valid struct device pointer
> + * @nr_pages: Number of pages
> + * @pages: pages returned
> + * @return 0 on success, error otherwise
> + */
> +int xen_alloc_unpopulated_dma_pages(struct device *dev, unsigned int nr_pages,
> +               struct page **pages)
> +{
> +       /* XXX Handle devices which support 64-bit DMA address only for now */
> +       if (dma_get_mask(dev) != DMA_BIT_MASK(64))
> +               return -EINVAL;
> +
> +       return alloc_unpopulated_pages(nr_pages, pages, true);
> +}
> +EXPORT_SYMBOL(xen_alloc_unpopulated_dma_pages);
> +
> +/**
> + * xen_free_unpopulated_dma_pages - return unpopulated DMAable pages
> + * @dev: valid struct device pointer
> + * @nr_pages: Number of pages
> + * @pages: pages to return
> + */
> +void xen_free_unpopulated_dma_pages(struct device *dev, unsigned int nr_pages,
> +               struct page **pages)
> +{
> +       free_unpopulated_pages(nr_pages, pages, true);
> +}
> +EXPORT_SYMBOL(xen_free_unpopulated_dma_pages);
> +
>  static int __init unpopulated_init(void)
>  {
>         int ret;
> @@ -237,9 +296,19 @@ static int __init unpopulated_init(void)
>         if (!xen_domain())
>                 return -ENODEV;
> 
> +       dma_pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
> +       if (!dma_pool) {
> +               pr_err("xen:unpopulated: Cannot create DMA pool\n");
> +               return -ENOMEM;
> +       }
> +
> +       gen_pool_set_algo(dma_pool, gen_pool_best_fit, NULL);
> +
>         ret = arch_xen_unpopulated_init(&target_resource);
>         if (ret) {
>                 pr_err("xen:unpopulated: Cannot initialize target resource\n");
> +               gen_pool_destroy(dma_pool);
> +               dma_pool = NULL;
>                 target_resource = NULL;
>         }
> 
> [snip]
> 
> 
> I think, depending on the approach, we would likely need to do some renaming
> for fill_list, page_list, list_lock, etc.
> 
> 
> Both options work in my Arm64-based environment; I am not sure about x86.
> Or do we have another option here?
> I would be happy to go either route. What do you think?

The second option (use "dma_pool" for all) looks great, thank you for
looking into it!
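
Just to double-check my reading of the new interface, here is a minimal usage
sketch (not part of the patch; the device pointer, the page count and the
header the prototypes end up in are assumptions on my side):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <xen/xen.h>    /* assuming the new prototypes land next to xen_alloc_unpopulated_pages() */

/* Allocate four physically contiguous unpopulated pages for "dev", then free them. */
static int example_alloc(struct device *dev)
{
        struct page *pages[4];
        int ret;

        ret = xen_alloc_unpopulated_dma_pages(dev, ARRAY_SIZE(pages), pages);
        if (ret)
                /* -EINVAL for a non-64-bit DMA mask, -ENODEV without a target resource, -ENOMEM otherwise */
                return ret;

        /* pages[0]..pages[3] come from a single gen_pool allocation, so they are contiguous */

        xen_free_unpopulated_dma_pages(dev, ARRAY_SIZE(pages), pages);
        return 0;
}

It is also nice that the existing xen_alloc_unpopulated_pages() path now goes
through the same gen_pool-backed helper, so both interfaces stay consistent.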
