From: Alistair Popple <apopple@nvidia.com>
To: Balbir Singh <bsingharora@gmail.com>
Cc: <linux-kernel@vger.kernel.org>, <akpm@linux-foundation.org>,
	<david@redhat.com>, <daniel.vetter@ffwll.ch>,
	<dan.j.williams@intel.com>, <gregkh@linuxfoundation.org>,
	<jhubbard@nvidia.com>, <jglisse@redhat.com>, <linux-mm@kvack.org>
Subject: Re: [PATCH v2] kernel/resource: Fix locking in request_free_mem_region
Date: Mon, 29 Mar 2021 12:55:15 +1100
Message-ID: <2574877.iCKU5I5uxK@nvdebian>
In-Reply-To: <20210326051536.GN77072@balbir-desktop>

On Friday, 26 March 2021 4:15:36 PM AEDT Balbir Singh wrote:
> On Fri, Mar 26, 2021 at 12:20:35PM +1100, Alistair Popple wrote:
> > +static int __region_intersects(resource_size_t start, size_t size,
> > +                            unsigned long flags, unsigned long desc)
> > +{
> > +     struct resource res;
> > +     int type = 0; int other = 0;
> > +     struct resource *p;
> > +
> > +     res.start = start;
> > +     res.end = start + size - 1;
> > +
> > +     for (p = iomem_resource.child; p ; p = p->sibling) {
> > +             bool is_type = (((p->flags & flags) == flags) &&
> > +                             ((desc == IORES_DESC_NONE) ||
> > +                              (desc == p->desc)));
> 
> is_type is a bad name, are we saying "is" as in boolean question?
> Or is it short for something like intersection_type? I know you've
> just moved the code over :)

Yeah, I'm not a fan of that name either but I was just moving code over and 
couldn't come up with anything better :)

It is a boolean question though - it is checking whether resource *p is the
same type of region (matching flags and desc) as the one being checked for
intersection.
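
For reference, callers consume the result roughly like this (an illustrative
sketch only; addr, size and the System RAM flags below are made-up example
inputs, not from this patch):

	switch (region_intersects(addr, size, IORESOURCE_SYSTEM_RAM,
				  IORES_DESC_NONE)) {
	case REGION_DISJOINT:
		/* no overlapping resource matches the requested flags/desc */
		break;
	case REGION_INTERSECTS:
		/* every overlapping resource matches the requested flags/desc */
		break;
	case REGION_MIXED:
		/* overlaps both matching and non-matching resources */
		break;
	}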
 
> > +
> > +             if (resource_overlaps(p, &res))
> > +                     is_type ? type++ : other++;
> > +     }
> > +
> > +     if (type == 0)
> > +             return REGION_DISJOINT;
> > +
> > +     if (other == 0)
> > +             return REGION_INTERSECTS;
> > +
> > +     return REGION_MIXED;
> > +}
> > +
> >  /**
> >   * region_intersects() - determine intersection of region with known resources
> >   * @start: region start address
> > @@ -546,31 +574,12 @@ EXPORT_SYMBOL_GPL(page_is_ram);
> >  int region_intersects(resource_size_t start, size_t size, unsigned long flags,
> >                     unsigned long desc)
> >  {
> > -     struct resource res;
> > -     int type = 0; int other = 0;
> > -     struct resource *p;
> > -
> > -     res.start = start;
> > -     res.end = start + size - 1;
> > +     int rc;
> >
> >       read_lock(&resource_lock);
> > -     for (p = iomem_resource.child; p ; p = p->sibling) {
> > -             bool is_type = (((p->flags & flags) == flags) &&
> > -                             ((desc == IORES_DESC_NONE) ||
> > -                              (desc == p->desc)));
> > -
> > -             if (resource_overlaps(p, &res))
> > -                     is_type ? type++ : other++;
> > -     }
> > +     rc = __region_intersects(start, size, flags, desc);
> >       read_unlock(&resource_lock);
> > -
> > -     if (type == 0)
> > -             return REGION_DISJOINT;
> > -
> > -     if (other == 0)
> > -             return REGION_INTERSECTS;
> > -
> > -     return REGION_MIXED;
> > +     return rc;
> >  }
> >  EXPORT_SYMBOL_GPL(region_intersects);
> >
> > @@ -1171,31 +1180,17 @@ struct address_space *iomem_get_mapping(void)
> >       return smp_load_acquire(&iomem_inode)->i_mapping;
> >  }
> >
> > -/**
> > - * __request_region - create a new busy resource region
> > - * @parent: parent resource descriptor
> > - * @start: resource start address
> > - * @n: resource region size
> > - * @name: reserving caller's ID string
> > - * @flags: IO resource flags
> > - */
> > -struct resource * __request_region(struct resource *parent,
> > -                                resource_size_t start, resource_size_t n,
> > -                                const char *name, int flags)
> > +static bool request_region_locked(struct resource *parent,
> > +                                 struct resource *res, resource_size_t start,
> > +                                 resource_size_t n, const char *name, int flags)
> >  {
> > -     DECLARE_WAITQUEUE(wait, current);
> > -     struct resource *res = alloc_resource(GFP_KERNEL);
> >       struct resource *orig_parent = parent;
> > -
> > -     if (!res)
> > -             return NULL;
> > +     DECLARE_WAITQUEUE(wait, current);
> 
> This part of the diff looks confusing, do we have a waitqueue and we call
> schedule() within a function called with the lock held?

Good point. schedule() does get called but the lock is dropped first:

		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}

This isn't an issue though, as that path is only taken for
request_muxed_region(), which isn't used for the ZONE_DEVICE allocation and by
design doesn't search for free space. I.e. IORESOURCE_MUXED will never be set
for request_free_mem_region().
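
To spell that out, the call paths look roughly like this (a sketch only,
pieced together from the macros quoted further down and this patch):

	/* Only request_muxed_region() passes IORESOURCE_MUXED: */
	request_muxed_region(start, n, name)
	  -> __request_region(&ioport_resource, start, n, name, IORESOURCE_MUXED)

	/* The ZONE_DEVICE path always requests with flags == 0: */
	request_free_mem_region() / devm_request_free_mem_region()
	  -> __request_free_mem_region()
	       -> request_region_locked(&iomem_resource, res, addr, size, name, 0)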

> >
> >       res->name = name;
> >       res->start = start;
> >       res->end = start + n - 1;
> >
> > -     write_lock(&resource_lock);
> > -
> >       for (;;) {
> >               struct resource *conflict;
> >
> > @@ -1230,16 +1225,39 @@ struct resource * __request_region(struct resource *parent,
> >                       write_lock(&resource_lock);
> >                       continue;
> >               }
> > -             /* Uhhuh, that didn't work out.. */
> > -             free_resource(res);
> > -             res = NULL;
> > -             break;
> > +             return false;
> >       }
> > -     write_unlock(&resource_lock);
> >
> >       if (res && orig_parent == &iomem_resource)
> >               revoke_iomem(res);
> >
> > +     return true;
> > +}
> > +
> > +/**
> > + * __request_region - create a new busy resource region
> > + * @parent: parent resource descriptor
> > + * @start: resource start address
> > + * @n: resource region size
> > + * @name: reserving caller's ID string
> > + * @flags: IO resource flags
> > + */
> > +struct resource *__request_region(struct resource *parent,
> > +                               resource_size_t start, resource_size_t n,
> > +                               const char *name, int flags)
> > +{
> > +     struct resource *res = alloc_resource(GFP_KERNEL);
> > +
> > +     if (!res)
> > +             return NULL;
> > +
> > +     write_lock(&resource_lock);
> > +     if (!request_region_locked(parent, res, start, n, name, flags)) {
> > +             /* Uhhuh, that didn't work out.. */
> > +             free_resource(res);
> > +             res = NULL;
> > +     }
> > +     write_unlock(&resource_lock);
> 
> Should the function be called __request_region_locked?

This is the name of the original function, so this just maintains the
original behaviour by taking the lock and calling the inner function
(request_region_locked()) rather than having that code inline.

__request_region() is rarely called directly and is mostly called via macros:

include/linux/ioport.h:#define request_region(start,n,name)		__request_region(&ioport_resource, (start), (n), (name), 0)
include/linux/ioport.h:#define request_muxed_region(start,n,name)	__request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
include/linux/ioport.h:#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
include/linux/ioport.h:#define request_mem_region(start,n,name)	__request_region(&iomem_resource, (start), (n), (name), 0)
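
For example, a typical driver call site for one of those wrappers looks
something like this (an illustrative sketch only; base, regs and the "mydev"
name are hypothetical):

	if (!request_mem_region(base, SZ_4K, "mydev"))
		return -EBUSY;

	regs = ioremap(base, SZ_4K);
	if (!regs) {
		release_mem_region(base, SZ_4K);
		return -ENOMEM;
	}

	/* ... use regs, then tear down in reverse order ... */

	iounmap(regs);
	release_mem_region(base, SZ_4K);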

Thanks,

 - Alistair

> >       return res;
> >  }
> >  EXPORT_SYMBOL(__request_region);
> > @@ -1779,26 +1797,50 @@ static struct resource *__request_free_mem_region(struct device *dev,
> >  {
> >       resource_size_t end, addr;
> >       struct resource *res;
> > +     struct region_devres *dr = NULL;
> > +
> > +     res = alloc_resource(GFP_KERNEL);
> > +     if (!res)
> > +             return ERR_PTR(-ENOMEM);
> > +
> > +     if (dev) {
> > +             dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
> > +                               GFP_KERNEL);
> > +             if (!dr) {
> > +                     free_resource(res);
> > +                     return ERR_PTR(-ENOMEM);
> > +             }
> > +     }
> >
> >       size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
> >       end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1);
> >       addr = end - size + 1UL;
> >
> > +     write_lock(&resource_lock);
> >       for (; addr > size && addr >= base->start; addr -= size) {
> > -             if (region_intersects(addr, size, 0, IORES_DESC_NONE) !=
> > +             if (__region_intersects(addr, size, 0, IORES_DESC_NONE) !=
> >                               REGION_DISJOINT)
> >                       continue;
> >
> > -             if (dev)
> > -                     res = devm_request_mem_region(dev, addr, size, name);
> > -             else
> > -                     res = request_mem_region(addr, size, name);
> > -             if (!res)
> > -                     return ERR_PTR(-ENOMEM);
> > +             if (!request_region_locked(&iomem_resource, res, addr,
> > +                                                size, name, 0))
> > +                     break;
> > +
> >               res->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
> > +             if (dev) {
> > +                     dr->parent = &iomem_resource;
> > +                     dr->start = addr;
> > +                     dr->n = size;
> > +                     devres_add(dev, dr);
> > +             }
> > +
> > +             write_unlock(&resource_lock);
> >               return res;
> >       }
> >
> > +     write_unlock(&resource_lock);
> > +     free_resource(res);
> > +
> >       return ERR_PTR(-ERANGE);
> >  }
> >
> 
> Balbir Singh.






  reply	other threads:[~2021-03-29  1:55 UTC|newest]

Thread overview: 14+ messages
2021-03-26  1:20 [PATCH v2] kernel/resource: Fix locking in request_free_mem_region Alistair Popple
2021-03-26  5:15 ` Balbir Singh
2021-03-29  1:55   ` Alistair Popple [this message]
2021-03-29  5:39     ` Balbir Singh
2021-03-26  8:57 ` David Hildenbrand
2021-03-29  1:37   ` Alistair Popple
2021-03-29  9:27     ` David Hildenbrand
2021-03-30  9:13     ` David Hildenbrand
2021-03-31  6:19       ` Alistair Popple
2021-03-31  6:41         ` David Hildenbrand
2021-03-29  5:42 ` [kernel/resource] cf1e4e12c9: WARNING:possible_recursive_locking_detected kernel test robot
2021-03-29  7:53   ` Alistair Popple
2021-04-01  4:56 ` [PATCH v2] kernel/resource: Fix locking in request_free_mem_region Muchun Song
2021-04-01  5:03   ` Alistair Popple
