From: Christoph Hellwig <hch@lst.de>
To: Dan Williams <dan.j.williams@intel.com>, Jason Gunthorpe <jgg@mellanox.com>
Cc: linux-mm@kvack.org, Andrew Morton <akpm@linux-foundation.org>,
	linux-nvdimm@lists.01.org, linux-kernel@vger.kernel.org,
	Bharata B Rao <bharata@linux.ibm.com>
Subject: [PATCH 4/4] memremap: provide a not device managed memremap_pages
Date: Fri, 16 Aug 2019 08:54:34 +0200
Message-ID: <20190816065434.2129-5-hch@lst.de>
In-Reply-To: <20190816065434.2129-1-hch@lst.de>

The kvmppc ultravisor code wants a device private memory pool that is
system wide and not attached to a device.  Instead of faking one up,
provide a low-level memremap_pages for it.  Note that this function
does not have a devm cleanup routine associated with it, to discourage
use from more driver-like users.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/memremap.h |  2 +
 mm/memremap.c            | 84 +++++++++++++++++++++++++---------------
 2 files changed, 54 insertions(+), 32 deletions(-)

diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 8f0013e18e14..fb2a0bd826b9 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -123,6 +123,8 @@ static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
 }
 
 #ifdef CONFIG_ZONE_DEVICE
+void *memremap_pages(struct dev_pagemap *pgmap, int nid);
+void memunmap_pages(struct dev_pagemap *pgmap);
 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
 struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
diff --git a/mm/memremap.c b/mm/memremap.c
index 4e11da4ecab9..9e163fe367ae 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -102,9 +102,8 @@ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
 	pgmap->ref = NULL;
 }
 
-static void devm_memremap_pages_release(void *data)
+void memunmap_pages(struct dev_pagemap *pgmap)
 {
-	struct dev_pagemap *pgmap = data;
 	struct resource *res = &pgmap->res;
 	unsigned long pfn;
 	int nid;
@@ -134,6 +133,12 @@ static void devm_memremap_pages_release(void *data)
 	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
 	devmap_managed_enable_put();
 }
+EXPORT_SYMBOL_GPL(memunmap_pages);
+
+static void devm_memremap_pages_release(void *data)
+{
+	memunmap_pages(data);
+}
 
 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
 {
@@ -143,27 +148,12 @@ static void dev_pagemap_percpu_release(struct percpu_ref *ref)
 	complete(&pgmap->done);
 }
 
-/**
- * devm_memremap_pages - remap and provide memmap backing for the given resource
- * @dev: hosting device for @res
- * @pgmap: pointer to a struct dev_pagemap
- *
- * Notes:
- * 1/ At a minimum the res and type members of @pgmap must be initialized
- *    by the caller before passing it to this function
- *
- * 2/ The altmap field may optionally be initialized, in which case
- *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
- *
- * 3/ The ref field may optionally be provided, in which pgmap->ref must be
- *    'live' on entry and will be killed and reaped at
- *    devm_memremap_pages_release() time, or if this routine fails.
- *
- * 4/ res is expected to be a host memory range that could feasibly be
- *    treated as a "System RAM" range, i.e. not a device mmio range, but
- *    this is not enforced.
+/*
+ * Not device managed version of devm_memremap_pages, undone by
+ * memunmap_pages().  Please use devm_memremap_pages if you have a struct
+ * device available.
  */
-void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
+void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 {
 	struct resource *res = &pgmap->res;
 	struct dev_pagemap *conflict_pgmap;
@@ -174,7 +164,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 		.altmap = pgmap_altmap(pgmap),
 	};
 	pgprot_t pgprot = PAGE_KERNEL;
-	int error, nid, is_ram;
+	int error, is_ram;
 	bool need_devmap_managed = true;
 
 	switch (pgmap->type) {
@@ -229,7 +219,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 
 	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
 	if (conflict_pgmap) {
-		dev_WARN(dev, "Conflicting mapping in same section\n");
+		WARN(1, "Conflicting mapping in same section\n");
 		put_dev_pagemap(conflict_pgmap);
 		error = -ENOMEM;
 		goto err_array;
@@ -237,7 +227,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 
 	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
 	if (conflict_pgmap) {
-		dev_WARN(dev, "Conflicting mapping in same section\n");
+		WARN(1, "Conflicting mapping in same section\n");
 		put_dev_pagemap(conflict_pgmap);
 		error = -ENOMEM;
 		goto err_array;
@@ -258,7 +248,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	if (error)
 		goto err_array;
 
-	nid = dev_to_node(dev);
 	if (nid < 0)
 		nid = numa_mem_id();
 
@@ -314,12 +303,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 				PHYS_PFN(res->start),
 				PHYS_PFN(resource_size(res)), pgmap);
 	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
-
-	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
-			pgmap);
-	if (error)
-		return ERR_PTR(error);
-
 	return __va(res->start);
 
  err_add_memory:
@@ -334,6 +317,43 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	devmap_managed_enable_put();
 	return ERR_PTR(error);
 }
+EXPORT_SYMBOL_GPL(memremap_pages);
+
+/**
+ * devm_memremap_pages - remap and provide memmap backing for the given resource
+ * @dev: hosting device for @res
+ * @pgmap: pointer to a struct dev_pagemap
+ *
+ * Notes:
+ * 1/ At a minimum the res and type members of @pgmap must be initialized
+ *    by the caller before passing it to this function
+ *
+ * 2/ The altmap field may optionally be initialized, in which case
+ *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
+ *
+ * 3/ The ref field may optionally be provided, in which pgmap->ref must be
+ *    'live' on entry and will be killed and reaped at
+ *    devm_memremap_pages_release() time, or if this routine fails.
+ *
+ * 4/ res is expected to be a host memory range that could feasibly be
+ *    treated as a "System RAM" range, i.e. not a device mmio range, but
+ *    this is not enforced.
+ */
+void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
+{
+	int error;
+	void *ret;
+
+	ret = memremap_pages(pgmap, dev_to_node(dev));
+	if (IS_ERR(ret))
+		return ret;
+
+	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
+			pgmap);
+	if (error)
+		return ERR_PTR(error);
+	return ret;
+}
 EXPORT_SYMBOL_GPL(devm_memremap_pages);
 
 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
-- 
2.20.1
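For readers wondering what a caller without a struct device ends up looking
like, below is a minimal usage sketch of the new interface, loosely modeled
on the kvmppc ultravisor case described above.  It leans on the not device
managed request_free_mem_region() variant added in patch 1/4; all of the
example_* names, the stubbed dev_pagemap_ops callbacks, and the exact error
handling are assumptions made for illustration, not code from this series.

/* Sketch only: a system wide MEMORY_DEVICE_PRIVATE pool with no device. */
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/numa.h>

static struct dev_pagemap example_pgmap;

static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
{
	/* A real user would migrate the device private page back to RAM. */
	return VM_FAULT_SIGBUS;
}

static void example_page_free(struct page *page)
{
	/* A real user would return the page to its private allocator. */
}

static const struct dev_pagemap_ops example_pgmap_ops = {
	.page_free	= example_page_free,
	.migrate_to_ram	= example_migrate_to_ram,
};

static int example_pool_create(unsigned long size)
{
	struct resource *res;
	void *addr;

	/* Carve a free physical range out of iomem to back the pool. */
	res = request_free_mem_region(&iomem_resource, size, "example-pool");
	if (IS_ERR(res))
		return PTR_ERR(res);

	example_pgmap.type = MEMORY_DEVICE_PRIVATE;
	example_pgmap.res = *res;
	example_pgmap.ops = &example_pgmap_ops;

	/* A negative nid makes memremap_pages() fall back to numa_mem_id(). */
	addr = memremap_pages(&example_pgmap, NUMA_NO_NODE);
	if (IS_ERR(addr)) {
		release_mem_region(res->start, resource_size(res));
		return PTR_ERR(addr);
	}
	return 0;
}

static void example_pool_destroy(void)
{
	/* Undo memremap_pages(), then hand the physical range back. */
	memunmap_pages(&example_pgmap);
	release_mem_region(example_pgmap.res.start,
			   resource_size(&example_pgmap.res));
}

Passing NUMA_NO_NODE is what distinguishes this path from
devm_memremap_pages(), which derives the node from dev_to_node(); teardown
goes through the new memunmap_pages() instead of a devm action.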