From: Dan Williams <dan.j.williams@intel.com>
To: akpm@linux-foundation.org
Cc: linux-nvdimm@lists.01.org, linux-kernel@vger.kernel.org,
	Stephen Bates <stephen.bates@microsemi.com>,
	linux-mm@kvack.org
Subject: [PATCH 10/11] mm: enable section-unaligned devm_memremap_pages()
Date: Thu, 01 Dec 2016 14:30:40 -0800
Message-ID: <148063144088.37496.13851137514859626846.stgit@dwillia2-desk3.amr.corp.intel.com>
In-Reply-To: <148063138593.37496.4684424640746238765.stgit@dwillia2-desk3.amr.corp.intel.com>

Teach devm_memremap_pages() about the new sub-section capabilities of
arch_{add,remove}_memory(). Now that the arch routines accept
sub-section-aligned ranges, drop the local fixups that rounded the
passed-in resource out to PA_SECTION_SIZE boundaries before mapping it.
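
For reference, a minimal userspace sketch (illustration only, not kernel
code) of the rounding this patch deletes, assuming x86_64's 128MB
physical section size (PA_SECTION_SIZE == 1 << 27): a 4MB request at a
section-unaligned address used to be inflated to a full 128MB section.

	/* Illustration only: the old section rounding, in userspace. */
	#include <stdio.h>

	#define SECTION_SIZE	(1UL << 27)		/* PA_SECTION_SIZE */
	#define SECTION_MASK	(~(SECTION_SIZE - 1))	/* PA_SECTION_MASK */
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long start = 0x100200000UL;	/* not section-aligned */
		unsigned long size = 0x400000UL;	/* 4MB request */
		unsigned long align_start = start & SECTION_MASK;
		unsigned long align_size = ALIGN(start + size, SECTION_SIZE)
			- align_start;

		printf("requested: [%#lx, %#lx)\n", start, start + size);
		printf("mapped:    [%#lx, %#lx)\n",
				align_start, align_start + align_size);
		return 0;
	}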

Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Stephen Bates <stephen.bates@microsemi.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 kernel/memremap.c |   22 +++++++---------------
 1 file changed, 7 insertions(+), 15 deletions(-)

diff --git a/kernel/memremap.c b/kernel/memremap.c
index faf1b7b4114f..70b3b4e1b8b3 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -254,7 +254,6 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
 {
 	struct page_map *page_map = data;
 	struct resource *res = &page_map->res;
-	resource_size_t align_start, align_size;
 	struct dev_pagemap *pgmap = &page_map->pgmap;
 
 	if (percpu_ref_tryget_live(pgmap->ref)) {
@@ -263,10 +262,8 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
 	}
 
 	/* pages are dead and unused, undo the arch mapping */
-	align_start = res->start & PA_SECTION_MASK;
-	align_size = ALIGN(resource_size(res), PA_SECTION_SIZE);
-	arch_remove_memory(align_start, align_size);
-	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+	arch_remove_memory(res->start, resource_size(res));
+	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 	pgmap_radix_release(res);
 	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
 			"%s: failed to free all reserved pages\n", __func__);
@@ -301,17 +298,13 @@ struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
 void *devm_memremap_pages(struct device *dev, struct resource *res,
 		struct percpu_ref *ref, struct vmem_altmap *altmap)
 {
-	resource_size_t align_start, align_size, align_end;
 	unsigned long pfn, offset, order;
 	pgprot_t pgprot = PAGE_KERNEL;
 	struct dev_pagemap *pgmap;
 	struct page_map *page_map;
 	int error, nid, is_ram;
 
-	align_start = res->start & PA_SECTION_MASK;
-	align_size = ALIGN(res->start + resource_size(res), PA_SECTION_SIZE)
-		- align_start;
-	is_ram = region_intersects(align_start, align_size,
+	is_ram = region_intersects(res->start, resource_size(res),
 		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
 
 	if (is_ram == REGION_MIXED) {
@@ -344,7 +337,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 
 	mutex_lock(&pgmap_lock);
 	error = 0;
-	align_end = align_start + align_size - 1;
 
 	/* we're storing full physical addresses in the radix */
 	BUILD_BUG_ON(sizeof(unsigned long) < sizeof(resource_size_t));
@@ -376,12 +368,12 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	if (nid < 0)
 		nid = numa_mem_id();
 
-	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
-			align_size);
+	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
+			resource_size(res));
 	if (error)
 		goto err_pfn_remap;
 
-	error = arch_add_memory(nid, align_start, align_size, true);
+	error = arch_add_memory(nid, res->start, resource_size(res), true);
 	if (error)
 		goto err_add_memory;
 
@@ -401,7 +393,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	return __va(res->start);
 
  err_add_memory:
-	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
  err_pfn_remap:
  err_radix:
 	pgmap_radix_release(res);
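
As a usage note (hypothetical caller, not part of this series): a driver
can now hand devm_memremap_pages() a resource that starts and ends off a
section boundary, and it is passed through to arch_add_memory() unmodified.

	/* Hypothetical caller sketch, using the signature shown above. */
	static void *example_map(struct device *dev, struct resource *res,
			struct percpu_ref *ref)
	{
		/* res no longer needs padding out to PA_SECTION_SIZE */
		return devm_memremap_pages(dev, res, ref, NULL);
	}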
