From mboxrd@z Thu Jan  1 00:00:00 1970
Subject: [PATCH v4 12/13] mm: enable section-unaligned devm_memremap_pages()
From: Dan Williams
To: akpm@linux-foundation.org
Cc: Michal Hocko, Toshi Kani, Logan Gunthorpe, Stephen Bates,
    linux-nvdimm@lists.01.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org
Date: Wed, 15 Mar 2017 23:07:52 -0700
Message-ID: <148964447222.19438.8770660811344879796.stgit@dwillia2-desk3.amr.corp.intel.com>
In-Reply-To: <148964440651.19438.2288075389153762985.stgit@dwillia2-desk3.amr.corp.intel.com>
References: <148964440651.19438.2288075389153762985.stgit@dwillia2-desk3.amr.corp.intel.com>
User-Agent: StGit/0.17.1-9-g687f
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit

Teach devm_memremap_pages() about the new sub-section capabilities of
arch_{add,remove}_memory().

Cc: Michal Hocko
Cc: Toshi Kani
Cc: Andrew Morton
Cc: Logan Gunthorpe
Cc: Stephen Bates
Signed-off-by: Dan Williams
---
 kernel/memremap.c |   24 +++++++-----------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/kernel/memremap.c b/kernel/memremap.c
index c4f63346ff52..e6476a8e8b6a 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -256,7 +256,6 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
 {
 	struct page_map *page_map = data;
 	struct resource *res = &page_map->res;
-	resource_size_t align_start, align_size;
 	struct dev_pagemap *pgmap = &page_map->pgmap;
 
 	if (percpu_ref_tryget_live(pgmap->ref)) {
@@ -265,14 +264,10 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
 	}
 
 	/* pages are dead and unused, undo the arch mapping */
-	align_start = res->start & PA_SECTION_MASK;
-	align_size = ALIGN(resource_size(res), PA_SECTION_SIZE);
-
 	mem_hotplug_begin();
-	arch_remove_memory(align_start, align_size);
+	arch_remove_memory(res->start, resource_size(res));
 	mem_hotplug_done();
-
-	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 	pgmap_radix_release(res);
 	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
 			"%s: failed to free all reserved pages\n", __func__);
@@ -307,17 +302,13 @@ struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
 void *devm_memremap_pages(struct device *dev, struct resource *res,
 		struct percpu_ref *ref, struct vmem_altmap *altmap)
 {
-	resource_size_t align_start, align_size, align_end;
 	unsigned long pfn, pgoff, order;
 	pgprot_t pgprot = PAGE_KERNEL;
 	struct dev_pagemap *pgmap;
 	struct page_map *page_map;
 	int error, nid, is_ram;
 
-	align_start = res->start & PA_SECTION_MASK;
-	align_size = ALIGN(res->start + resource_size(res), PA_SECTION_SIZE)
-		- align_start;
-	is_ram = region_intersects(align_start, align_size,
+	is_ram = region_intersects(res->start, resource_size(res),
 			IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
 
 	if (is_ram == REGION_MIXED) {
@@ -350,7 +341,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 
 	mutex_lock(&pgmap_lock);
 	error = 0;
-	align_end = align_start + align_size - 1;
 
 	foreach_order_pgoff(res, order, pgoff) {
 		struct dev_pagemap *dup;
@@ -379,13 +369,13 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	if (nid < 0)
 		nid = numa_mem_id();
 
-	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
-			align_size);
+	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
+			resource_size(res));
 	if (error)
 		goto err_pfn_remap;
 
 	mem_hotplug_begin();
-	error = arch_add_memory(nid, align_start, align_size, true);
+	error = arch_add_memory(nid, res->start, resource_size(res), true);
 	mem_hotplug_done();
 	if (error)
 		goto err_add_memory;
@@ -406,7 +396,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	return __va(res->start);
 
  err_add_memory:
-	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res);
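
For context, the following standalone sketch (not part of the patch) shows
what the removed alignment math did: it padded a section-unaligned range out
to full memory-section boundaries before hotplug. PA_SECTION_SHIFT = 27
(128MB sections) matches the x86_64 default, and the resource values are
made-up examples, not taken from the patch:

/* illustrative userspace sketch of the pre-patch padding arithmetic */
#include <stdio.h>
#include <inttypes.h>

#define PA_SECTION_SHIFT	27	/* assumption: 128MB sections (x86_64 default) */
#define PA_SECTION_SIZE		(1ULL << PA_SECTION_SHIFT)
#define PA_SECTION_MASK		(~(PA_SECTION_SIZE - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	/* hypothetical pmem range: 2MB-aligned, but not 128MB-aligned */
	uint64_t start = 0x100200000ULL;	/* stands in for res->start */
	uint64_t size  = 0x1e00000ULL;		/* stands in for resource_size(res): 30MB */

	/* pre-patch: hotplug the full enclosing section(s) */
	uint64_t align_start = start & PA_SECTION_MASK;
	uint64_t align_size  = ALIGN(start + size, PA_SECTION_SIZE) - align_start;

	printf("requested:       %#" PRIx64 " + %#" PRIx64 "\n", start, size);
	printf("section-padded:  %#" PRIx64 " + %#" PRIx64 "\n",
			align_start, align_size);
	return 0;
}

Here a 30MB request balloons to a full 128MB section (0x100000000 + 0x8000000).
With the padding removed, arch_add_memory() and arch_remove_memory() receive
the unmodified res->start / resource_size(res), so the hotplugged range matches
the device resource exactly.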
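
And a hypothetical caller sketch of what this enables: the driver context,
resource values, and percpu_ref setup below are assumptions for illustration,
not code from the patch, but the devm_memremap_pages() signature matches the
one in the diff above:

/* hypothetical driver fragment; assumes the usual device/memremap headers */
static int example_enable_pages(struct device *dev, struct percpu_ref *ref)
{
	struct resource res = {
		.start = 0x100200000ULL,	/* 2MB-aligned, not 128MB-aligned */
		.end   = 0x101ffffffULL,	/* 30MB span */
		.flags = IORESOURCE_MEM,
	};
	void *addr;

	/* with this patch, no PA_SECTION_SIZE padding is applied */
	addr = devm_memremap_pages(dev, &res, ref, NULL);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* addr is a linear-map address backed by struct pages */
	return 0;
}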