From: Christoph Hellwig <hch@lst.de>
To: Dan Williams <dan.j.williams@intel.com>
Cc: linux-nvdimm@lists.01.org, x86@kernel.org,
	linux-kernel@vger.kernel.org, "Michal Hocko" <mhocko@kernel.org>,
	linux-mm@kvack.org, "Jérôme Glisse" <jglisse@redhat.com>,
	linuxppc-dev@lists.ozlabs.org
Subject: [PATCH 09/17] mm: split altmap memory map allocation from normal case
Date: Fri, 29 Dec 2017 08:53:58 +0100
Message-ID: <20171229075406.1936-10-hch@lst.de>
In-Reply-To: <20171229075406.1936-1-hch@lst.de>

No functional changes, just untangling the call chain and documenting
why the altmap is passed around in the hotplug code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
---
 arch/powerpc/mm/init_64.c |  5 ++++-
 arch/x86/mm/init_64.c     |  5 ++++-
 include/linux/mm.h        |  9 ++-------
 mm/sparse-vmemmap.c       | 15 +++------------
 4 files changed, 13 insertions(+), 21 deletions(-)
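
For reference, the calling convention that results from this split looks
roughly like the sketch below (illustrative only, not part of the patch;
the caller name is made up, the two helpers are the ones this patch exports
from mm/sparse-vmemmap.c and declares in include/linux/mm.h):

	#include <linux/mm.h>	/* vmemmap_alloc_block_buf(), altmap_alloc_block_buf(), struct vmem_altmap */

	/*
	 * Callers now pick the allocator explicitly: with an altmap the
	 * memmap pages are carved out of the device memory it describes,
	 * without one they come from regular node-local memory.
	 */
	static void * __meminit example_alloc_memmap_block(unsigned long size,
			int node, struct vmem_altmap *altmap)
	{
		if (altmap)
			return altmap_alloc_block_buf(size, altmap);
		return vmemmap_alloc_block_buf(size, node);
	}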

diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index db7d4e092157..7a2251d99ed3 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -200,7 +200,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		if (vmemmap_populated(start, page_size))
 			continue;
 
-		p =  __vmemmap_alloc_block_buf(page_size, node, altmap);
+		if (altmap)
+			p = altmap_alloc_block_buf(page_size, altmap);
+		else
+			p = vmemmap_alloc_block_buf(page_size, node);
 		if (!p)
 			return -ENOMEM;
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 0cab4b5b59ba..1ab42c852069 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1385,7 +1385,10 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
 		if (pmd_none(*pmd)) {
 			void *p;
 
-			p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
+			if (altmap)
+				p = altmap_alloc_block_buf(PMD_SIZE, altmap);
+			else
+				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
 			if (p) {
 				pte_t entry;
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fd01135324b6..09637c353de0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2547,13 +2547,8 @@ pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
 void *vmemmap_alloc_block(unsigned long size, int node);
 struct vmem_altmap;
-void *__vmemmap_alloc_block_buf(unsigned long size, int node,
-		struct vmem_altmap *altmap);
-static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
-{
-	return __vmemmap_alloc_block_buf(size, node, NULL);
-}
-
+void *vmemmap_alloc_block_buf(unsigned long size, int node);
+void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
 			       int node);
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 376dcf05a39c..d012c9e2811b 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -74,7 +74,7 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 }
 
 /* need to make sure size is all the same during early stage */
-static void * __meminit alloc_block_buf(unsigned long size, int node)
+void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
 {
 	void *ptr;
 
@@ -129,7 +129,7 @@ static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
 	return pfn + nr_align;
 }
 
-static void * __meminit altmap_alloc_block_buf(unsigned long size,
+void * __meminit altmap_alloc_block_buf(unsigned long size,
 		struct vmem_altmap *altmap)
 {
 	unsigned long pfn, nr_pfns;
@@ -153,15 +153,6 @@ static void * __meminit altmap_alloc_block_buf(unsigned long size,
 	return ptr;
 }
 
-/* need to make sure size is all the same during early stage */
-void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
-		struct vmem_altmap *altmap)
-{
-	if (altmap)
-		return altmap_alloc_block_buf(size, altmap);
-	return alloc_block_buf(size, node);
-}
-
 void __meminit vmemmap_verify(pte_t *pte, int node,
 				unsigned long start, unsigned long end)
 {
@@ -178,7 +169,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
 	pte_t *pte = pte_offset_kernel(pmd, addr);
 	if (pte_none(*pte)) {
 		pte_t entry;
-		void *p = alloc_block_buf(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
-- 
2.14.2
