All of lore.kernel.org
 help / color / mirror / Atom feed
From: <artem.kuzin@huawei.com>
To: <x86@kernel.org>, <tglx@linutronix.de>, <mingo@redhat.com>,
	<bp@alien8.de>, <dave.hansen@linux.intel.com>, <hpa@zytor.com>,
	<luto@kernel.org>, <peterz@infradead.org>,
	<akpm@linux-foundation.org>, <urezki@gmail.com>,
	<hch@infradead.org>, <lstoakes@gmail.com>, <mcgrof@kernel.org>,
	<rmk+kernel@armlinux.org.uk>
Cc: <nikita.panov@huawei-partners.com>,
	<alexander.grubnikov@huawei.com>, <stepanov.anatoly@huawei.com>,
	<guohanjun@huawei.com>, <weiyongjun1@huawei.com>,
	<wangkefeng.wang@huawei.com>, <judy.chenhui@huawei.com>,
	<yusongping@huawei.com>, <kang.sun@huawei.com>,
	<linux-mm@kvack.org>, <linux-modules@vger.kernel.org>
Subject: [PATCH RFC 01/12] mm: allow per-NUMA node local PUD/PMD allocation
Date: Thu, 28 Dec 2023 21:10:45 +0800	[thread overview]
Message-ID: <20231228131056.602411-2-artem.kuzin@huawei.com> (raw)
In-Reply-To: <20231228131056.602411-1-artem.kuzin@huawei.com>

From: Artem Kuzin <artem.kuzin@huawei.com>

Co-developed-by: Nikita Panov <nikita.panov@huawei-partners.com>
Signed-off-by: Nikita Panov <nikita.panov@huawei-partners.com>
Co-developed-by: Alexander Grubnikov <alexander.grubnikov@huawei.com>
Signed-off-by: Alexander Grubnikov <alexander.grubnikov@huawei.com>
Signed-off-by: Artem Kuzin <artem.kuzin@huawei.com>
---
 include/asm-generic/pgalloc.h | 34 ++++++++++++++++++++++++++++++++++
 include/linux/gfp.h           |  2 ++
 mm/page_alloc.c               | 18 ++++++++++++++++++
 3 files changed, 54 insertions(+)

diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index a7cf825befae..6364375388bf 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -132,6 +132,24 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 	}
 	return (pmd_t *)page_address(page);
 }
+
+static inline pmd_t *pmd_alloc_one_node(unsigned int nid,
+					struct mm_struct *mm, unsigned long addr) /* NOTE(review): nid is unsigned, so NUMA_NO_NODE (-1) cannot be passed — confirm intended */
+{
+	struct page *page;
+	gfp_t gfp = GFP_PGTABLE_USER;	/* default: user page-table allocation flags */
+
+	if (mm == &init_mm)
+		gfp = GFP_PGTABLE_KERNEL;	/* init_mm => kernel page-table flags, mirroring pmd_alloc_one() */
+	page = alloc_pages_node(nid, gfp, 0);	/* order-0 page pinned to the requested node */
+	if (!page)
+		return NULL;
+	if (!pgtable_pmd_page_ctor(page)) {	/* page-table ctor (ptlock etc.) may fail */
+		__free_pages(page, 0);	/* undo the allocation on ctor failure */
+		return NULL;
+	}
+	return (pmd_t *)page_address(page);	/* page must be lowmem for this to be valid */
+}
 #endif
 
 #ifndef __HAVE_ARCH_PMD_FREE
@@ -156,6 +174,16 @@ static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 	return (pud_t *)get_zeroed_page(gfp);
 }
 
+static inline pud_t *__pud_alloc_one_node(unsigned int nid,
+					  struct mm_struct *mm, unsigned long addr) /* node-local variant of __pud_alloc_one(); addr is unused, kept for signature parity */
+{
+	gfp_t gfp = GFP_PGTABLE_USER;	/* default: user page-table allocation flags */
+
+	if (mm == &init_mm)
+		gfp = GFP_PGTABLE_KERNEL;	/* init_mm => kernel page-table flags */
+	return (pud_t *)get_zeroed_page_node(nid, gfp);	/* zeroed order-0 page from the requested node */
+}
+
 #ifndef __HAVE_ARCH_PUD_ALLOC_ONE
 /**
  * pud_alloc_one - allocate a page for PUD-level page table
@@ -170,6 +198,12 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	return __pud_alloc_one(mm, addr);
 }
+
+static inline pud_t *pud_alloc_one_node(unsigned int nid,
+					struct mm_struct *mm, unsigned long addr) /* arch-overridable entry point, matching pud_alloc_one() */
+{
+	return __pud_alloc_one_node(nid, mm, addr);	/* thin wrapper; generic implementation above */
+}
 #endif
 
 static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 665f06675c83..6ee0004b9774 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -288,6 +288,8 @@ static inline struct page *alloc_page_vma(gfp_t gfp,
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+extern unsigned long __get_free_pages_node(unsigned int nid, gfp_t gfp_mask, unsigned int order);
+extern unsigned long get_zeroed_page_node(unsigned int nid, gfp_t gfp_mask);
 
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
 void free_pages_exact(void *virt, size_t size);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7d3460c7a480..dc8f4a57d8b1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4537,6 +4537,24 @@ unsigned long get_zeroed_page(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(get_zeroed_page);
 
+unsigned long __get_free_pages_node(unsigned int nid, gfp_t gfp_mask,
+		unsigned int order)	/* node-local counterpart of __get_free_pages(); NOTE(review): unsigned nid excludes NUMA_NO_NODE — confirm */
+{
+	struct page *page;
+
+	page = alloc_pages_node(nid, gfp_mask & ~__GFP_HIGHMEM, order);	/* strip __GFP_HIGHMEM: a highmem page has no kernel virtual address to return */
+	if (!page)
+		return 0;	/* 0 (not NULL): return type is an address as unsigned long */
+	return (unsigned long) page_address(page);
+}
+EXPORT_SYMBOL(__get_free_pages_node);
+
+unsigned long get_zeroed_page_node(unsigned int nid, gfp_t gfp_mask)	/* node-local counterpart of get_zeroed_page() */
+{
+	return __get_free_pages_node(nid, gfp_mask | __GFP_ZERO, 0);	/* single zeroed order-0 page from nid */
+}
+EXPORT_SYMBOL(get_zeroed_page_node);
+
 /**
  * __free_pages - Free pages allocated with alloc_pages().
  * @page: The page pointer returned from alloc_pages().
-- 
2.34.1


  reply	other threads:[~2023-12-28 13:28 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-12-28 13:10 [PATCH RFC 00/12] x86 NUMA-aware kernel replication artem.kuzin
2023-12-28 13:10 ` artem.kuzin [this message]
2023-12-28 13:10 ` [PATCH RFC 02/12] mm: add config option and per-NUMA node VMS support artem.kuzin
2024-01-03 19:43   ` Christoph Lameter (Ampere)
2024-01-09 16:57     ` Artem Kuzin
2024-01-25 15:07       ` Dave Hansen
2024-01-29  6:22         ` Artem Kuzin
2024-01-30 23:36           ` Dave Hansen
2023-12-28 13:10 ` [PATCH RFC 03/12] mm: per-NUMA node replication core infrastructure artem.kuzin
2023-12-28 13:10 ` [PATCH RFC 04/12] x86: add support of memory protection for NUMA replicas artem.kuzin
2024-01-09  6:46   ` Garg, Shivank
2024-01-09 15:53     ` a00561249@china.huawei.com
2024-01-10  6:19       ` Garg, Shivank
2023-12-28 13:10 ` [PATCH RFC 05/12] x86: enable memory protection for replicated memory artem.kuzin
2023-12-28 13:10 ` [PATCH RFC 06/12] x86: align kernel text and rodata using HUGE_PAGE boundary artem.kuzin
2023-12-28 13:10 ` [PATCH RFC 07/12] x86: enable per-NUMA node kernel text and rodata replication artem.kuzin
2023-12-28 13:10 ` [PATCH RFC 08/12] x86: make kernel text patching aware about replicas artem.kuzin
2023-12-28 13:10 ` [PATCH RFC 09/12] x86: add support of NUMA replication for efi page tables artem.kuzin
2023-12-28 13:10 ` [PATCH RFC 10/12] mm: add replicas allocation support for vmalloc artem.kuzin
2023-12-28 13:10 ` [PATCH RFC 11/12] x86: add kernel modules text and rodata replication support artem.kuzin
2023-12-28 13:10 ` [PATCH RFC 12/12] mm: set memory permissions for BPF handlers replicas artem.kuzin
2024-01-10 10:03 ` [PATCH RFC 00/12] x86 NUMA-aware kernel replication Russell King (Oracle)
2024-01-25  4:30 ` Garg, Shivank
2024-01-29  7:51   ` Artem Kuzin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20231228131056.602411-2-artem.kuzin@huawei.com \
    --to=artem.kuzin@huawei.com \
    --cc=akpm@linux-foundation.org \
    --cc=alexander.grubnikov@huawei.com \
    --cc=bp@alien8.de \
    --cc=dave.hansen@linux.intel.com \
    --cc=guohanjun@huawei.com \
    --cc=hch@infradead.org \
    --cc=hpa@zytor.com \
    --cc=judy.chenhui@huawei.com \
    --cc=kang.sun@huawei.com \
    --cc=linux-mm@kvack.org \
    --cc=linux-modules@vger.kernel.org \
    --cc=lstoakes@gmail.com \
    --cc=luto@kernel.org \
    --cc=mcgrof@kernel.org \
    --cc=mingo@redhat.com \
    --cc=nikita.panov@huawei-partners.com \
    --cc=peterz@infradead.org \
    --cc=rmk+kernel@armlinux.org.uk \
    --cc=stepanov.anatoly@huawei.com \
    --cc=tglx@linutronix.de \
    --cc=urezki@gmail.com \
    --cc=wangkefeng.wang@huawei.com \
    --cc=weiyongjun1@huawei.com \
    --cc=x86@kernel.org \
    --cc=yusongping@huawei.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.