From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	x86@kernel.org, Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>,
	Borislav Petkov <bp@alien8.de>,
	Peter Zijlstra <peterz@infradead.org>,
	Andy Lutomirski <luto@amacapital.net>,
	David Howells <dhowells@redhat.com>
Cc: Kees Cook <keescook@chromium.org>,
	Dave Hansen <dave.hansen@intel.com>,
	Kai Huang <kai.huang@linux.intel.com>,
	Jacob Pan <jacob.jun.pan@linux.intel.com>,
	Alison Schofield <alison.schofield@intel.com>,
	linux-mm@kvack.org, kvm@vger.kernel.org,
	keyrings@vger.kernel.org, linux-kernel@vger.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH, RFC 05/62] mm/page_alloc: Handle allocation for encrypted memory
Date: Wed, 08 May 2019 14:43:25 +0000	[thread overview]
Message-ID: <20190508144422.13171-6-kirill.shutemov@linux.intel.com> (raw)
In-Reply-To: <20190508144422.13171-1-kirill.shutemov@linux.intel.com>

For encrypted memory, we need to allocate pages for a specific
encryption KeyID.

There are two cases when we need to allocate a page for encryption:

 - Allocation for an encrypted VMA;

 - Allocation for migration of an encrypted page.

The first case can be covered within alloc_page_vma(): the KeyID is
known from the VMA.
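
For illustration only (not part of the patch), a sketch of the first
case. The caller does not change: alloc_page_vma() looks up the KeyID
from the VMA, drops __GFP_ZERO via deferred_page_zero() and lets
prep_encrypted_page() clear the page once the KeyID is assigned:

	/* Hypothetical caller: anonymous fault into an encrypted VMA */
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, addr);
	if (!page)
		return VM_FAULT_OOM;
	/* page is already prepared and cleared for vma_keyid(vma) */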

The second case requires a few new page allocation routines that
allocate pages for a specific KeyID.
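
As a rough sketch of the second case (mirroring the migration callbacks
converted below), a migration target is allocated with the KeyID of the
source page; new_page_keyid() is a made-up name used only for this
example:

	static struct page *new_page_keyid(struct page *page,
					   unsigned long node)
	{
		return __alloc_pages_node_keyid((int) node, page_keyid(page),
						GFP_HIGHUSER_MOVABLE |
						__GFP_THISNODE, 0);
	}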

An encrypted page has to be cleared after the KeyID is set. This is
handled by prep_encrypted_page(), which will be provided by
arch-specific code.

Any custom allocator that deals with encrypted pages has to call
prep_encrypted_page() too. See compaction_alloc() for an example.
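
For reference, a simplified sketch of what the arch hook could look
like; the real x86/MKTME implementation arrives later in the series,
and set_page_keyid() below is only a placeholder for the arch-specific
bookkeeping:

	static void prep_encrypted_page(struct page *page, int order,
			int keyid, bool zero)
	{
		int i;

		for (i = 0; i < (1 << order); i++) {
			/* Placeholder: record the KeyID so page_keyid() and
			 * the per-KeyID direct mapping see the new key. */
			set_page_keyid(page + i, keyid);

			/* Deferred __GFP_ZERO: clear through the new mapping */
			if (zero)
				clear_highpage(page + i);
		}
	}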

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 include/linux/gfp.h     | 45 ++++++++++++++++++++++++++++++++-----
 include/linux/migrate.h | 14 +++++++++---
 mm/compaction.c         |  3 +++
 mm/mempolicy.c          | 27 ++++++++++++++++------
 mm/migrate.c            |  4 ++--
 mm/page_alloc.c         | 50 +++++++++++++++++++++++++++++++++++++++++
 6 files changed, 126 insertions(+), 17 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b101aa294157..1716dbe587c9 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -463,16 +463,43 @@ static inline void arch_free_page(struct page *page, int order) { }
 static inline void arch_alloc_page(struct page *page, int order) { }
 #endif
 
+#ifndef prep_encrypted_page
+static inline void prep_encrypted_page(struct page *page, int order,
+		int keyid, bool zero)
+{
+}
+#endif
+
+/*
+ * Encrypted page has to be cleared once keyid is set, not on allocation.
+ */
+static inline bool deferred_page_zero(int keyid, gfp_t *gfp_mask)
+{
+	if (keyid && (*gfp_mask & __GFP_ZERO)) {
+		*gfp_mask &= ~__GFP_ZERO;
+		return true;
+	}
+
+	return false;
+}
+
 struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 							nodemask_t *nodemask);
 
+struct page *
+__alloc_pages_nodemask_keyid(gfp_t gfp_mask, unsigned int order,
+		int preferred_nid, nodemask_t *nodemask, int keyid);
+
 static inline struct page *
 __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
 {
 	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
 }
 
+struct page *__alloc_pages_node_keyid(int nid, int keyid,
+		gfp_t gfp_mask, unsigned int order);
+
 /*
  * Allocate pages, preferring the node given as nid. The node must be valid and
  * online. For more general interface, see alloc_pages_node().
@@ -500,6 +527,19 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 	return __alloc_pages_node(nid, gfp_mask, order);
 }
 
+static inline struct page *alloc_pages_node_keyid(int nid, int keyid,
+		gfp_t gfp_mask, unsigned int order)
+{
+	if (nid == NUMA_NO_NODE)
+		nid = numa_mem_id();
+
+	return __alloc_pages_node_keyid(nid, keyid, gfp_mask, order);
+}
+
+extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
+			struct vm_area_struct *vma, unsigned long addr,
+			int node, bool hugepage);
+
 #ifdef CONFIG_NUMA
 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
 
@@ -508,14 +548,9 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
 	return alloc_pages_current(gfp_mask, order);
 }
-extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
-			struct vm_area_struct *vma, unsigned long addr,
-			int node, bool hugepage);
 #else
 #define alloc_pages(gfp_mask, order) \
 		alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
-	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 #define alloc_page_vma(gfp_mask, vma, addr)			\
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index e13d9bf2f9a5..a6e068762d08 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -38,9 +38,16 @@ static inline struct page *new_page_nodemask(struct page *page,
 	unsigned int order = 0;
 	struct page *new_page = NULL;
 
-	if (PageHuge(page))
+	if (PageHuge(page)) {
+		/*
+		 * HugeTLB doesn't support encryption. We shouldn't see
+		 * such pages.
+		 */
+		if (WARN_ON_ONCE(page_keyid(page)))
+			return NULL;
 		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
 				preferred_nid, nodemask);
+	}
 
 	if (PageTransHuge(page)) {
 		gfp_mask |= GFP_TRANSHUGE;
@@ -50,8 +57,9 @@ static inline struct page *new_page_nodemask(struct page *page,
 	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages_nodemask(gfp_mask, order,
-				preferred_nid, nodemask);
+	/* Allocate a page with the same KeyID as the source page */
+	new_page = __alloc_pages_nodemask_keyid(gfp_mask, order,
+				preferred_nid, nodemask, page_keyid(page));
 
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);
diff --git a/mm/compaction.c b/mm/compaction.c
index 3319e0872d01..559b8bd6d245 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1557,6 +1557,9 @@ static struct page *compaction_alloc(struct page *migratepage,
 	list_del(&freepage->lru);
 	cc->nr_freepages--;
 
+	/* Prepare the page using the same KeyID as the source page */
+	if (freepage)
+		prep_encrypted_page(freepage, 0, page_keyid(migratepage), false);
 	return freepage;
 }
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 14b18449c623..5cad39fb7b35 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -961,22 +961,29 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 /* page allocation callback for NUMA node migration */
 struct page *alloc_new_node_page(struct page *page, unsigned long node)
 {
-	if (PageHuge(page))
+	if (PageHuge(page)) {
+		/*
+		 * HugeTLB doesn't support encryption. We shouldn't see
+		 * such pages.
+		 */
+		if (WARN_ON_ONCE(page_keyid(page)))
+			return NULL;
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
 					node);
-	else if (PageTransHuge(page)) {
+	} else if (PageTransHuge(page)) {
 		struct page *thp;
 
-		thp = alloc_pages_node(node,
+		thp = alloc_pages_node_keyid(node, page_keyid(page),
 			(GFP_TRANSHUGE | __GFP_THISNODE),
 			HPAGE_PMD_ORDER);
 		if (!thp)
 			return NULL;
 		prep_transhuge_page(thp);
 		return thp;
-	} else
-		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
-						    __GFP_THISNODE, 0);
+	} else {
+		return __alloc_pages_node_keyid(node, page_keyid(page),
+				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
+	}
 }
 
 /*
@@ -2053,9 +2060,13 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 {
 	struct mempolicy *pol;
 	struct page *page;
-	int preferred_nid;
+	bool deferred_zero;
+	int keyid, preferred_nid;
 	nodemask_t *nmask;
 
+	keyid = vma_keyid(vma);
+	deferred_zero = deferred_page_zero(keyid, &gfp);
+
 	pol = get_vma_policy(vma, addr);
 
 	if (pol->mode == MPOL_INTERLEAVE) {
@@ -2097,6 +2108,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
+	if (page)
+		prep_encrypted_page(page, order, keyid, deferred_zero);
 	return page;
 }
 
diff --git a/mm/migrate.c b/mm/migrate.c
index 663a5449367a..04b36a56865d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1880,7 +1880,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 	int nid = (int) data;
 	struct page *newpage;
 
-	newpage = __alloc_pages_node(nid,
+	newpage = __alloc_pages_node_keyid(nid, page_keyid(page),
 					 (GFP_HIGHUSER_MOVABLE |
 					  __GFP_THISNODE | __GFP_NOMEMALLOC |
 					  __GFP_NORETRY | __GFP_NOWARN) &
@@ -2006,7 +2006,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	int page_lru = page_is_file_cache(page);
 	unsigned long start = address & HPAGE_PMD_MASK;
 
-	new_page = alloc_pages_node(node,
+	new_page = alloc_pages_node_keyid(node, page_keyid(page),
 		(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
 		HPAGE_PMD_ORDER);
 	if (!new_page)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c02cff1ed56e..ab1d8661aa87 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3930,6 +3930,41 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
 }
 #endif /* CONFIG_COMPACTION */
 
+#ifndef CONFIG_NUMA
+struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
+		struct vm_area_struct *vma, unsigned long addr,
+		int node, bool hugepage)
+{
+	struct page *page;
+	bool deferred_zero;
+	int keyid = vma_keyid(vma);
+
+	deferred_zero = deferred_page_zero(keyid, &gfp_mask);
+	page = alloc_pages(gfp_mask, order);
+	if (page)
+		prep_encrypted_page(page, order, keyid, deferred_zero);
+
+	return page;
+}
+#endif
+
+struct page * __alloc_pages_node_keyid(int nid, int keyid,
+		gfp_t gfp_mask, unsigned int order)
+{
+	struct page *page;
+	bool deferred_zero;
+
+	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+	VM_WARN_ON(!node_online(nid));
+
+	deferred_zero = deferred_page_zero(keyid, &gfp_mask);
+	page = __alloc_pages(gfp_mask, order, nid);
+	if (page)
+		prep_encrypted_page(page, order, keyid, deferred_zero);
+
+	return page;
+}
+
 #ifdef CONFIG_LOCKDEP
 static struct lockdep_map __fs_reclaim_map =
 	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
@@ -4645,6 +4680,21 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
 
+struct page *
+__alloc_pages_nodemask_keyid(gfp_t gfp_mask, unsigned int order,
+		int preferred_nid, nodemask_t *nodemask, int keyid)
+{
+	struct page *page;
+	bool deferred_zero;
+
+	deferred_zero = deferred_page_zero(keyid, &gfp_mask);
+	page = __alloc_pages_nodemask(gfp_mask, order, preferred_nid, nodemask);
+	if (page)
+		prep_encrypted_page(page, order, keyid, deferred_zero);
+	return page;
+}
+EXPORT_SYMBOL(__alloc_pages_nodemask_keyid);
+
 /*
  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
  * address cannot represent highmem pages. Use alloc_pages and then kmap if
-- 
2.20.1


Thread overview: 324+ messages
2019-05-08 14:43 [PATCH, RFC 00/62] Intel MKTME enabling Kirill A. Shutemov
2019-05-08 14:43 ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 01/62] mm: Do no merge VMAs with different encryption KeyIDs Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 02/62] mm: Add helpers to setup zero page mappings Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-29  7:21   ` Mike Rapoport
2019-05-29  7:21     ` Mike Rapoport
2019-05-08 14:43 ` [PATCH, RFC 03/62] mm/ksm: Do not merge pages with different KeyIDs Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-10 18:07   ` Dave Hansen
2019-05-10 18:07     ` Dave Hansen
2019-05-13 14:27     ` Kirill A. Shutemov
2019-05-13 14:27       ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 04/62] mm/page_alloc: Unify alloc_hugepage_vma() Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` Kirill A. Shutemov [this message]
2019-05-08 14:43   ` [PATCH, RFC 05/62] mm/page_alloc: Handle allocation for encrypted memory Kirill A. Shutemov
2019-05-29  7:21   ` Mike Rapoport
2019-05-29  7:21     ` Mike Rapoport
2019-05-29 12:47     ` Kirill A. Shutemov
2019-05-29 12:47       ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 06/62] mm/khugepaged: Handle encrypted pages Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 07/62] x86/mm: Mask out KeyID bits from page table entry pfn Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 08/62] x86/mm: Introduce variables to store number, shift and mask of KeyIDs Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 09/62] x86/mm: Preserve KeyID on pte_modify() and pgprot_modify() Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-06-14  9:15   ` Peter Zijlstra
2019-06-14  9:15     ` Peter Zijlstra
2019-06-14 13:03     ` Kirill A. Shutemov
2019-06-14 13:03       ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 10/62] x86/mm: Detect MKTME early Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 11/62] x86/mm: Add a helper to retrieve KeyID for a page Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 12/62] x86/mm: Add a helper to retrieve KeyID for a VMA Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 13/62] x86/mm: Add hooks to allocate and free encrypted pages Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-06-14  9:34   ` Peter Zijlstra
2019-06-14  9:34     ` Peter Zijlstra
2019-06-14 11:04     ` Peter Zijlstra
2019-06-14 11:04       ` Peter Zijlstra
2019-06-14 13:28       ` Kirill A. Shutemov
2019-06-14 13:28         ` Kirill A. Shutemov
2019-06-14 13:43         ` Peter Zijlstra
2019-06-14 13:43           ` Peter Zijlstra
2019-06-14 22:41           ` Kirill A. Shutemov
2019-06-14 22:41             ` Kirill A. Shutemov
2019-06-17  9:25             ` Peter Zijlstra
2019-06-17  9:25               ` Peter Zijlstra
2019-06-14 13:14     ` Kirill A. Shutemov
2019-06-14 13:14       ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 14/62] x86/mm: Map zero pages into encrypted mappings correctly Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 15/62] x86/mm: Rename CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 16/62] x86/mm: Allow to disable MKTME after enumeration Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 17/62] x86/mm: Calculate direct mapping size Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 18/62] x86/mm: Implement syncing per-KeyID direct mappings Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-06-14  9:51   ` Peter Zijlstra
2019-06-14  9:51     ` Peter Zijlstra
2019-06-14 22:43     ` Kirill A. Shutemov
2019-06-14 22:43       ` Kirill A. Shutemov
2019-06-17  9:27       ` Peter Zijlstra
2019-06-17  9:27         ` Peter Zijlstra
2019-06-17 14:43         ` Kirill A. Shutemov
2019-06-17 14:43           ` Kirill A. Shutemov
2019-06-17 14:51           ` Peter Zijlstra
2019-06-17 14:51             ` Peter Zijlstra
2019-06-17 15:17             ` Kirill A. Shutemov
2019-06-17 15:17               ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 19/62] x86/mm: Handle encrypted memory in page_to_virt() and __pa() Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-06-14 11:10   ` Peter Zijlstra
2019-06-14 11:10     ` Peter Zijlstra
2019-05-08 14:43 ` [PATCH, RFC 20/62] mm/page_ext: Export lookup_page_ext() symbol Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-06-14 11:12   ` Peter Zijlstra
2019-06-14 11:12     ` Peter Zijlstra
2019-06-14 22:44     ` Kirill A. Shutemov
2019-06-14 22:44       ` Kirill A. Shutemov
2019-06-17  9:30       ` Peter Zijlstra
2019-06-17  9:30         ` Peter Zijlstra
2019-06-17 11:01         ` Kai Huang
2019-06-17 11:01           ` Kai Huang
2019-06-17 11:01           ` Kai Huang
2019-06-17 11:13           ` Huang, Kai
2019-06-17 11:13             ` Huang, Kai
2019-05-08 14:43 ` [PATCH, RFC 21/62] mm/rmap: Clear vma->anon_vma on unlink_anon_vmas() Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 22/62] x86/pconfig: Set a valid encryption algorithm for all MKTME commands Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 23/62] keys/mktme: Introduce a Kernel Key Service for MKTME Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 24/62] keys/mktme: Preparse the MKTME key payload Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 25/62] keys/mktme: Instantiate and destroy MKTME keys Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 26/62] keys/mktme: Move the MKTME payload into a cache aligned structure Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-06-14 11:35   ` Peter Zijlstra
2019-06-14 11:35     ` Peter Zijlstra
2019-06-14 17:10     ` Alison Schofield
2019-06-14 17:10       ` Alison Schofield
2019-05-08 14:43 ` [PATCH, RFC 27/62] keys/mktme: Strengthen the entropy of CPU generated MKTME keys Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 28/62] keys/mktme: Set up PCONFIG programming targets for " Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 29/62] keys/mktme: Program MKTME keys into the platform hardware Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 30/62] keys/mktme: Set up a percpu_ref_count for MKTME keys Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 31/62] keys/mktme: Require CAP_SYS_RESOURCE capability " Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 32/62] keys/mktme: Store MKTME payloads if cmdline parameter allows Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 33/62] acpi: Remove __init from acpi table parsing functions Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 34/62] acpi/hmat: Determine existence of an ACPI HMAT Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 35/62] keys/mktme: Require ACPI HMAT to register the MKTME Key Service Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 36/62] acpi/hmat: Evaluate topology presented in ACPI HMAT for MKTME Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 37/62] keys/mktme: Do not allow key creation in unsafe topologies Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 38/62] keys/mktme: Support CPU hotplug for MKTME key service Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:43 ` [PATCH, RFC 39/62] keys/mktme: Find new PCONFIG targets during memory hotplug Kirill A. Shutemov
2019-05-08 14:43   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 40/62] keys/mktme: Program new PCONFIG targets with MKTME keys Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 41/62] keys/mktme: Support memory hotplug for " Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 42/62] mm: Generalize the mprotect implementation to support extensions Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 43/62] syscall/x86: Wire up a system call for MKTME encryption keys Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-29  7:21   ` Mike Rapoport
2019-05-29  7:21     ` Mike Rapoport
2019-05-29 18:12     ` Alison Schofield
2019-05-29 18:12       ` Alison Schofield
2019-05-08 14:44 ` [PATCH, RFC 44/62] x86/mm: Set KeyIDs in encrypted VMAs for MKTME Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-06-14 11:44   ` Peter Zijlstra
2019-06-14 11:44     ` Peter Zijlstra
2019-06-14 17:33     ` Alison Schofield
2019-06-14 17:33       ` Alison Schofield
2019-06-14 18:26       ` Dave Hansen
2019-06-14 18:26         ` Dave Hansen
2019-06-14 18:46         ` Alison Schofield
2019-06-14 18:46           ` Alison Schofield
2019-06-14 19:11           ` Dave Hansen
2019-06-14 19:11             ` Dave Hansen
2019-06-17  9:10             ` Peter Zijlstra
2019-06-17  9:10               ` Peter Zijlstra
2019-05-08 14:44 ` [PATCH, RFC 45/62] mm: Add the encrypt_mprotect() system call " Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-06-14 11:47   ` Peter Zijlstra
2019-06-14 11:47     ` Peter Zijlstra
2019-06-14 17:35     ` Alison Schofield
2019-06-14 17:35       ` Alison Schofield
2019-06-14 11:51   ` Peter Zijlstra
2019-06-14 11:51     ` Peter Zijlstra
2019-06-15  0:32     ` Alison Schofield
2019-06-15  0:32       ` Alison Schofield
2019-06-17  9:08       ` Peter Zijlstra
2019-06-17  9:08         ` Peter Zijlstra
2019-06-17 15:07   ` Andy Lutomirski
2019-06-17 15:07     ` Andy Lutomirski
2019-06-17 15:07     ` Andy Lutomirski
2019-06-17 15:28     ` Dave Hansen
2019-06-17 15:28       ` Dave Hansen
2019-06-17 15:46       ` Andy Lutomirski
2019-06-17 15:46         ` Andy Lutomirski
2019-06-17 15:46         ` Andy Lutomirski
2019-06-17 18:27         ` Dave Hansen
2019-06-17 18:27           ` Dave Hansen
2019-06-17 19:12           ` Andy Lutomirski
2019-06-17 19:12             ` Andy Lutomirski
2019-06-17 19:12             ` Andy Lutomirski
2019-06-17 21:36             ` Dave Hansen
2019-06-17 21:36               ` Dave Hansen
2019-06-18  0:48               ` Kai Huang
2019-06-18  0:48                 ` Kai Huang
2019-06-18  0:48                 ` Kai Huang
2019-06-18  1:50                 ` Andy Lutomirski
2019-06-18  1:50                   ` Andy Lutomirski
2019-06-18  1:50                   ` Andy Lutomirski
2019-06-18  2:11                   ` Kai Huang
2019-06-18  2:11                     ` Kai Huang
2019-06-18  2:11                     ` Kai Huang
2019-06-18  4:24                     ` Andy Lutomirski
2019-06-18  4:24                       ` Andy Lutomirski
2019-06-18  4:24                       ` Andy Lutomirski
2019-06-18 14:19                   ` Dave Hansen
2019-06-18 14:19                     ` Dave Hansen
2019-06-18  0:05             ` Kai Huang
2019-06-18  0:05               ` Kai Huang
2019-06-18  0:05               ` Kai Huang
2019-06-18  0:15               ` Andy Lutomirski
2019-06-18  0:15                 ` Andy Lutomirski
2019-06-18  0:15                 ` Andy Lutomirski
2019-06-18  1:35                 ` Kai Huang
2019-06-18  1:35                   ` Kai Huang
2019-06-18  1:35                   ` Kai Huang
2019-06-18  1:43                   ` Andy Lutomirski
2019-06-18  1:43                     ` Andy Lutomirski
2019-06-18  1:43                     ` Andy Lutomirski
2019-06-18  2:23                     ` Kai Huang
2019-06-18  2:23                       ` Kai Huang
2019-06-18  2:23                       ` Kai Huang
2019-06-18  9:12                       ` Peter Zijlstra
2019-06-18  9:12                         ` Peter Zijlstra
2019-06-18 14:09                         ` Dave Hansen
2019-06-18 14:09                           ` Dave Hansen
2019-06-18 16:15                           ` Kirill A. Shutemov
2019-06-18 16:15                             ` Kirill A. Shutemov
2019-06-18 16:22                             ` Dave Hansen
2019-06-18 16:22                               ` Dave Hansen
2019-06-18 16:36                               ` Andy Lutomirski
2019-06-18 16:36                                 ` Andy Lutomirski
2019-06-18 16:48                                 ` Dave Hansen
2019-06-18 16:48                                   ` Dave Hansen
2019-06-18 14:13                 ` Dave Hansen
2019-06-18 14:13                   ` Dave Hansen
2019-06-17 23:59           ` Kai Huang
2019-06-17 23:59             ` Kai Huang
2019-06-17 23:59             ` Kai Huang
2019-06-18  1:34             ` Lendacky, Thomas
2019-06-18  1:34               ` Lendacky, Thomas
2019-06-18  1:40               ` Andy Lutomirski
2019-06-18  1:40                 ` Andy Lutomirski
2019-06-18  1:40                 ` Andy Lutomirski
2019-06-18  2:02                 ` Lendacky, Thomas
2019-06-18  2:02                   ` Lendacky, Thomas
2019-06-18  4:19                 ` Andy Lutomirski
2019-06-18  4:19                   ` Andy Lutomirski
2019-06-18  4:19                   ` Andy Lutomirski
2019-05-08 14:44 ` [PATCH, RFC 46/62] x86/mm: Keep reference counts on encrypted VMAs " Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-06-14 11:54   ` Peter Zijlstra
2019-06-14 11:54     ` Peter Zijlstra
2019-06-14 18:39     ` Alison Schofield
2019-06-14 18:39       ` Alison Schofield
2019-05-08 14:44 ` [PATCH, RFC 47/62] mm: Restrict MKTME memory encryption to anonymous VMAs Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-06-14 11:55   ` Peter Zijlstra
2019-06-14 11:55     ` Peter Zijlstra
2019-06-15  0:07     ` Alison Schofield
2019-06-15  0:07       ` Alison Schofield
2019-05-08 14:44 ` [PATCH, RFC 48/62] selftests/x86/mktme: Test the MKTME APIs Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 17:09   ` Alison Schofield
2019-05-08 17:09     ` Alison Schofield
2019-05-08 14:44 ` [PATCH, RFC 49/62] mm, x86: export several MKTME variables Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-06-14 11:56   ` Peter Zijlstra
2019-06-14 11:56     ` Peter Zijlstra
2019-06-17  3:14     ` Kai Huang
2019-06-17  3:14       ` Kai Huang
2019-06-17  3:14       ` Kai Huang
2019-06-17  7:46       ` Peter Zijlstra
2019-06-17  7:46         ` Peter Zijlstra
2019-06-17  8:39         ` Kai Huang
2019-06-17  8:39           ` Kai Huang
2019-06-17  8:39           ` Kai Huang
2019-06-17 11:25           ` Kirill A. Shutemov
2019-06-17 11:25             ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 50/62] kvm, x86, mmu: setup MKTME keyID to spte for given PFN Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 51/62] iommu/vt-d: Support MKTME in DMA remapping Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-06-14 12:04   ` Peter Zijlstra
2019-06-14 12:04     ` Peter Zijlstra
2019-05-08 14:44 ` [PATCH, RFC 52/62] x86/mm: introduce common code for mem encryption Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 16:58   ` Christoph Hellwig
2019-05-08 16:58     ` Christoph Hellwig
2019-05-08 20:52     ` Jacob Pan
2019-05-08 20:52       ` Jacob Pan
2019-05-08 21:21       ` Kirill A. Shutemov
2019-05-08 21:21         ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 53/62] x86/mm: Use common code for DMA memory encryption Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 54/62] x86/mm: Disable MKTME on incompatible platform configurations Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 55/62] x86/mm: Disable MKTME if not all system memory supports encryption Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 56/62] x86: Introduce CONFIG_X86_INTEL_MKTME Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 57/62] x86/mktme: Overview of Multi-Key Total Memory Encryption Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-29  7:21   ` Mike Rapoport
2019-05-29  7:21     ` Mike Rapoport
2019-05-29 18:13     ` Alison Schofield
2019-05-29 18:13       ` Alison Schofield
2019-07-14 18:16   ` Randy Dunlap
2019-07-14 18:16     ` Randy Dunlap
2019-07-15  9:02     ` Kirill A. Shutemov
2019-07-15  9:02       ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 58/62] x86/mktme: Document the MKTME provided security mitigations Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 59/62] x86/mktme: Document the MKTME kernel configuration requirements Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 60/62] x86/mktme: Document the MKTME Key Service API Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 61/62] x86/mktme: Document the MKTME API for anonymous memory encryption Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-08 14:44 ` [PATCH, RFC 62/62] x86/mktme: Demonstration program using the MKTME APIs Kirill A. Shutemov
2019-05-08 14:44   ` Kirill A. Shutemov
2019-05-29  7:30 ` [PATCH, RFC 00/62] Intel MKTME enabling Mike Rapoport
2019-05-29  7:30   ` Mike Rapoport
2019-05-29 18:20   ` Alison Schofield
2019-05-29 18:20     ` Alison Schofield
2019-06-14 12:15 ` Peter Zijlstra
2019-06-14 12:15   ` Peter Zijlstra
