From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	x86@kernel.org, Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>,
	Borislav Petkov <bp@alien8.de>,
	Peter Zijlstra <peterz@infradead.org>,
	Andy Lutomirski <luto@amacapital.net>,
	David Howells <dhowells@redhat.com>
Cc: Kees Cook <keescook@chromium.org>,
	Dave Hansen <dave.hansen@intel.com>,
	Kai Huang <kai.huang@linux.intel.com>,
	Jacob Pan <jacob.jun.pan@linux.intel.com>,
	Alison Schofield <alison.schofield@intel.com>,
	linux-mm@kvack.org, kvm@vger.kernel.org,
	keyrings@vger.kernel.org, linux-kernel@vger.kernel.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH, RFC 18/62] x86/mm: Implement syncing per-KeyID direct mappings
Date: Wed, 08 May 2019 14:43:38 +0000	[thread overview]
Message-ID: <20190508144422.13171-19-kirill.shutemov@linux.intel.com> (raw)
In-Reply-To: <20190508144422.13171-1-kirill.shutemov@linux.intel.com>

For MKTME we use per-KeyID direct mappings. This allows the kernel to
have access to encrypted memory.

sync_direct_mapping() syncs the per-KeyID direct mappings with the
canonical one -- KeyID-0.

The function tracks changes in the canonical mapping:
 - creating or removing chunks of the translation tree;
 - changes in mapping flags (i.e. protection bits);
 - splitting a huge page mapping into a page table;
 - replacing a page table with a huge page mapping.

The function needs to be called on every change to the direct mapping:
hotplug, hotremove, changes in permission bits, etc.

The function is a nop until MKTME is enabled.
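
For illustration only (not part of the patch): a minimal sketch of how a
KeyID-N direct-mapping address relates to the canonical KeyID-0 address.
The helper name mktme_keyid_va is hypothetical; direct_mapping_size is
the per-KeyID direct mapping size calculated earlier in the series:

	/* Hypothetical helper, for illustration only. */
	static inline void *mktme_keyid_va(void *kaddr, int keyid)
	{
		/* Offset within the canonical (KeyID-0) direct mapping. */
		unsigned long offset = (unsigned long)kaddr - PAGE_OFFSET;

		/* Each KeyID gets its own copy of the direct mapping. */
		return (void *)(PAGE_OFFSET +
				keyid * direct_mapping_size + offset);
	}

sync_direct_mapping() keeps the page tables that back these per-KeyID
copies consistent with the KeyID-0 translation tree.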

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/x86/include/asm/mktme.h |   6 +
 arch/x86/mm/init_64.c        |  10 +
 arch/x86/mm/mktme.c          | 441 +++++++++++++++++++++++++++++++++++
 3 files changed, 457 insertions(+)

diff --git a/arch/x86/include/asm/mktme.h b/arch/x86/include/asm/mktme.h
index 454d6d7c791d..bd6707e73219 100644
--- a/arch/x86/include/asm/mktme.h
+++ b/arch/x86/include/asm/mktme.h
@@ -59,6 +59,8 @@ static inline void arch_free_page(struct page *page, int order)
 		free_encrypted_page(page, order);
 }
 
+int sync_direct_mapping(void);
+
 #else
 #define mktme_keyid_mask	((phys_addr_t)0)
 #define mktme_nr_keyids		0
@@ -73,6 +75,10 @@ static inline bool mktme_enabled(void)
 
 static inline void mktme_disable(void) {}
 
+static inline int sync_direct_mapping(void)
+{
+	return 0;
+}
 #endif
 
 #endif
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3a08d707eec8..ad4ea3703faf 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -693,6 +693,7 @@ kernel_physical_mapping_init(unsigned long paddr_start,
 {
 	bool pgd_changed = false;
 	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
+	int ret;
 
 	paddr_last = paddr_end;
 	vaddr = (unsigned long)__va(paddr_start);
@@ -726,6 +727,9 @@ kernel_physical_mapping_init(unsigned long paddr_start,
 		pgd_changed = true;
 	}
 
+	ret = sync_direct_mapping();
+	WARN_ON(ret);
+
 	if (pgd_changed)
 		sync_global_pgds(vaddr_start, vaddr_end - 1);
 
@@ -1135,10 +1139,13 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
 static void __meminit
 kernel_physical_mapping_remove(unsigned long start, unsigned long end)
 {
+	int ret;
 	start = (unsigned long)__va(start);
 	end = (unsigned long)__va(end);
 
 	remove_pagetable(start, end, true, NULL);
+	ret = sync_direct_mapping();
+	WARN_ON(ret);
 }
 
 int __ref arch_remove_memory(int nid, u64 start, u64 size,
@@ -1247,6 +1254,7 @@ void mark_rodata_ro(void)
 	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
 	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
 	unsigned long all_end;
+	int ret;
 
 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
 	       (end - start) >> 10);
@@ -1280,6 +1288,8 @@ void mark_rodata_ro(void)
 	free_kernel_image_pages((void *)text_end, (void *)rodata_start);
 	free_kernel_image_pages((void *)rodata_end, (void *)_sdata);
 
+	ret = sync_direct_mapping();
+	WARN_ON(ret);
 	debug_checkwx();
 }
 
diff --git a/arch/x86/mm/mktme.c b/arch/x86/mm/mktme.c
index 9221c894e8e9..024165c9c7f3 100644
--- a/arch/x86/mm/mktme.c
+++ b/arch/x86/mm/mktme.c
@@ -1,6 +1,8 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <asm/mktme.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
 
 /* Mask to extract KeyID from physical address. */
 phys_addr_t mktme_keyid_mask;
@@ -36,6 +38,8 @@ static bool need_page_mktme(void)
 static void init_page_mktme(void)
 {
 	static_branch_enable(&mktme_enabled_key);
+
+	sync_direct_mapping();
 }
 
 struct page_ext_operations page_mktme_ops = {
@@ -96,3 +100,440 @@ void free_encrypted_page(struct page *page, int order)
 		page++;
 	}
 }
+
+static int sync_direct_mapping_pte(unsigned long keyid,
+		pmd_t *dst_pmd, pmd_t *src_pmd,
+		unsigned long addr, unsigned long end)
+{
+	pte_t *src_pte, *dst_pte;
+	pte_t *new_pte = NULL;
+	bool remove_pte;
+
+	/*
+	 * We want to unmap and free the page table if the source is empty and
+	 * the range covers whole page table.
+	 */
+	remove_pte = !src_pmd && PAGE_ALIGNED(addr) && PAGE_ALIGNED(end);
+
+	/*
+	 * PMD page got split into page table.
+	 * Clear PMD mapping. Page table will be established instead.
+	 */
+	if (pmd_large(*dst_pmd)) {
+		spin_lock(&init_mm.page_table_lock);
+		pmd_clear(dst_pmd);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+
+	/* Allocate a new page table if needed. */
+	if (pmd_none(*dst_pmd)) {
+		new_pte = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		if (!new_pte)
+			return -ENOMEM;
+		dst_pte = new_pte + pte_index(addr + keyid * direct_mapping_size);
+	} else {
+		dst_pte = pte_offset_map(dst_pmd, addr + keyid * direct_mapping_size);
+	}
+	src_pte = src_pmd ? pte_offset_map(src_pmd, addr) : NULL;
+
+	spin_lock(&init_mm.page_table_lock);
+
+	do {
+		pteval_t val;
+
+		if (!src_pte || pte_none(*src_pte)) {
+			set_pte(dst_pte, __pte(0));
+			goto next;
+		}
+
+		if (!pte_none(*dst_pte)) {
+			/*
+			 * Sanity check: PFNs must match between source
+			 * and destination even if the rest doesn't.
+			 */
+			BUG_ON(pte_pfn(*dst_pte) != pte_pfn(*src_pte));
+		}
+
+		/* Copy entry, but set KeyID. */
+		val = pte_val(*src_pte) | keyid << mktme_keyid_shift;
+		val &= __supported_pte_mask;
+		set_pte(dst_pte, __pte(val));
+next:
+		addr += PAGE_SIZE;
+		dst_pte++;
+		if (src_pte)
+			src_pte++;
+	} while (addr != end);
+
+	if (new_pte)
+		pmd_populate_kernel(&init_mm, dst_pmd, new_pte);
+
+	if (remove_pte) {
+		__free_page(pmd_page(*dst_pmd));
+		pmd_clear(dst_pmd);
+	}
+
+	spin_unlock(&init_mm.page_table_lock);
+
+	return 0;
+}
+
+static int sync_direct_mapping_pmd(unsigned long keyid,
+		pud_t *dst_pud, pud_t *src_pud,
+		unsigned long addr, unsigned long end)
+{
+	pmd_t *src_pmd, *dst_pmd;
+	pmd_t *new_pmd = NULL;
+	bool remove_pmd = false;
+	unsigned long next;
+	int ret = 0;
+
+	/*
+	 * We want to unmap and free the page table if the source is empty and
+	 * the range covers whole page table.
+	 */
+	remove_pmd = !src_pud && IS_ALIGNED(addr, PUD_SIZE) && IS_ALIGNED(end, PUD_SIZE);
+
+	/*
+	 * PUD page got split into page table.
+	 * Clear PUD mapping. Page table will be established instead.
+	 */
+	if (pud_large(*dst_pud)) {
+		spin_lock(&init_mm.page_table_lock);
+		pud_clear(dst_pud);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+
+	/* Allocate a new page table if needed. */
+	if (pud_none(*dst_pud)) {
+		new_pmd = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		if (!new_pmd)
+			return -ENOMEM;
+		dst_pmd = new_pmd + pmd_index(addr + keyid * direct_mapping_size);
+	} else {
+		dst_pmd = pmd_offset(dst_pud, addr + keyid * direct_mapping_size);
+	}
+	src_pmd = src_pud ? pmd_offset(src_pud, addr) : NULL;
+
+	do {
+		pmd_t *__src_pmd = src_pmd;
+
+		next = pmd_addr_end(addr, end);
+		if (!__src_pmd || pmd_none(*__src_pmd)) {
+			if (pmd_none(*dst_pmd))
+				goto next;
+			if (pmd_large(*dst_pmd)) {
+				spin_lock(&init_mm.page_table_lock);
+				set_pmd(dst_pmd, __pmd(0));
+				spin_unlock(&init_mm.page_table_lock);
+				goto next;
+			}
+			__src_pmd = NULL;
+		}
+
+		if (__src_pmd && pmd_large(*__src_pmd)) {
+			pmdval_t val;
+
+			if (pmd_large(*dst_pmd)) {
+				/*
+				 * Sanity check: PFNs must match between source
+				 * and destination even if the rest doesn't.
+				 */
+				BUG_ON(pmd_pfn(*dst_pmd) != pmd_pfn(*__src_pmd));
+			} else if (!pmd_none(*dst_pmd)) {
+				/*
+				 * Page table is replaced with a PMD page.
+				 * Free and unmap the page table.
+				 */
+				__free_page(pmd_page(*dst_pmd));
+				spin_lock(&init_mm.page_table_lock);
+				pmd_clear(dst_pmd);
+				spin_unlock(&init_mm.page_table_lock);
+			}
+
+			/* Copy entry, but set KeyID. */
+			val = pmd_val(*__src_pmd) | keyid << mktme_keyid_shift;
+			val &= __supported_pte_mask;
+			spin_lock(&init_mm.page_table_lock);
+			set_pmd(dst_pmd, __pmd(val));
+			spin_unlock(&init_mm.page_table_lock);
+			goto next;
+		}
+
+		ret = sync_direct_mapping_pte(keyid, dst_pmd, __src_pmd,
+				addr, next);
+next:
+		addr = next;
+		dst_pmd++;
+		if (src_pmd)
+			src_pmd++;
+	} while (addr != end && !ret);
+
+	if (new_pmd) {
+		spin_lock(&init_mm.page_table_lock);
+		pud_populate(&init_mm, dst_pud, new_pmd);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+
+	if (remove_pmd) {
+		spin_lock(&init_mm.page_table_lock);
+		__free_page(pud_page(*dst_pud));
+		pud_clear(dst_pud);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+
+	return ret;
+}
+
+static int sync_direct_mapping_pud(unsigned long keyid,
+		p4d_t *dst_p4d, p4d_t *src_p4d,
+		unsigned long addr, unsigned long end)
+{
+	pud_t *src_pud, *dst_pud;
+	pud_t *new_pud = NULL;
+	bool remove_pud = false;
+	unsigned long next;
+	int ret = 0;
+
+	/*
+	 * We want to unmap and free the page table if the source is empty and
+	 * the range covers whole page table.
+	 */
+	remove_pud = !src_p4d && IS_ALIGNED(addr, P4D_SIZE) && IS_ALIGNED(end, P4D_SIZE);
+
+	/*
+	 * P4D page got split into page table.
+	 * Clear P4D mapping. Page table will be established instead.
+	 */
+	if (p4d_large(*dst_p4d)) {
+		spin_lock(&init_mm.page_table_lock);
+		p4d_clear(dst_p4d);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+
+	/* Allocate a new page table if needed. */
+	if (p4d_none(*dst_p4d)) {
+		new_pud = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		if (!new_pud)
+			return -ENOMEM;
+		dst_pud = new_pud + pud_index(addr + keyid * direct_mapping_size);
+	} else {
+		dst_pud = pud_offset(dst_p4d, addr + keyid * direct_mapping_size);
+	}
+	src_pud = src_p4d ? pud_offset(src_p4d, addr) : NULL;
+
+	do {
+		pud_t *__src_pud = src_pud;
+
+		next = pud_addr_end(addr, end);
+		if (!__src_pud || pud_none(*__src_pud)) {
+			if (pud_none(*dst_pud))
+				goto next;
+			if (pud_large(*dst_pud)) {
+				spin_lock(&init_mm.page_table_lock);
+				set_pud(dst_pud, __pud(0));
+				spin_unlock(&init_mm.page_table_lock);
+				goto next;
+			}
+			__src_pud = NULL;
+		}
+
+		if (__src_pud && pud_large(*__src_pud)) {
+			pudval_t val;
+
+			if (pud_large(*dst_pud)) {
+				/*
+				 * Sanity check: PFNs must match between source
+				 * and destination even if the rest doesn't.
+				 */
+				BUG_ON(pud_pfn(*dst_pud) != pud_pfn(*__src_pud));
+			} else if (!pud_none(*dst_pud)) {
+				/*
+				 * Page table is replaced with a pud page.
+				 * Free and unmap the page table.
+				 */
+				__free_page(pud_page(*dst_pud));
+				spin_lock(&init_mm.page_table_lock);
+				pud_clear(dst_pud);
+				spin_unlock(&init_mm.page_table_lock);
+			}
+
+			/* Copy entry, but set KeyID. */
+			val = pud_val(*__src_pud) | keyid << mktme_keyid_shift;
+			val &= __supported_pte_mask;
+			spin_lock(&init_mm.page_table_lock);
+			set_pud(dst_pud, __pud(val));
+			spin_unlock(&init_mm.page_table_lock);
+			goto next;
+		}
+
+		ret = sync_direct_mapping_pmd(keyid, dst_pud, __src_pud,
+				addr, next);
+next:
+		addr = next;
+		dst_pud++;
+		if (src_pud)
+			src_pud++;
+	} while (addr != end && !ret);
+
+	if (new_pud) {
+		spin_lock(&init_mm.page_table_lock);
+		p4d_populate(&init_mm, dst_p4d, new_pud);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+
+	if (remove_pud) {
+		spin_lock(&init_mm.page_table_lock);
+		__free_page(p4d_page(*dst_p4d));
+		p4d_clear(dst_p4d);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+
+	return ret;
+}
+
+static int sync_direct_mapping_p4d(unsigned long keyid,
+		pgd_t *dst_pgd, pgd_t *src_pgd,
+		unsigned long addr, unsigned long end)
+{
+	p4d_t *src_p4d, *dst_p4d;
+	p4d_t *new_p4d_1 = NULL, *new_p4d_2 = NULL;
+	bool remove_p4d = false;
+	unsigned long next;
+	int ret = 0;
+
+	/*
+	 * We want to unmap and free the page table if the source is empty and
+	 * the range covers whole page table.
+	 */
+	remove_p4d = !src_pgd && IS_ALIGNED(addr, PGDIR_SIZE) && IS_ALIGNED(end, PGDIR_SIZE);
+
+	/* Allocate a new page table if needed. */
+	if (pgd_none(*dst_pgd)) {
+		new_p4d_1 = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		if (!new_p4d_1)
+			return -ENOMEM;
+		dst_p4d = new_p4d_1 + p4d_index(addr + keyid * direct_mapping_size);
+	} else {
+		dst_p4d = p4d_offset(dst_pgd, addr + keyid * direct_mapping_size);
+	}
+	src_p4d = src_pgd ? p4d_offset(src_pgd, addr) : NULL;
+
+	do {
+		p4d_t *__src_p4d = src_p4d;
+
+		next = p4d_addr_end(addr, end);
+		if (!__src_p4d || p4d_none(*__src_p4d)) {
+			if (p4d_none(*dst_p4d))
+				goto next;
+			__src_p4d = NULL;
+		}
+
+		ret = sync_direct_mapping_pud(keyid, dst_p4d, __src_p4d,
+				addr, next);
+next:
+		addr = next;
+		dst_p4d++;
+
+		/*
+		 * Direct mappings are 1TiB-aligned. With 5-level paging this
+		 * means that at the PGD level there can be a misalignment
+		 * between source and destination.
+		 *
+		 * Allocate the new page table if dst_p4d crosses page table
+		 * boundary.
+		 */
+		if (!((unsigned long)dst_p4d & ~PAGE_MASK) && addr != end) {
+			if (pgd_none(dst_pgd[1])) {
+				new_p4d_2 = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+				if (!new_p4d_2)
+					ret = -ENOMEM;
+				dst_p4d = new_p4d_2;
+			} else {
+				dst_p4d = p4d_offset(dst_pgd + 1, 0);
+			}
+		}
+		if (src_p4d)
+			src_p4d++;
+	} while (addr != end && !ret);
+
+	if (new_p4d_1 || new_p4d_2) {
+		spin_lock(&init_mm.page_table_lock);
+		if (new_p4d_1)
+			pgd_populate(&init_mm, dst_pgd, new_p4d_1);
+		if (new_p4d_2)
+			pgd_populate(&init_mm, dst_pgd + 1, new_p4d_2);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+
+	if (remove_p4d) {
+		spin_lock(&init_mm.page_table_lock);
+		__free_page(pgd_page(*dst_pgd));
+		pgd_clear(dst_pgd);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+
+	return ret;
+}
+
+static int sync_direct_mapping_keyid(unsigned long keyid)
+{
+	pgd_t *src_pgd, *dst_pgd;
+	unsigned long addr, end, next;
+	int ret = 0;
+
+	addr = PAGE_OFFSET;
+	end = PAGE_OFFSET + direct_mapping_size;
+
+	dst_pgd = pgd_offset_k(addr + keyid * direct_mapping_size);
+	src_pgd = pgd_offset_k(addr);
+
+	do {
+		pgd_t *__src_pgd = src_pgd;
+
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(*__src_pgd)) {
+			if (pgd_none(*dst_pgd))
+				continue;
+			__src_pgd = NULL;
+		}
+
+		ret = sync_direct_mapping_p4d(keyid, dst_pgd, __src_pgd,
+				addr, next);
+	} while (dst_pgd++, src_pgd++, addr = next, addr != end && !ret);
+
+	return ret;
+}
+
+/*
+ * For MKTME we maintain per-KeyID direct mappings. This allows the kernel
+ * to have access to encrypted memory.
+ *
+ * sync_direct_mapping() syncs the per-KeyID direct mappings with the
+ * canonical one -- KeyID-0.
+ *
+ * The function tracks changes in the canonical mapping:
+ *  - creating or removing chunks of the translation tree;
+ *  - changes in mapping flags (i.e. protection bits);
+ *  - splitting a huge page mapping into a page table;
+ *  - replacing a page table with a huge page mapping.
+ *
+ * The function needs to be called on every change to the direct mapping:
+ * hotplug, hotremove, changes in permission bits, etc.
+ *
+ * The function is a nop until MKTME is enabled.
+ */
+int sync_direct_mapping(void)
+{
+	int i, ret = 0;
+
+	if (!mktme_enabled())
+		return 0;
+
+	for (i = 1; !ret && i <= mktme_nr_keyids; i++)
+		ret = sync_direct_mapping_keyid(i);
+
+	flush_tlb_all();
+
+	return ret;
+}
-- 
2.20.1

