From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, Linus Torvalds <torvalds@linux-foundation.org>,
	Andy Lutomirski <luto@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Dave Hansen <dave.hansen@intel.com>,
	Borislav Petkov <bpetkov@suse.de>,
	Greg KH <gregkh@linuxfoundation.org>,
	keescook@google.com, hughd@google.com,
	Brian Gerst <brgerst@gmail.com>,
	Josh Poimboeuf <jpoimboe@redhat.com>,
	Denys Vlasenko <dvlasenk@redhat.com>,
	Rik van Riel <riel@redhat.com>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	Juergen Gross <jgross@suse.com>,
	David Laight <David.Laight@aculab.com>,
	Eduardo Valentin <eduval@amazon.com>,
	aliguori@amazon.com, Will Deacon <will.deacon@arm.com>,
	daniel.gruss@iaik.tugraz.at
Subject: [patch 47/60] x86/ldt: Map LDT entries into fixmap
Date: Mon, 04 Dec 2017 15:07:53 +0100
Message-ID: <20171204150608.674502245@linutronix.de>
In-Reply-To: <20171204140706.296109558@linutronix.de>

[-- Attachment #1: x86-ldt--Map_LDT_entries_into_fixmap.patch --]
[-- Type: text/plain, Size: 6867 bytes --]

From: Thomas Gleixner <tglx@linutronix.de>

The LDT is not commonly used on 64-bit, so the overhead of populating the
fixmap entries on context switch for the rare modify_ldt() syscall users is a
reasonable trade-off vs. having extra dynamically managed mapping space per
process.
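
For scale (illustrative numbers, not introduced by this patch: on x86
LDT_ENTRIES is 8192 and LDT_ENTRY_SIZE is 8 bytes), the worst-case LDT is
64k, i.e. 16 pages, so the per-CPU fixmap reservation is small and bounded:

	/* Illustrative only: worst-case LDT footprint in pages */
	#define LDT_MAX_PAGES \
		((LDT_ENTRIES * LDT_ENTRY_SIZE) / PAGE_SIZE)	/* 8192 * 8 / 4096 = 16 */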

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/include/asm/mmu_context.h |   44 ++++--------------
 arch/x86/kernel/ldt.c              |   87 +++++++++++++++++++++++++++++++------
 2 files changed, 84 insertions(+), 47 deletions(-)

--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -45,13 +45,17 @@ static inline void load_mm_cr4(struct mm
  */
 struct ldt_struct {
 	/*
-	 * Xen requires page-aligned LDTs with special permissions.  This is
-	 * needed to prevent us from installing evil descriptors such as
+	 * Xen requires page-aligned LDTs with special permissions.  This
+	 * is needed to prevent us from installing evil descriptors such as
 	 * call gates.  On native, we could merge the ldt_struct and LDT
-	 * allocations, but it's not worth trying to optimize.
+	 * allocations, but it's not worth trying to optimize and it does
+	 * not work with page table isolation enabled, which requires
+	 * page-aligned LDT entries as well.
 	 */
-	struct desc_struct *entries_va;
-	unsigned int nr_entries;
+	struct desc_struct	*entries_va;
+	phys_addr_t		entries_pa;
+	unsigned int		nr_entries;
+	unsigned int		order;
 };
 
 /*
@@ -59,6 +63,7 @@ struct ldt_struct {
  */
 int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context_ldt(struct mm_struct *mm);
+void load_mm_ldt(struct mm_struct *mm);
 #else	/* CONFIG_MODIFY_LDT_SYSCALL */
 static inline int init_new_context_ldt(struct task_struct *tsk,
 				       struct mm_struct *mm)
@@ -66,38 +71,11 @@ static inline int init_new_context_ldt(s
 	return 0;
 }
 static inline void destroy_context_ldt(struct mm_struct *mm) {}
-#endif
-
 static inline void load_mm_ldt(struct mm_struct *mm)
 {
-#ifdef CONFIG_MODIFY_LDT_SYSCALL
-	struct ldt_struct *ldt;
-
-	/* READ_ONCE synchronizes with smp_store_release */
-	ldt = READ_ONCE(mm->context.ldt);
-
-	/*
-	 * Any change to mm->context.ldt is followed by an IPI to all
-	 * CPUs with the mm active.  The LDT will not be freed until
-	 * after the IPI is handled by all such CPUs.  This means that,
-	 * if the ldt_struct changes before we return, the values we see
-	 * will be safe, and the new values will be loaded before we run
-	 * any user code.
-	 *
-	 * NB: don't try to convert this to use RCU without extreme care.
-	 * We would still need IRQs off, because we don't want to change
-	 * the local LDT after an IPI loaded a newer value than the one
-	 * that we can see.
-	 */
-
-	if (unlikely(ldt))
-		set_ldt(ldt->entries_va, ldt->nr_entries);
-	else
-		clear_LDT();
-#else
 	clear_LDT();
-#endif
 }
+#endif
 
 static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
 {
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -22,6 +22,7 @@
 #include <asm/desc.h>
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
+#include <asm/fixmap.h>
 
 static void refresh_ldt_segments(void)
 {
@@ -42,6 +43,61 @@ static void refresh_ldt_segments(void)
 #endif
 }
 
+#ifdef CONFIG_KERNEL_PAGE_TABLE_ISOLATION
+
+#define LDT_EPP		(PAGE_SIZE / LDT_ENTRY_SIZE)
+
+static void set_ldt_and_map(struct ldt_struct *ldt)
+{
+	phys_addr_t pa = ldt->entries_pa;
+	void *fixva;
+	int idx, i;
+
+	if (!static_cpu_has_bug(X86_BUG_CPU_INSECURE)) {
+		set_ldt(ldt->entries_va, ldt->nr_entries);
+		return;
+	}
+
+	idx = get_cpu_entry_area_index(smp_processor_id(), ldt_entries);
+	fixva = (void *) __fix_to_virt(idx);
+	for (i = 0; i < ldt->nr_entries; idx--, i += LDT_EPP, pa += PAGE_SIZE)
+		__set_fixmap(idx, pa, PAGE_KERNEL);
+	set_ldt(fixva, ldt->nr_entries);
+}
+#else
+static void set_ldt_and_map(struct ldt_struct *ldt)
+{
+	set_ldt(ldt->entries_va, ldt->nr_entries);
+}
+#endif
+
+void load_mm_ldt(struct mm_struct *mm)
+{
+	struct ldt_struct *ldt;
+
+	/* READ_ONCE synchronizes with smp_store_release */
+	ldt = READ_ONCE(mm->context.ldt);
+
+	/*
+	 * Any change to mm->context.ldt is followed by an IPI to all
+	 * CPUs with the mm active.  The LDT will not be freed until
+	 * after the IPI is handled by all such CPUs.  This means that,
+	 * if the ldt_struct changes before we return, the values we see
+	 * will be safe, and the new values will be loaded before we run
+	 * any user code.
+	 *
+	 * NB: don't try to convert this to use RCU without extreme care.
+	 * We would still need IRQs off, because we don't want to change
+	 * the local LDT after an IPI loaded a newer value than the one
+	 * that we can see.
+	 */
+
+	if (unlikely(ldt))
+		set_ldt_and_map(ldt);
+	else
+		clear_LDT();
+}
+
 /* context.lock is held for us, so we don't need any locking. */
 static void flush_ldt(void *__mm)
 {
@@ -52,26 +108,35 @@ static void flush_ldt(void *__mm)
 		return;
 
 	pc = &mm->context;
-	set_ldt(pc->ldt->entries_va, pc->ldt->nr_entries);
+	set_ldt_and_map(pc->ldt);
 
 	refresh_ldt_segments();
 }
 
+static void __free_ldt_struct(struct ldt_struct *ldt)
+{
+	free_pages((unsigned long)ldt->entries_va, ldt->order);
+	kfree(ldt);
+}
+
 /* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
 static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 {
 	struct ldt_struct *new_ldt;
 	unsigned int alloc_size;
+	struct page *page;
+	int order;
 
 	if (num_entries > LDT_ENTRIES)
 		return NULL;
 
-	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
+	new_ldt = kzalloc(sizeof(struct ldt_struct), GFP_KERNEL);
 	if (!new_ldt)
 		return NULL;
 
 	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
 	alloc_size = num_entries * LDT_ENTRY_SIZE;
+	order = get_order(alloc_size);
 
 	/*
 	 * Xen is very picky: it requires a page-aligned LDT that has no
@@ -79,16 +144,14 @@ static struct ldt_struct *alloc_ldt_stru
 	 * Keep it simple: zero the whole allocation and never allocate less
 	 * than PAGE_SIZE.
 	 */
-	if (alloc_size > PAGE_SIZE)
-		new_ldt->entries_va = vzalloc(alloc_size);
-	else
-		new_ldt->entries_va = (void *)get_zeroed_page(GFP_KERNEL);
-
-	if (!new_ldt->entries_va) {
+	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!page) {
 		kfree(new_ldt);
 		return NULL;
 	}
-
+	new_ldt->entries_va = page_address(page);
+	new_ldt->entries_pa = virt_to_phys(new_ldt->entries_va);
+	new_ldt->order = order;
 	new_ldt->nr_entries = num_entries;
 	return new_ldt;
 }
@@ -116,11 +179,7 @@ static void free_ldt_struct(struct ldt_s
 		return;
 
 	paravirt_free_ldt(ldt->entries_va, ldt->nr_entries);
-	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
-		vfree_atomic(ldt->entries_va);
-	else
-		free_page((unsigned long)ldt->entries_va);
-	kfree(ldt);
+	__free_ldt_struct(ldt);
 }
 
 /*

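For reference, user space reaches the new set_ldt_and_map() path only via
modify_ldt(2). A minimal sketch (hypothetical helper, assuming glibc's
syscall(2) wrapper and <asm/ldt.h>; error handling elided) which installs a
single LDT entry and thereby makes the next context switch populate the
fixmap mapping:

	#include <asm/ldt.h>		/* struct user_desc */
	#include <string.h>
	#include <sys/syscall.h>	/* SYS_modify_ldt */
	#include <unistd.h>

	/* Hypothetical helper for illustration, not part of this patch */
	static long install_ldt_entry(void)
	{
		struct user_desc desc;

		memset(&desc, 0, sizeof(desc));
		desc.entry_number	= 0;
		desc.base_addr		= 0;
		desc.limit		= 0xfffff;	/* 4G with limit_in_pages */
		desc.seg_32bit		= 1;
		desc.limit_in_pages	= 1;

		/* func 0x11: write_ldt() with modern read/write semantics */
		return syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
	}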