From: Balbir Singh <sblbir@amazon.com>
To: <linux-kernel@vger.kernel.org>
Cc: <x86@kernel.org>, Balbir Singh <sblbir@amazon.com>,
	<keescook@chromium.org>, <benh@amazon.com>
Subject: [RFC PATCH] arch/x86: Optionally flush L1D on context switch
Date: Sat, 14 Mar 2020 09:04:15 +1100	[thread overview]
Message-ID: <20200313220415.856-1-sblbir@amazon.com> (raw)

This patch is an RFC/PoC to start the discussion on optionally flushing the
L1D cache. The goal is to allow tasks that are paranoid, due to the recent
snoop-assisted data sampling vulnerabilities, to flush their L1D on being
switched out.  This protects their data from being snooped or leaked via
side channels after the task has context switched out.

There are two scenarios we might want to protect against: first, a task
leaving the CPU with data still in L1D (which is the main concern of this
patch); second, a malicious (less trusted) task coming in, for which we
want to clean up the cache before it starts execution. The latter was
proposed by benh and is not currently addressed by this patch, but can
easily be accommodated by the same mechanism.

This patch adds an arch-specific prctl() to flush the L1D cache on context
switch out; the existing mechanism of tracking prev_mm via cpu_tlbstate
is reused (very similar to the cond_ibpb() changes). The patch has been
lightly tested.
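
For illustration only (not part of this patch), a task could opt in from
userspace roughly as follows; the helper name is made up, error handling
is minimal, and the raw syscall is used since libc may not wrap
arch_prctl():

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>		/* ARCH_SET_L1D_FLUSH, ARCH_GET_L1D_FLUSH */

/* Opt the calling task in to an L1D flush on context switch out. */
static int opt_in_l1d_flush(void)
{
	if (syscall(SYS_arch_prctl, ARCH_SET_L1D_FLUSH, 1UL) < 0)
		return -1;

	/* Read the flag back: returns 1 if the opt-in took effect. */
	return syscall(SYS_arch_prctl, ARCH_GET_L1D_FLUSH, 0UL);
}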

The points of discussion/review are:

1. Discuss the use case and the right approach to address this
2. Does an arch prctl allowing for opt-in flushing make sense? Would
   other arches care about something similar?
3. There is a fallback software L1D load, similar to what L1TF does,
   but we don't prefetch the TLB; is that sufficient?
4. The atomics can be improved and we could use a static key, as ibpb
   does, to optimize the code path (a rough sketch follows this list)
5. The code works with a special hack for 64-bit systems (TIF_L1D_FLUSH
   is bit 32); we could generalize it with some effort
6. Should we consider cleaning up the L1D on arrival of tasks?
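
To make point 4 concrete, a rough sketch of the static key approach,
modelled on how the conditional IBPB path is gated (the key name below is
made up and this is not part of the patch):

/* Patched-out branch until the first task opts in. */
static DEFINE_STATIC_KEY_FALSE(l1d_flush_enabled);

/* In enable_l1d_flush_for_task(), once the opt-in has succeeded: */
	static_branch_enable(&l1d_flush_enabled);

/* In switch_mm_irqs_off(), instead of calling l1d_flush() unconditionally: */
	if (static_branch_unlikely(&l1d_flush_enabled))
		l1d_flush(next, tsk);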

In summary, this is an early PoC to start the discussion on the need for
conditional L1D flushing based on the security posture of the
application and the sensitivity of the data it has access to or might
have access to.

Cc: keescook@chromium.org
Cc: benh@amazon.com

Signed-off-by: Balbir Singh <sblbir@amazon.com>
---
 arch/x86/include/asm/thread_info.h |  8 +++
 arch/x86/include/asm/tlbflush.h    |  6 ++
 arch/x86/include/uapi/asm/prctl.h  |  3 +
 arch/x86/kernel/process_64.c       | 12 +++-
 arch/x86/mm/tlb.c                  | 89 ++++++++++++++++++++++++++++++
 5 files changed, 117 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 8de8ceccb8bc..c48ebfa17805 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -103,6 +103,9 @@ struct thread_info {
 #define TIF_ADDR32		29	/* 32-bit address space on 64 bits */
 #define TIF_X32			30	/* 32-bit native x86-64 binary */
 #define TIF_FSCHECK		31	/* Check FS is USER_DS on return */
+#ifdef CONFIG_64BIT
+#define TIF_L1D_FLUSH           32      /* Flush L1D on mm switches (processes) */
+#endif
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -132,6 +135,9 @@ struct thread_info {
 #define _TIF_ADDR32		(1 << TIF_ADDR32)
 #define _TIF_X32		(1 << TIF_X32)
 #define _TIF_FSCHECK		(1 << TIF_FSCHECK)
+#ifdef CONFIG_64BIT
+#define _TIF_L1D_FLUSH		(1UL << TIF_L1D_FLUSH)
+#endif
 
 /* Work to do before invoking the actual syscall. */
 #define _TIF_WORK_SYSCALL_ENTRY	\
@@ -239,6 +245,8 @@ extern void arch_task_cache_init(void);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 extern void arch_release_task_struct(struct task_struct *tsk);
 extern void arch_setup_new_exec(void);
+extern void enable_l1d_flush_for_task(struct task_struct *tsk);
+extern void disable_l1d_flush_for_task(struct task_struct *tsk);
 #define arch_setup_new_exec arch_setup_new_exec
 #endif	/* !__ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6f66d841262d..1d535059b358 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -219,6 +219,12 @@ struct tlb_state {
 	 */
 	unsigned long cr4;
 
+	/*
+	 * Flush the L1D cache on switch_mm_irqs_off() for a
+	 * task getting off the CPU, if it opted in to do so
+	 */
+	bool last_user_mm_l1d_flush;
+
 	/*
 	 * This is a list of all contexts that might exist in the TLB.
 	 * There is one per ASID that we use, and the ASID (what the
diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
index 5a6aac9fa41f..1361e5e25791 100644
--- a/arch/x86/include/uapi/asm/prctl.h
+++ b/arch/x86/include/uapi/asm/prctl.h
@@ -14,4 +14,7 @@
 #define ARCH_MAP_VDSO_32	0x2002
 #define ARCH_MAP_VDSO_64	0x2003
 
+#define ARCH_SET_L1D_FLUSH	0x3001
+#define ARCH_GET_L1D_FLUSH	0x3002
+
 #endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ffd497804dbc..df9f8775ee94 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -700,7 +700,17 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
 	case ARCH_MAP_VDSO_64:
 		return prctl_map_vdso(&vdso_image_64, arg2);
 #endif
-
+#ifdef CONFIG_64BIT
+	case ARCH_GET_L1D_FLUSH:
+		return test_ti_thread_flag(&task->thread_info, TIF_L1D_FLUSH);
+	case ARCH_SET_L1D_FLUSH: {
+		if (arg2 >= 1)
+			enable_l1d_flush_for_task(task);
+		else
+			disable_l1d_flush_for_task(task);
+		break;
+	}
+#endif
 	default:
 		ret = -EINVAL;
 		break;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 66f96f21a7b6..35a3970df0ef 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -151,6 +151,92 @@ void leave_mm(int cpu)
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
+#define L1D_CACHE_ORDER 4
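+/*
+ * 2^L1D_CACHE_ORDER pages = 16 pages, i.e. 64KB with 4K pages; large
+ * enough that reading it back fully displaces a typical 32K L1D.
+ */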
+static void *l1d_flush_pages;
+static DEFINE_MUTEX(l1d_flush_mutex);
+
+void enable_l1d_flush_for_task(struct task_struct *tsk)
+{
+	struct page *page;
+
+	if (static_cpu_has(X86_FEATURE_FLUSH_L1D))
+		goto done;
+
+	mutex_lock(&l1d_flush_mutex);
+	if (l1d_flush_pages)
+		goto unlock;
+	/*
+	 * These pages are never freed; the same set of pages is
+	 * shared across all processes/contexts that opt in.
+	 */
+	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, L1D_CACHE_ORDER);
+	if (!page) {
+		mutex_unlock(&l1d_flush_mutex);
+		return;
+	}
+
+	l1d_flush_pages = page_address(page);
+	/* KSM is not a concern here: these are kernel pages, not user mappings */
+unlock:
+	mutex_unlock(&l1d_flush_mutex);
+done:
+	set_ti_thread_flag(&tsk->thread_info, TIF_L1D_FLUSH);
+	/* Pairs with the smp_rmb() in l1d_flush() */
+	smp_mb__after_atomic();
+}
+
+void disable_l1d_flush_for_task(struct task_struct *tsk)
+{
+	clear_ti_thread_flag(&tsk->thread_info, TIF_L1D_FLUSH);
+	smp_mb__after_atomic();
+}
+
+/*
+ * Flush the L1D cache for this CPU. This is done at switch_mm() time;
+ * it is a pessimistic security measure, opted in to by tasks that hold
+ * sensitive data and are concerned about spills from fill buffers and
+ * other side channels.
+ */
+static void l1d_flush(struct mm_struct *next, struct task_struct *tsk)
+{
+	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
+	int size = PAGE_SIZE << L1D_CACHE_ORDER;
+
+	if (this_cpu_read(cpu_tlbstate.last_user_mm_l1d_flush) == 0)
+		goto check_next;
+
+	if (real_prev == next)
+		return;
+
+	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+		goto done;
+	}
+
+	asm volatile(
+		/* Fill the cache */
+		"xorl	%%eax, %%eax\n"
+		".Lfill_cache:\n"
+		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+		"addl	$64, %%eax\n\t"
+		"cmpl	%%eax, %[size]\n\t"
+		"jne	.Lfill_cache\n\t"
+		"lfence\n"
+		:: [flush_pages] "r" (l1d_flush_pages),
+		    [size] "r" (size)
+		: "eax", "ecx");
+
+done:
+	this_cpu_write(cpu_tlbstate.last_user_mm_l1d_flush, 0);
+	/* Make sure the flag is cleared before it can be set again below */
+	barrier();
+check_next:
+	if (tsk == NULL)
+		return;
+
+	/* Match the set/clear_bit barriers */
+	smp_rmb();
+
+	/* We don't need stringent checks as we opt-in/opt-out */
+	if (test_ti_thread_flag(&tsk->thread_info, TIF_L1D_FLUSH))
+		this_cpu_write(cpu_tlbstate.last_user_mm_l1d_flush, 1);
+}
+
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	       struct task_struct *tsk)
 {
@@ -433,6 +519,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
 	}
 
+	l1d_flush(next, tsk);
+
 	/* Make sure we write CR3 before loaded_mm. */
 	barrier();
 
@@ -503,6 +591,7 @@ void initialize_tlbstate_and_flush(void)
 	/* Reinitialize tlbstate. */
 	this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
 	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
+	this_cpu_write(cpu_tlbstate.last_user_mm_l1d_flush, 0);
 	this_cpu_write(cpu_tlbstate.next_asid, 1);
 	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
 	this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);
-- 
2.17.1


Thread overview: 15+ messages
2020-03-13 22:04 Balbir Singh [this message]
2020-03-18 23:14 ` [RFC PATCH] arch/x86: Optionally flush L1D on context switch Kees Cook
2020-03-20  1:35   ` Singh, Balbir
2020-03-19  0:38 ` Thomas Gleixner
2020-03-20  1:37   ` Singh, Balbir
2020-03-20 11:49     ` Thomas Gleixner
2020-03-21  1:42       ` Singh, Balbir
2020-03-21 10:05         ` Thomas Gleixner
2020-03-22  5:10           ` Herrenschmidt, Benjamin
2020-03-23  0:37           ` Singh, Balbir
2020-03-22  5:08       ` Herrenschmidt, Benjamin
2020-03-22 15:10         ` Andy Lutomirski
2020-03-22 23:17           ` Herrenschmidt, Benjamin
2020-03-23  0:12           ` Singh, Balbir
2020-03-22  5:01   ` Herrenschmidt, Benjamin
