From: Jim Mattson <jmattson@google.com>
To: speck@linutronix.de
Subject: [MODERATED] [PATCH] SPTE masking
Date: Wed, 8 Aug 2018 16:21:14 -0700
Message-ID: <CALMp9eTY2wjoc3yZUC3PyKDVPiiGU6t2fHLUwUxGDh5C5_x+wQ@mail.gmail.com>

[PATCH] kvm: x86: Set highest physical address bit in non-present/reserved SPTEs

Always set the upper-most supported physical address bit to 1 for SPTEs
that are marked as non-present or reserved, to make them unusable for
L1TF attacks from the guest. Currently, this just applies to MMIO SPTEs.
(We do not need to mark PTEs that are completely 0, as physical page 0
is already reserved.)
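
For a concrete illustration (values assumed for this example, not taken
from the patch): on a host reporting 46 supported physical address bits,
the highest supported bit is bit 45, so the hunk below computes

    shadow_nonpresent_or_rsvd_mask = 1ull << (46 - 1);	/* bit 45 */

and that bit is then forced on in every non-zero non-present or reserved
SPTE. Hosts with 51 or more physical address bits leave the mask 0.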

This allows L1TF to be mitigated without disabling hyper-threading, by
using shadow paging mode instead of EPT.
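
Because the forced bit can collide with a real bit of the guest physical
address stored in an MMIO SPTE, the mark_mmio_spte()/get_mmio_spte_gfn()
hunks below stash the clobbered GPA bit one position higher ("<< 1") and
restore it on decode. Here is a minimal standalone sketch of that round
trip, assuming a host with 46 supported physical address bits and a GPA
that fits in them; the names and values are hypothetical, not kernel
symbols:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* Assumed host: x86_phys_bits == 46, so the forced bit is bit 45. */
    static const uint64_t rsvd_mask = 1ull << 45;

    /* Force bit 45 on; stash the GPA's own bit 45 one position up, at bit 46. */
    static uint64_t encode_gpa(uint64_t gpa)
    {
            uint64_t spte = gpa | rsvd_mask;

            spte |= (gpa & rsvd_mask) << 1;
            return spte;
    }

    /* Clear the forced bit and the stash slot, then restore the saved bit. */
    static uint64_t decode_gpa(uint64_t spte)
    {
            uint64_t gpa = spte & ~(rsvd_mask | (rsvd_mask << 1));

            gpa |= (spte >> 1) & rsvd_mask;
            return gpa;
    }

    int main(void)
    {
            uint64_t gfn = (1ull << 33) | 0x123456;	/* GPA bit 45 ends up set */
            uint64_t gpa = gfn << PAGE_SHIFT;

            assert(decode_gpa(encode_gpa(gpa)) == gpa);
            return 0;
    }

Note that the sketch also clears the stash slot (bit 46) before restoring
the saved bit, so the round trip is exact even when the original GPA had
bit 45 set.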

Signed-off-by: Junaid Shahid <junaids@google.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/kvm/mmu.c | 25 ++++++++++++++++++++-----
 arch/x86/kvm/x86.c |  8 ++++++--
 2 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a44e568363a46..ef967785e056a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -221,6 +221,9 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
 						    PT64_EPT_EXECUTABLE_MASK;
 static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
 
+/* This mask must be set on all non-zero Non-Present or Reserved SPTEs */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
@@ -308,9 +311,12 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 {
 	unsigned int gen = kvm_current_mmio_generation(vcpu);
 	u64 mask = generation_mmio_spte_mask(gen);
+	u64 gpa = gfn << PAGE_SHIFT;
 
 	access &= ACC_WRITE_MASK | ACC_USER_MASK;
-	mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
+	mask |= shadow_mmio_value | shadow_nonpresent_or_rsvd_mask;
+	mask |= access | gpa;
+	mask |= (gpa & shadow_nonpresent_or_rsvd_mask) << 1;
 
 	trace_mark_mmio_spte(sptep, gfn, access, gen);
 	mmu_spte_set(sptep, mask);
@@ -323,8 +329,13 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
-	return (spte & ~mask) >> PAGE_SHIFT;
+	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
+		   shadow_nonpresent_or_rsvd_mask;
+	u64 gpa = spte & ~mask;
+
+	gpa |= (spte >> 1) & shadow_nonpresent_or_rsvd_mask;
+
+	return gpa >> PAGE_SHIFT;
 }
 
 static unsigned get_mmio_spte_access(u64 spte)
@@ -381,7 +392,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static void kvm_mmu_clear_all_pte_masks(void)
+static void kvm_mmu_reset_all_pte_masks(void)
 {
 	shadow_user_mask = 0;
 	shadow_accessed_mask = 0;
@@ -391,6 +402,10 @@ static void kvm_mmu_clear_all_pte_masks(void)
 	shadow_mmio_mask = 0;
 	shadow_present_mask = 0;
 	shadow_acc_track_mask = 0;
+
+	if (boot_cpu_data.x86_phys_bits < 51)
+		shadow_nonpresent_or_rsvd_mask =
+			1ull << (boot_cpu_data.x86_phys_bits - 1);
 }
 
 static int is_cpuid_PSE36(void)
@@ -5500,7 +5515,7 @@ int kvm_mmu_module_init(void)
 {
 	int ret = -ENOMEM;
 
-	kvm_mmu_clear_all_pte_masks();
+	kvm_mmu_reset_all_pte_masks();
 
 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
 					    sizeof(struct pte_list_desc),
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a5caa5e5480ca..60e102adf80be 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6503,8 +6503,12 @@ static void kvm_set_mmio_spte_mask(void)
 	 * Set the reserved bits and the present bit of a paging-structure
 	 * entry to generate a page fault with PFER.RSV = 1.
 	 */
-	 /* Mask the reserved physical address bits. */
-	mask = rsvd_bits(maxphyaddr, 51);
+
+	/*
+	 * Mask the uppermost physical address bit, which would be reserved as
+	 * long as the supported physical address width is less than 52.
+	 */
+	mask = 1ull << 51;
 
 	/* Set the present bit. */
 	mask |= 1ull;
-- 
2.18.0.597.ga71716f1ad-goog
