From: "Roger Pau Monné" <roger.pau@citrix.com>
To: Jan Beulich <jbeulich@suse.com>
Cc: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>, Wei Liu <wl@xen.org>,
	Tim Deegan <tim@xen.org>,
	George Dunlap <george.dunlap@citrix.com>
Subject: Re: [PATCH 04/17] x86/PV: harden guest memory accesses against speculative abuse
Date: Tue, 9 Feb 2021 17:26:24 +0100
Message-ID: <YCK3sH/4EVLzRfZ3@Air-de-Roger>
In-Reply-To: <5da0c123-3b90-97e8-e1e5-10286be38ce7@suse.com>

On Thu, Jan 14, 2021 at 04:04:57PM +0100, Jan Beulich wrote:
> Inspired by
> https://lore.kernel.org/lkml/f12e7d3cecf41b2c29734ea45a393be21d4a8058.1597848273.git.jpoimboe@redhat.com/
> and prior work in that area of x86 Linux, suppress speculation with
> guest specified pointer values by suitably masking the addresses to
> non-canonical space in case they fall into Xen's virtual address range.
> 
> Introduce a new Kconfig control.
> 
> Note that it is necessary in such code to avoid using "m" kind operands:
> If we didn't, there would be no guarantee that the register passed to
> guest_access_mask_ptr is also the (base) one used for the memory access.
> 
> As a minor unrelated change in get_unsafe_asm() the unnecessary "itype"
> parameter gets dropped and the XOR on the fixup path gets changed to be
> a 32-bit one in all cases: This way we avoid pointless REX.W or operand
> size overrides, or writes to partial registers.
> 
> Requested-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> The insn sequence chosen is certainly up for discussion; I've picked
> this one despite the RCR because alternatives I could come up with,
> like
> 
> 	mov	$(HYPERVISOR_VIRT_END), %rax
> 	mov	$~0, %rdx
> 	mov	$0x7fffffffffffffff, %rcx
> 	cmp	%rax, %rdi
> 	cmovb	%rcx, %rdx
> 	and	%rdx, %rdi
> 
> weren't necessarily better: Either, as above, they are longer and
> require a 3rd scratch register, or they also utilize the carry flag in
> some similar way.
> ---
> Judging from the comment ahead of put_unsafe_asm() we might as well not
> tell gcc at all anymore about the memory access there, now that there's
> no use of the operand anymore in the assembly code.
> 
> --- a/xen/arch/x86/usercopy.c
> +++ b/xen/arch/x86/usercopy.c
> @@ -10,12 +10,19 @@
>  #include <xen/sched.h>
>  #include <asm/uaccess.h>
>  
> -unsigned __copy_to_user_ll(void __user *to, const void *from, unsigned n)
> +#ifndef GUARD
> +# define GUARD UA_KEEP
> +#endif
> +
> +unsigned int copy_to_guest_ll(void __user *to, const void *from, unsigned int n)
>  {
>      unsigned dummy;
>  
>      stac();
>      asm volatile (
> +        GUARD(
> +        "    guest_access_mask_ptr %[to], %q[scratch1], %q[scratch2]\n"

Don't you also need to take 'n' into account here, to assert that the
end of the accessed range doesn't fall into hypervisor address space?
Or is that fine because speculation wouldn't reach that far?

I also wonder why this needs to be done in assembly; couldn't you
check the address(es) in C instead?
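
Something like this is what I have in mind (just a sketch of mine,
untested; guest_masked() is a made-up name, and I'm reusing
HYPERVISOR_VIRT_END from the patch context):

    static inline void __user *guest_masked(void __user *p)
    {
        unsigned long v = (unsigned long)p;
        /*
         * Branchless on purpose: shift the all-ones mask by 1
         * (clearing bit 63) for addresses below HYPERVISOR_VIRT_END,
         * by 0 (a no-op) for addresses above Xen's range. Low-half
         * guest addresses have bit 63 clear anyway, while pointers
         * into Xen's range turn non-canonical.
         */
        unsigned long mask = ~0UL >> (v < HYPERVISOR_VIRT_END);

        return (void __user *)(v & mask);
    }

Though I guess the answer is already in the description: even with
such a helper the compiler would be free to carry the unmasked value
in a different register and use that one for the actual access (or to
re-introduce a branch), which is exactly what the asm macro and the
avoidance of "m" operands guard against.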

> +        )
>          "    cmp  $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n"
>          "    jbe  1f\n"
>          "    mov  %k[to], %[cnt]\n"
> @@ -42,6 +49,7 @@ unsigned __copy_to_user_ll(void __user *
>          _ASM_EXTABLE(1b, 2b)
>          : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
>            [aux] "=&r" (dummy)
> +          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
>          : "[aux]" (n)
>          : "memory" );
>      clac();
> @@ -49,12 +57,15 @@ unsigned __copy_to_user_ll(void __user *
>      return n;
>  }
>  
> -unsigned __copy_from_user_ll(void *to, const void __user *from, unsigned n)
> +unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int n)
>  {
>      unsigned dummy;
>  
>      stac();
>      asm volatile (
> +        GUARD(
> +        "    guest_access_mask_ptr %[from], %q[scratch1], %q[scratch2]\n"
> +        )
>          "    cmp  $"STR(2*BYTES_PER_LONG-1)", %[cnt]\n"
>          "    jbe  1f\n"
>          "    mov  %k[to], %[cnt]\n"
> @@ -87,6 +98,7 @@ unsigned __copy_from_user_ll(void *to, c
>          _ASM_EXTABLE(1b, 6b)
>          : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
>            [aux] "=&r" (dummy)
> +          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
>          : "[aux]" (n)
>          : "memory" );
>      clac();
> @@ -94,6 +106,8 @@ unsigned __copy_from_user_ll(void *to, c
>      return n;
>  }
>  
> +#if GUARD(1) + 0
> +
>  /**
>   * copy_to_user: - Copy a block of data into user space.
>   * @to:   Destination address, in user space.
> @@ -128,8 +142,11 @@ unsigned clear_user(void __user *to, uns
>  {
>      if ( access_ok(to, n) )
>      {
> +        long dummy;
> +
>          stac();
>          asm volatile (
> +            "    guest_access_mask_ptr %[to], %[scratch1], %[scratch2]\n"
>              "0:  rep stos"__OS"\n"
>              "    mov  %[bytes], %[cnt]\n"
>              "1:  rep stosb\n"
> @@ -140,7 +157,8 @@ unsigned clear_user(void __user *to, uns
>              ".previous\n"
>              _ASM_EXTABLE(0b,3b)
>              _ASM_EXTABLE(1b,2b)
> -            : [cnt] "=&c" (n), [to] "+D" (to)
> +            : [cnt] "=&c" (n), [to] "+D" (to), [scratch1] "=&r" (dummy),
> +              [scratch2] "=&r" (dummy)
>              : [bytes] "r" (n & (BYTES_PER_LONG - 1)),
>                [longs] "0" (n / BYTES_PER_LONG), "a" (0) );
>          clac();
> @@ -174,6 +192,16 @@ unsigned copy_from_user(void *to, const
>      return n;
>  }
>  
> +# undef GUARD
> +# define GUARD UA_DROP
> +# define copy_to_guest_ll copy_to_unsafe_ll
> +# define copy_from_guest_ll copy_from_unsafe_ll
> +# undef __user
> +# define __user
> +# include __FILE__
> +
> +#endif /* GUARD(1) */
> +
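
Not an objection, but the self-#include may deserve a comment for
future readers: the file gets compiled twice, first with GUARD
expanding to UA_KEEP (masking retained, "guest" variants), then via
#include __FILE__ with GUARD expanding to UA_DROP (masking dropped,
"unsafe" variants). A standalone toy showing the mechanism, in case
it helps review (entirely my own illustration, names made up):

    /* demo.c - toy double-inclusion in the style of usercopy.c.
     * Build and run: gcc demo.c -o demo && ./demo
     */
    #include <stdio.h>

    #ifndef GUARD
    # define UA_KEEP(args...) args
    # define UA_DROP(args...)
    # define GUARD UA_KEEP
    # define FUNC  access_guest      /* pass 1: hardening kept */
    #endif

    static void FUNC(void)
    {
        GUARD( puts("  masking the pointer"); ) /* empty in pass 2 */
        puts("  performing the access");
    }

    #if GUARD(1) + 0   /* "1 + 0" in pass 1, just "+ 0" in pass 2 */
    # undef  GUARD
    # undef  FUNC
    # define GUARD UA_DROP
    # define FUNC  access_unsafe     /* pass 2: hardening dropped */
    # include __FILE__

    int main(void)
    {
        puts("guest variant:");
        access_guest();
        puts("unsafe variant:");
        access_unsafe();
        return 0;
    }
    #endif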
>  /*
>   * Local variables:
>   * mode: C
> --- a/xen/arch/x86/x86_64/entry.S
> +++ b/xen/arch/x86/x86_64/entry.S
> @@ -446,6 +446,8 @@ UNLIKELY_START(g, create_bounce_frame_ba
>          jmp   asm_domain_crash_synchronous  /* Does not return */
>  __UNLIKELY_END(create_bounce_frame_bad_sp)
>  
> +        guest_access_mask_ptr %rsi, %rax, %rcx
> +
>  #define STORE_GUEST_STACK(reg, n) \
>  0:      movq  %reg,(n)*8(%rsi); \
>          _ASM_EXTABLE(0b, domain_crash_page_fault_ ## n ## x8)
> --- a/xen/common/Kconfig
> +++ b/xen/common/Kconfig
> @@ -114,6 +114,24 @@ config SPECULATIVE_HARDEN_BRANCH
>  
>  	  If unsure, say Y.
>  
> +config SPECULATIVE_HARDEN_GUEST_ACCESS
> +	bool "Speculative PV Guest Memory Access Hardening"
> +	default y
> +	depends on PV
> +	help
> +	  Contemporary processors may use speculative execution as a
> +	  performance optimisation, but this can potentially be abused by an
> +	  attacker to leak data via speculative sidechannels.
> +
> +	  One source of data leakage is via speculative accesses to hypervisor
> +	  memory through guest controlled values used to access guest memory.
> +
> +	  When enabled, code paths accessing PV guest memory will have guest
> +	  controlled addresses massaged such that memory accesses through them
> +	  won't touch hypervisor address space.
> +
> +	  If unsure, say Y.
> +
>  endmenu
>  
>  config HYPFS
> --- a/xen/include/asm-x86/asm-defns.h
> +++ b/xen/include/asm-x86/asm-defns.h
> @@ -44,3 +44,16 @@
>  .macro INDIRECT_JMP arg:req
>      INDIRECT_BRANCH jmp \arg
>  .endm
> +
> +.macro guest_access_mask_ptr ptr:req, scratch1:req, scratch2:req
> +#if defined(CONFIG_SPECULATIVE_HARDEN_GUEST_ACCESS)
> +    mov $(HYPERVISOR_VIRT_END - 1), \scratch1
> +    mov $~0, \scratch2
> +    cmp \ptr, \scratch1
> +    rcr $1, \scratch2
> +    and \scratch2, \ptr
> +#elif defined(CONFIG_DEBUG) && defined(CONFIG_PV)
> +    xor $~\@, \scratch1
> +    xor $~\@, \scratch2
> +#endif
> +.endm
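
For my own understanding, the CMP/RCR/AND trick: CMP sets CF iff
\ptr > HYPERVISOR_VIRT_END - 1, and RCR then rotates CF into bit 63
of the all-ones value, so the mask is all ones for addresses above
Xen's range and has bit 63 clear otherwise. Worked through on a few
example values (my own illustration, assuming HYPERVISOR_VIRT_END ==
0xffff880000000000):

    0x00007fffdeadbeef -> CF=0, mask 0x7fff... -> unchanged (bit 63
                          already clear, ordinary guest pointer)
    0xffff82d0deadbeef -> CF=0, mask 0x7fff... -> 0x7fff82d0deadbeef
                          (inside Xen's range, now non-canonical)
    0xffff8880deadbeef -> CF=1, mask all ones  -> unchanged (guest
                          kernel space above Xen's range)

And I take it the XORs in the CONFIG_DEBUG branch are there to
clobber the scratch registers, so that callers can't accidentally
grow a dependency on their contents?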
> --- a/xen/include/asm-x86/uaccess.h
> +++ b/xen/include/asm-x86/uaccess.h
> @@ -13,13 +13,19 @@
>  unsigned copy_to_user(void *to, const void *from, unsigned len);
>  unsigned clear_user(void *to, unsigned len);
>  unsigned copy_from_user(void *to, const void *from, unsigned len);
> +
>  /* Handles exceptions in both to and from, but doesn't do access_ok */
> -unsigned __copy_to_user_ll(void __user*to, const void *from, unsigned n);
> -unsigned __copy_from_user_ll(void *to, const void __user *from, unsigned n);
> +unsigned int copy_to_guest_ll(void __user*to, const void *from, unsigned int n);
> +unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int n);
> +unsigned int copy_to_unsafe_ll(void *to, const void *from, unsigned int n);
> +unsigned int copy_from_unsafe_ll(void *to, const void *from, unsigned int n);
>  
>  extern long __get_user_bad(void);
>  extern void __put_user_bad(void);
>  
> +#define UA_KEEP(args...) args
> +#define UA_DROP(args...)

I assume UA stands for 'user access'; since you have dropped the
other uses of 'user' in favour of 'guest', I wonder whether this
should be named just A_{KEEP/DROP}.

Thanks, Roger.


Thread overview: 46+ messages
2021-01-14 15:01 [PATCH 00/17] x86/PV: avoid speculation abuse through guest accessors plus Jan Beulich
2021-01-14 15:03 ` [PATCH 01/17] x86/shadow: use __put_user() instead of __copy_to_user() Jan Beulich
2021-01-14 15:04 ` [PATCH 02/17] x86: split __{get,put}_user() into "guest" and "unsafe" variants Jan Beulich
2021-02-05 15:43   ` Roger Pau Monné
2021-02-05 16:13     ` Jan Beulich
2021-02-05 16:18       ` Roger Pau Monné
2021-02-05 16:26         ` Jan Beulich
2021-02-09 13:07           ` Roger Pau Monné
2021-02-09 13:15             ` Jan Beulich
2021-02-09 14:46               ` Roger Pau Monné
2021-02-09 14:57                 ` Jan Beulich
2021-02-09 15:23                   ` Roger Pau Monné
2021-02-09 14:55   ` Roger Pau Monné
2021-02-09 15:14     ` Jan Beulich
2021-02-09 15:27       ` Roger Pau Monné
2021-01-14 15:04 ` [PATCH 03/17] x86: split __copy_{from,to}_user() " Jan Beulich
2021-02-09 16:06   ` Roger Pau Monné
2021-02-09 17:03     ` Jan Beulich
2021-01-14 15:04 ` [PATCH 04/17] x86/PV: harden guest memory accesses against speculative abuse Jan Beulich
2021-02-09 16:26   ` Roger Pau Monné [this message]
2021-02-10 16:55     ` Jan Beulich
2021-02-11  8:11       ` Roger Pau Monné
2021-02-11 11:28         ` Jan Beulich
2021-02-12 10:41   ` Roger Pau Monné
2021-02-12 12:48     ` Jan Beulich
2021-02-12 13:02       ` Roger Pau Monné
2021-02-12 13:15         ` Jan Beulich
2021-01-14 15:05 ` [PATCH 05/17] x86: rename {get,put}_user() to {get,put}_guest() Jan Beulich
2021-01-14 15:05 ` [PATCH 06/17] x86/gdbsx: convert "user" to "guest" accesses Jan Beulich
2021-01-14 15:06 ` [PATCH 07/17] x86: rename copy_{from,to}_user() to copy_{from,to}_guest_pv() Jan Beulich
2021-01-14 15:07 ` [PATCH 08/17] x86: move stac()/clac() from {get,put}_unsafe_asm() Jan Beulich
2021-01-14 15:07 ` [PATCH 09/17] x86/PV: use get_unsafe() instead of copy_from_unsafe() Jan Beulich
2021-01-14 15:08 ` [PATCH 10/17] x86/shadow: " Jan Beulich
2021-01-14 15:08 ` [PATCH 11/17] x86/shadow: polish shadow_write_entries() Jan Beulich
2021-01-14 15:09 ` [PATCH 12/17] x86/shadow: move shadow_set_l<N>e() to their own source file Jan Beulich
2021-01-14 15:09 ` [PATCH 13/17] x86/shadow: don't open-code SHF_* shorthands Jan Beulich
2021-01-14 15:10 ` [PATCH 14/17] x86/shadow: SH_type_l2h_shadow is PV-only Jan Beulich
2021-01-14 15:10 ` [PATCH 15/17] x86/shadow: drop SH_type_l2h_pae_shadow Jan Beulich
2021-01-22 13:11   ` Tim Deegan
2021-01-22 16:31     ` Jan Beulich
2021-01-22 20:02       ` Tim Deegan
2021-01-25 11:09         ` Jan Beulich
2021-01-25 11:33         ` Jan Beulich
2021-01-14 15:10 ` [PATCH 16/17] x86/shadow: only 4-level guest code needs building when !HVM Jan Beulich
2021-01-14 15:11 ` [PATCH 17/17] x86/shadow: adjust is_pv_*() checks Jan Beulich
2021-01-22 13:18 ` [PATCH 00/17] x86/PV: avoid speculation abuse through guest accessors plus Tim Deegan
