From: Steve Rutherford <srutherford@google.com>
To: Ashish Kalra <Ashish.Kalra@amd.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>,
	Joerg Roedel <joro@8bytes.org>, Borislav Petkov <bp@suse.de>,
	Tom Lendacky <Thomas.Lendacky@amd.com>, X86 ML <x86@kernel.org>,
	KVM list <kvm@vger.kernel.org>,
	LKML <linux-kernel@vger.kernel.org>,
	David Rientjes <rientjes@google.com>,
	Venu Busireddy <venu.busireddy@oracle.com>,
	Brijesh Singh <brijesh.singh@amd.com>
Subject: Re: [PATCH v8 15/18] KVM: x86: Add guest support for detecting and enabling SEV Live Migration feature.
Date: Fri, 29 May 2020 19:08:03 -0700
Message-ID: <CABayD+eSCAAfrcgod3OpEJ9puOdcUPDCCwsk=xmoxpk0yXTDxQ@mail.gmail.com>
In-Reply-To: <939af9274e47bb106f49b0154fd4222dd23e7f6d.1588711355.git.ashish.kalra@amd.com>

On Tue, May 5, 2020 at 2:20 PM Ashish Kalra <Ashish.Kalra@amd.com> wrote:
>
> From: Ashish Kalra <ashish.kalra@amd.com>
>
> The guest support for detecting and enabling the SEV live migration
> feature uses the following logic:
>
>  - kvm_init_platform() checks whether the guest is booted under EFI.
>
>    - If not EFI,
>
>      i) Check the KVM_FEATURE_SEV_LIVE_MIGRATION CPUID feature bit.
>
>      ii) If CPUID reports that live migration is supported, issue a
>          wrmsrl() to enable SEV live migration support.
>
>    - If EFI,
>
>      i) Check the KVM_FEATURE_SEV_LIVE_MIGRATION CPUID feature bit.
>
>      ii) If CPUID reports that live migration is supported, read the
>          UEFI environment variable which indicates OVMF support for
>          live migration.
>
>      iii) If the variable is set, issue a wrmsrl() to enable SEV live
>           migration support.
>
> The EFI live migration check is done using a late_initcall() callback.
>
> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
> ---
>  arch/x86/include/asm/mem_encrypt.h | 11 ++++++
>  arch/x86/kernel/kvm.c              | 62 ++++++++++++++++++++++++++++++
>  arch/x86/mm/mem_encrypt.c          | 11 ++++++
>  3 files changed, 84 insertions(+)
>
> diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
> index 848ce43b9040..d10e92ae5ca1 100644
> --- a/arch/x86/include/asm/mem_encrypt.h
> +++ b/arch/x86/include/asm/mem_encrypt.h
> @@ -20,6 +20,7 @@
>
>  extern u64 sme_me_mask;
>  extern bool sev_enabled;
> +extern bool sev_live_mig_enabled;
>
>  void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
>                          unsigned long decrypted_kernel_vaddr,
> @@ -42,6 +43,8 @@ void __init sme_enable(struct boot_params *bp);
>
>  int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
>  int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
> +void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
> +                                           bool enc);
>
>  /* Architecture __weak replacement functions */
>  void __init mem_encrypt_init(void);
> @@ -55,6 +58,7 @@ bool sev_active(void);
>  #else  /* !CONFIG_AMD_MEM_ENCRYPT */
>
>  #define sme_me_mask    0ULL
> +#define sev_live_mig_enabled   false
>
>  static inline void __init sme_early_encrypt(resource_size_t paddr,
>                                             unsigned long size) { }
> @@ -76,6 +80,8 @@ static inline int __init
>  early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
>  static inline int __init
>  early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
> +static inline void __init
> +early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {}
>
>  #define __bss_decrypted
>
> @@ -102,6 +108,11 @@ static inline u64 sme_get_me_mask(void)
>         return sme_me_mask;
>  }
>
> +static inline bool sev_live_migration_enabled(void)
> +{
> +       return sev_live_mig_enabled;
> +}
> +
>  #endif /* __ASSEMBLY__ */
>
>  #endif /* __X86_MEM_ENCRYPT_H__ */
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index 6efe0410fb72..4b29815de873 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -24,6 +24,7 @@
>  #include <linux/debugfs.h>
>  #include <linux/nmi.h>
>  #include <linux/swait.h>
> +#include <linux/efi.h>
>  #include <asm/timer.h>
>  #include <asm/cpu.h>
>  #include <asm/traps.h>
> @@ -403,6 +404,53 @@ static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
>         early_set_memory_decrypted((unsigned long) ptr, size);
>  }
>
> +#ifdef CONFIG_EFI
> +static bool setup_kvm_sev_migration(void)
> +{
> +       efi_char16_t efi_Sev_Live_Mig_support_name[] = L"SevLiveMigrationEnabled";
> +       efi_guid_t efi_variable_guid = MEM_ENCRYPT_GUID;
> +       efi_status_t status;
> +       unsigned long size;
> +       bool enabled;
> +
> +       if (!sev_live_migration_enabled())
> +               return false;
> +
> +       size = sizeof(enabled);
> +
> +       if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
> +               pr_info("setup_kvm_sev_migration: no efi\n");
> +               return false;
> +       }
> +
> +       /* Get variable contents into buffer */
> +       status = efi.get_variable(efi_Sev_Live_Mig_support_name,
> +                                 &efi_variable_guid, NULL, &size, &enabled);
> +
> +       if (status == EFI_NOT_FOUND) {
> +               pr_info("setup_kvm_sev_migration: variable not found\n");
> +               return false;
> +       }
> +
> +       if (status != EFI_SUCCESS) {
> +               pr_info("setup_kvm_sev_migration: get_variable fail\n");
> +               return false;
> +       }
> +
> +       if (enabled == 0) {
> +               pr_info("setup_kvm_sev_migration: live migration disabled in OVMF\n");
> +               return false;
> +       }
> +
> +       pr_info("setup_kvm_sev_migration: live migration enabled in OVMF\n");
> +       wrmsrl(MSR_KVM_SEV_LIVE_MIG_EN, KVM_SEV_LIVE_MIGRATION_ENABLED);
> +
> +       return true;
> +}
> +
> +late_initcall(setup_kvm_sev_migration);
> +#endif
> +
>  /*
>   * Iterate through all possible CPUs and map the memory region pointed
>   * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
> @@ -725,6 +773,20 @@ static void __init kvm_apic_init(void)
>
>  static void __init kvm_init_platform(void)
>  {
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +       if (sev_active() &&
> +           kvm_para_has_feature(KVM_FEATURE_SEV_LIVE_MIGRATION)) {
> +               printk(KERN_INFO "KVM enable live migration\n");
> +               sev_live_mig_enabled = true;
> +               /*
> +                * If not booted using EFI, enable Live migration support.
> +                */
> +               if (!efi_enabled(EFI_BOOT))
> +                       wrmsrl(MSR_KVM_SEV_LIVE_MIG_EN,
> +                              KVM_SEV_LIVE_MIGRATION_ENABLED);
> +       } else
> +               printk(KERN_INFO "KVM enable live migration feature unsupported\n");
> +#endif
>         kvmclock_init();
>         x86_platform.apic_post_init = kvm_apic_init;
>  }
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index c9800fa811f6..f54be71bc75f 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -46,6 +46,8 @@ EXPORT_SYMBOL_GPL(sev_enable_key);
>
>  bool sev_enabled __section(.data);
>
> +bool sev_live_mig_enabled __section(.data);
> +
>  /* Buffer used for early in-place encryption by BSP, no locking needed */
>  static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
>
> @@ -204,6 +206,9 @@ static void set_memory_enc_dec_hypercall(unsigned long vaddr, int npages,
>         unsigned long sz = npages << PAGE_SHIFT;
>         unsigned long vaddr_end, vaddr_next;
>
> +       if (!sev_live_migration_enabled())
> +               return;
> +
>         vaddr_end = vaddr + sz;
>
>         for (; vaddr < vaddr_end; vaddr = vaddr_next) {
> @@ -374,6 +379,12 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
>         return early_set_memory_enc_dec(vaddr, size, true);
>  }
>
> +void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
> +                                       bool enc)
> +{
> +       set_memory_enc_dec_hypercall(vaddr, npages, enc);
> +}
> +
>  /*
>   * SME and SEV are very similar but they are not the same, so there are
>   * times that the kernel will need to distinguish between SME and SEV. The
> --
> 2.17.1
>

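For readers following the hunks above: the context in set_memory_enc_dec_hypercall() shows the new sev_live_migration_enabled() guard and the loop header, but not the per-range body. Below is a minimal sketch of what that body amounts to, built on the KVM_HC_PAGE_ENC_STATUS hypercall and the kvm_sev_hypercall3() helper introduced in patches 07-08/18 of this series; treat the exact argument layout as an assumption rather than a quote of the patch.

/*
 * Sketch only (not the literal patch code): for each mapped range walked by
 * set_memory_enc_dec_hypercall(), tell the hypervisor that the range's
 * encryption status changed, so the host-side page encryption bitmap stays
 * accurate for live migration.
 */
static void notify_page_enc_status(unsigned long vaddr, unsigned long psize,
                                   bool enc)
{
        /* Physical frame backing this virtual address. */
        unsigned long pfn = slow_virt_to_phys((void *)vaddr) >> PAGE_SHIFT;

        /*
         * Assumed call shape: hypercall number, first frame, number of 4K
         * pages covered, and the new status (true = encrypted/private,
         * false = shared with the host).
         */
        kvm_sev_hypercall3(KVM_HC_PAGE_ENC_STATUS, pfn,
                           psize >> PAGE_SHIFT, enc);
}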

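Similarly, a hypothetical caller pattern for the new early_set_mem_enc_dec_hypercall() wrapper (illustration only, not code from this patch; patch 16/18 applies the same idea to the __bss_decrypted section): pair the early C-bit change with the hypercall so the host's bitmap tracks the guest's view of the page.

/*
 * Hypothetical usage: after clearing the C-bit on a region early in boot,
 * also report the new (shared) status to the hypervisor.
 */
static void __init share_region_with_host(void *ptr, unsigned long size)
{
        unsigned long vaddr = (unsigned long)ptr;
        int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;

        if (!sev_active())
                return;

        /* Flip the guest page tables: mark the region decrypted (shared). */
        early_set_memory_decrypted(vaddr, size);

        /* Keep the hypervisor's page encryption bitmap in sync. */
        early_set_mem_enc_dec_hypercall(vaddr, npages, false);
}
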
Reviewed-by: Steve Rutherford <srutherford@google.com>


Thread overview: 59+ messages
2020-05-05 21:13 [PATCH v8 00/18] Add AMD SEV guest live migration support Ashish Kalra
2020-05-05 21:14 ` [PATCH v8 01/18] KVM: SVM: Add KVM_SEV SEND_START command Ashish Kalra
2020-05-05 21:14 ` [PATCH v8 02/18] KVM: SVM: Add KVM_SEND_UPDATE_DATA command Ashish Kalra
2020-05-05 22:48   ` Venu Busireddy
2020-05-05 21:15 ` [PATCH v8 03/18] KVM: SVM: Add KVM_SEV_SEND_FINISH command Ashish Kalra
2020-05-05 22:51   ` Venu Busireddy
2020-05-05 21:15 ` [PATCH v8 04/18] KVM: SVM: Add support for KVM_SEV_RECEIVE_START command Ashish Kalra
2020-05-05 22:52   ` Venu Busireddy
2020-05-05 21:15 ` [PATCH v8 05/18] KVM: SVM: Add KVM_SEV_RECEIVE_UPDATE_DATA command Ashish Kalra
2020-05-05 21:16 ` [PATCH v8 06/18] KVM: SVM: Add KVM_SEV_RECEIVE_FINISH command Ashish Kalra
2020-05-05 21:16 ` [PATCH v8 07/18] KVM: x86: Add AMD SEV specific Hypercall3 Ashish Kalra
2020-05-05 21:17 ` [PATCH v8 08/18] KVM: X86: Introduce KVM_HC_PAGE_ENC_STATUS hypercall Ashish Kalra
2020-05-30  2:05   ` Steve Rutherford
2020-05-05 21:17 ` [PATCH v8 09/18] KVM: x86: Introduce KVM_GET_PAGE_ENC_BITMAP ioctl Ashish Kalra
2020-05-30  2:05   ` Steve Rutherford
2020-05-05 21:17 ` [PATCH v8 10/18] mm: x86: Invoke hypercall when page encryption status is changed Ashish Kalra
2020-05-30  2:06   ` Steve Rutherford
2020-05-05 21:18 ` [PATCH v8 11/18] KVM: x86: Introduce KVM_SET_PAGE_ENC_BITMAP ioctl Ashish Kalra
2020-05-30  2:06   ` Steve Rutherford
2020-05-05 21:18 ` [PATCH v8 12/18] KVM: SVM: Add support for static allocation of unified Page Encryption Bitmap Ashish Kalra
2020-05-30  2:07   ` Steve Rutherford
2020-05-30  5:49     ` Ashish Kalra
2020-12-04 11:08   ` Paolo Bonzini
2020-12-04 21:38     ` Ashish Kalra
2020-12-06 10:19       ` Paolo Bonzini
2020-05-05 21:19 ` [PATCH v8 13/18] KVM: x86: Introduce new KVM_FEATURE_SEV_LIVE_MIGRATION feature & Custom MSR Ashish Kalra
2020-05-30  2:07   ` Steve Rutherford
2020-12-04 11:20   ` Paolo Bonzini
2020-12-04 16:48     ` Sean Christopherson
2020-12-04 17:08       ` Ashish Kalra
2020-12-04 17:23         ` Sean Christopherson
2020-12-06 10:57           ` Paolo Bonzini
2020-12-06 14:09             ` Kalra, Ashish
2020-12-04 18:06       ` Ashish Kalra
2020-12-04 18:41         ` Sean Christopherson
2020-12-04 18:48           ` Kalra, Ashish
2020-12-04 19:02           ` Tom Lendacky
2020-12-04 21:42     ` Ashish Kalra
2020-05-05 21:20 ` [PATCH v8 14/18] EFI: Introduce the new AMD Memory Encryption GUID Ashish Kalra
2020-05-30  2:07   ` Steve Rutherford
2020-05-30  5:51     ` Ashish Kalra
2020-05-05 21:20 ` [PATCH v8 15/18] KVM: x86: Add guest support for detecting and enabling SEV Live Migration feature Ashish Kalra
2020-05-30  2:08   ` Steve Rutherford [this message]
2020-05-05 21:20 ` [PATCH v8 16/18] KVM: x86: Mark _bss_decrypted section variables as decrypted in page encryption bitmap Ashish Kalra
2020-05-30  2:08   ` Steve Rutherford
2020-05-05 21:21 ` [PATCH v8 17/18] KVM: x86: Add kexec support for SEV Live Migration Ashish Kalra
2020-05-05 21:21   ` Ashish Kalra
2020-05-30  2:08   ` Steve Rutherford
2020-05-30  2:08     ` Steve Rutherford
2020-05-05 21:22 ` [PATCH v8 18/18] KVM: SVM: Enable SEV live migration feature implicitly on Incoming VM(s) Ashish Kalra
2020-05-30  2:09   ` Steve Rutherford
2020-12-04 11:11   ` Paolo Bonzini
2020-12-04 11:22   ` Paolo Bonzini
2020-12-04 21:46     ` Ashish Kalra
2020-12-06 10:18       ` Paolo Bonzini
2020-05-18 19:07 ` [PATCH v8 00/18] Add AMD SEV guest live migration support Ashish Kalra
2020-06-01 20:02   ` Steve Rutherford
2020-06-03 22:14     ` Ashish Kalra
2020-08-05 18:29       ` Steve Rutherford
