From: Steve Rutherford <srutherford@google.com>
To: Ashish Kalra <Ashish.Kalra@amd.com>
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"Ingo Molnar" <mingo@redhat.com>,
	"H. Peter Anvin" <hpa@zytor.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Joerg Roedel" <joro@8bytes.org>, "Borislav Petkov" <bp@suse.de>,
	"Tom Lendacky" <thomas.lendacky@amd.com>,
	"David Rientjes" <rientjes@google.com>,
	x86@kernel.org, "KVM list" <kvm@vger.kernel.org>,
	LKML <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH 08/12] KVM: X86: Introduce KVM_HC_PAGE_ENC_STATUS hypercall
Date: Wed, 19 Feb 2020 18:39:39 -0800
Message-ID: <CABayD+fM-s0+j6JXN5qb0zce2Kqi6AC8+c+7qbqKr0NgC-QYiQ@mail.gmail.com>
In-Reply-To: <fc5e111e0a4eda0e6ea1ee3923327384906aff36.1581555616.git.ashish.kalra@amd.com>

On Wed, Feb 12, 2020 at 5:17 PM Ashish Kalra <Ashish.Kalra@amd.com> wrote:
> +static int sev_resize_page_enc_bitmap(struct kvm *kvm, unsigned long new_size)
> +{
> +       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> +       unsigned long *map;
> +       unsigned long sz;
> +
> +       if (sev->page_enc_bmap_size >= new_size)
> +               return 0;
> +
> +       sz = ALIGN(new_size, BITS_PER_LONG) / 8;
> +
> +       map = vmalloc(sz);
> +       if (!map) {
> +               pr_err_once("Failed to allocate encrypted bitmap size %lx\n",
> +                               sz);
> +               return -ENOMEM;
> +       }
> +
> +       /* mark the page encrypted (by default) */
> +       memset(map, 0xff, sz);
> +
> +       bitmap_copy(map, sev->page_enc_bmap, sev->page_enc_bmap_size);
Personally, I would do the arithmetic and only memset() the tail that
bitmap_copy() doesn't overwrite, rather than re-filling the whole buffer
(being careful that page_enc_bmap_size is in bits, not bytes), but gcc
might be smart enough to do this for you. Something like the untested
sketch below is what I have in mind:
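
	/*
	 * Untested sketch: only the tail that bitmap_copy() leaves alone
	 * needs the 0xff fill. page_enc_bmap_size is in bits, so convert
	 * it to bytes via BITS_TO_LONGS() first.
	 */
	unsigned long old_sz = BITS_TO_LONGS(sev->page_enc_bmap_size) *
			       sizeof(unsigned long);

	memset((u8 *)map + old_sz, 0xff, sz - old_sz);
	bitmap_copy(map, sev->page_enc_bmap, sev->page_enc_bmap_size);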

> +       kvfree(sev->page_enc_bmap);
> +
> +       sev->page_enc_bmap = map;
> +       sev->page_enc_bmap_size = new_size;
> +
> +       return 0;
> +}
> +
> +static int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
> +                                 unsigned long npages, unsigned long enc)
> +{
> +       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> +       gfn_t gfn_start, gfn_end;
> +       int ret;
> +
> +       if (!sev_guest(kvm))
> +               return -EINVAL;
> +
> +       if (!npages)
> +               return 0;
> +
> +       gfn_start = gpa_to_gfn(gpa);
> +       gfn_end = gfn_start + npages;
> +
> +       /* out of bound access error check */
> +       if (gfn_end <= gfn_start)
> +               return -EINVAL;
> +
> +       /* lets make sure that gpa exist in our memslot */
> +       pfn_start = gfn_to_pfn(kvm, gfn_start);
> +       pfn_end = gfn_to_pfn(kvm, gfn_end);
I believe these functions assume as_id == 0, which is probably fine in
practice. If one wanted to migrate a VM with SMM support (which I believe
is the only current user of non-zero as_ids), it feels like SMM would need
to be in control of its own c-bit tracking, but that doesn't seem feasible
either: otherwise the guest kernel could corrupt SMM by passing invalid
c-bit statuses. I'm not certain anyone wants SMM with SEV anyway?
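
If you did want to be strict about it, something along these lines
(untested, purely illustrative, and ignoring the MMIO carve-out your
version allows) would check every address space instead of just as_id 0:

	/*
	 * Illustrative only: require the gfn range to be backed by a
	 * memslot in every address space, not just as_id 0.
	 */
	static bool gfn_range_in_memslots(struct kvm *kvm, gfn_t start, gfn_t end)
	{
		int as_id;

		for (as_id = 0; as_id < KVM_ADDRESS_SPACE_NUM; as_id++) {
			struct kvm_memslots *slots = __kvm_memslots(kvm, as_id);

			if (!__gfn_to_memslot(slots, start) ||
			    !__gfn_to_memslot(slots, end - 1))
				return false;
		}
		return true;
	}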

> +
> +       if (is_error_noslot_pfn(pfn_start) && !is_noslot_pfn(pfn_start)) {
> +               /*
> +                * Allow guest MMIO range(s) to be added
> +                * to the page encryption bitmap.
> +                */
> +               return -EINVAL;
> +       }
> +
> +       if (is_error_noslot_pfn(pfn_end) && !is_noslot_pfn(pfn_end)) {
> +               /*
> +                * Allow guest MMIO range(s) to be added
> +                * to the page encryption bitmap.
> +                */
> +               return -EINVAL;
> +       }
> +
> +       mutex_lock(&kvm->lock);
> +       ret = sev_resize_page_enc_bitmap(kvm, gfn_end);
> +       if (ret)
> +               goto unlock;
> +
> +       if (enc)
> +               __bitmap_set(sev->page_enc_bmap, gfn_start,
> +                               gfn_end - gfn_start);
> +       else
> +               __bitmap_clear(sev->page_enc_bmap, gfn_start,
> +                               gfn_end - gfn_start);
> +
> +unlock:
> +       mutex_unlock(&kvm->lock);
> +       return ret;
> +}
> +
>  static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
>  {
>         struct kvm_sev_cmd sev_cmd;
> @@ -7972,6 +8064,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
>         .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
>
>         .apic_init_signal_blocked = svm_apic_init_signal_blocked,
> +
> +       .page_enc_status_hc = svm_page_enc_status_hc,
>  };
>
>  static int __init svm_init(void)
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 9a6664886f2e..7963f2979fdf 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -7879,6 +7879,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
>         .nested_get_evmcs_version = NULL,
>         .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
>         .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
> +       .page_enc_status_hc = NULL,
>  };
>
>  static void vmx_cleanup_l1d_flush(void)
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index fbabb2f06273..298627fa3d39 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -7547,6 +7547,12 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
>                 kvm_sched_yield(vcpu->kvm, a0);
>                 ret = 0;
>                 break;
> +       case KVM_HC_PAGE_ENC_STATUS:
> +               ret = -KVM_ENOSYS;
> +               if (kvm_x86_ops->page_enc_status_hc)
> +                       ret = kvm_x86_ops->page_enc_status_hc(vcpu->kvm,
> +                                       a0, a1, a2);
> +               break;
>         default:
>                 ret = -KVM_ENOSYS;
>                 break;
Add a cap to kvm_vm_ioctl_enable_cap() so that the VMM can configure
whether or not this hypercall is offered. Moving to an enable cap would
also let the VMM pass down the expected size of the c-bit tracking buffer,
so that you don't need to handle dynamic resizing in response to guest
hypercalls; otherwise KVM will sporadically start copying around large
buffers when working with large VMs. Roughly what I'm picturing (cap and
hook names invented):
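
	case KVM_CAP_PAGE_ENC_STATUS_HC:	/* invented name */
		r = -EINVAL;
		/* args[0]: number of guest page frames the VMM expects to track */
		if (kvm_x86_ops->enable_page_enc_hc && cap->args[0])
			r = kvm_x86_ops->enable_page_enc_hc(kvm, cap->args[0]);
		break;

On the SVM side, enable_page_enc_hc() would just call
sev_resize_page_enc_bitmap() once with the supplied size and set a flag
that svm_page_enc_status_hc() checks, so the hypercall path never has to
resize.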

Stepping back a bit, I'm a little surprised that you don't treat the
c-bit buffers the same way as the dirty tracking buffers and put them
alongside the memslots. That's probably more effort, and the strategy of
using one large buffer should work fine (assuming you don't need to
support non-zero as_ids). For what it's worth, what I was picturing is
roughly the following (field and helper names are made up, not a concrete
proposal):
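
	/*
	 * Made-up sketch: a per-slot bitmap living next to dirty_bitmap,
	 * e.g. in struct kvm_arch_memory_slot, one bit per gfn in the slot.
	 */
	unsigned long *page_enc_bitmap;

	static void set_page_enc_status(struct kvm_memory_slot *slot,
					gfn_t gfn, bool enc)
	{
		unsigned long rel_gfn = gfn - slot->base_gfn;

		if (enc)
			set_bit(rel_gfn, slot->arch.page_enc_bitmap);
		else
			clear_bit(rel_gfn, slot->arch.page_enc_bitmap);
	}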
