From: "Huang, Kai" <kai.huang@intel.com>
To: "kirill@shutemov.name" <kirill@shutemov.name>,
	"vkuznets@redhat.com" <vkuznets@redhat.com>
Cc: "kvm@vger.kernel.org" <kvm@vger.kernel.org>,
	"Kleen, Andi" <andi.kleen@intel.com>,
	"wad@chromium.org" <wad@chromium.org>,
	"keescook@chromium.org" <keescook@chromium.org>,
	"aarcange@redhat.com" <aarcange@redhat.com>,
	"dave.hansen@linux.intel.com" <dave.hansen@linux.intel.com>,
	"luto@kernel.org" <luto@kernel.org>,
	"wanpengli@tencent.com" <wanpengli@tencent.com>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	"kirill.shutemov@linux.intel.com"
	<kirill.shutemov@linux.intel.com>,
	"pbonzini@redhat.com" <pbonzini@redhat.com>,
	"linux-mm@kvack.org" <linux-mm@kvack.org>,
	"joro@8bytes.org" <joro@8bytes.org>,
	"peterz@infradead.org" <peterz@infradead.org>,
	"jmattson@google.com" <jmattson@google.com>,
	"Christopherson, Sean J" <sean.j.christopherson@intel.com>,
	"Edgecombe, Rick P" <rick.p.edgecombe@intel.com>,
	"rientjes@google.com" <rientjes@google.com>,
	"x86@kernel.org" <x86@kernel.org>
Subject: Re: [RFC 09/16] KVM: Protected memory extension
Date: Wed, 3 Jun 2020 01:34:45 +0000	[thread overview]
Message-ID: <05a440207cf0e6149a5ca2a7f1ecccde834a208c.camel@intel.com> (raw)
In-Reply-To: <20200525153435.c6mx3pjryyk4j4go@box>

On Mon, 2020-05-25 at 18:34 +0300, Kirill A. Shutemov wrote:
> On Mon, May 25, 2020 at 05:26:37PM +0200, Vitaly Kuznetsov wrote:
> > "Kirill A. Shutemov" <kirill@shutemov.name> writes:
> > 
> > > Add infrastructure that handles protected memory extension.
> > > 
> > > Arch-specific code has to provide hypercalls and define non-zero
> > > VM_KVM_PROTECTED.
> > > 
> > > Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> > > ---
> > >  include/linux/kvm_host.h |   4 ++
> > >  mm/mprotect.c            |   1 +
> > >  virt/kvm/kvm_main.c      | 131 +++++++++++++++++++++++++++++++++++++++
> > >  3 files changed, 136 insertions(+)
> > > 
> > > diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> > > index bd0bb600f610..d7072f6d6aa0 100644
> > > --- a/include/linux/kvm_host.h
> > > +++ b/include/linux/kvm_host.h
> > > @@ -700,6 +700,10 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
> > >  void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
> > >  				   struct kvm_memory_slot *slot);
> > >  
> > > +int kvm_protect_all_memory(struct kvm *kvm);
> > > +int kvm_protect_memory(struct kvm *kvm,
> > > +		       unsigned long gfn, unsigned long npages, bool protect);
> > > +
> > >  int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
> > >  			    struct page **pages, int nr_pages);
> > >  
> > > diff --git a/mm/mprotect.c b/mm/mprotect.c
> > > index 494192ca954b..552be3b4c80a 100644
> > > --- a/mm/mprotect.c
> > > +++ b/mm/mprotect.c
> > > @@ -505,6 +505,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
> > >  	vm_unacct_memory(charged);
> > >  	return error;
> > >  }
> > > +EXPORT_SYMBOL_GPL(mprotect_fixup);
> > >  
> > >  /*
> > >   * pkey==-1 when doing a legacy mprotect()
> > > diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> > > index 530af95efdf3..07d45da5d2aa 100644
> > > --- a/virt/kvm/kvm_main.c
> > > +++ b/virt/kvm/kvm_main.c
> > > @@ -155,6 +155,8 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
> > >  static unsigned long long kvm_createvm_count;
> > >  static unsigned long long kvm_active_vms;
> > >  
> > > +static int protect_memory(unsigned long start, unsigned long end, bool protect);
> > > +
> > >  __weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
> > >  		unsigned long start, unsigned long end, bool blockable)
> > >  {
> > > @@ -1309,6 +1311,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
> > >  	if (r)
> > >  		goto out_bitmap;
> > >  
> > > +	if (mem->memory_size && kvm->mem_protected) {
> > > +		r = protect_memory(new.userspace_addr,
> > > +				   new.userspace_addr + new.npages * PAGE_SIZE,
> > > +				   true);
> > > +		if (r)
> > > +			goto out_bitmap;
> > > +	}
> > > +
> > >  	if (old.dirty_bitmap && !new.dirty_bitmap)
> > >  		kvm_destroy_dirty_bitmap(&old);
> > >  	return 0;
> > > @@ -2652,6 +2662,127 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
> > >  }
> > >  EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
> > >  
> > > +static int protect_memory(unsigned long start, unsigned long end, bool protect)
> > > +{
> > > +	struct mm_struct *mm = current->mm;
> > > +	struct vm_area_struct *vma, *prev;
> > > +	int ret;
> > > +
> > > +	if (down_write_killable(&mm->mmap_sem))
> > > +		return -EINTR;
> > > +
> > > +	ret = -ENOMEM;
> > > +	vma = find_vma(current->mm, start);
> > > +	if (!vma)
> > > +		goto out;
> > > +
> > > +	ret = -EINVAL;
> > > +	if (vma->vm_start > start)
> > > +		goto out;
> > > +
> > > +	if (start > vma->vm_start)
> > > +		prev = vma;
> > > +	else
> > > +		prev = vma->vm_prev;
> > > +
> > > +	ret = 0;
> > > +	while (true) {
> > > +		unsigned long newflags, tmp;
> > > +
> > > +		tmp = vma->vm_end;
> > > +		if (tmp > end)
> > > +			tmp = end;
> > > +
> > > +		newflags = vma->vm_flags;
> > > +		if (protect)
> > > +			newflags |= VM_KVM_PROTECTED;
> > > +		else
> > > +			newflags &= ~VM_KVM_PROTECTED;
> > > +
> > > +		/* The VMA has been handled as part of another memslot */
> > > +		if (newflags == vma->vm_flags)
> > > +			goto next;
> > > +
> > > +		ret = mprotect_fixup(vma, &prev, start, tmp, newflags);
> > > +		if (ret)
> > > +			goto out;
> > > +
> > > +next:
> > > +		start = tmp;
> > > +		if (start < prev->vm_end)
> > > +			start = prev->vm_end;
> > > +
> > > +		if (start >= end)
> > > +			goto out;
> > > +
> > > +		vma = prev->vm_next;
> > > +		if (!vma || vma->vm_start != start) {
> > > +			ret = -ENOMEM;
> > > +			goto out;
> > > +		}
> > > +	}
> > > +out:
> > > +	up_write(&mm->mmap_sem);
> > > +	return ret;
> > > +}
> > > +
> > > +int kvm_protect_memory(struct kvm *kvm,
> > > +		       unsigned long gfn, unsigned long npages, bool protect)
> > > +{
> > > +	struct kvm_memory_slot *memslot;
> > > +	unsigned long start, end;
> > > +	gfn_t numpages;
> > > +
> > > +	if (!VM_KVM_PROTECTED)
> > > +		return -KVM_ENOSYS;
> > > +
> > > +	if (!npages)
> > > +		return 0;
> > > +
> > > +	memslot = gfn_to_memslot(kvm, gfn);
> > > +	/* Not backed by memory. It's okay. */
> > > +	if (!memslot)
> > > +		return 0;
> > > +
> > > +	start = gfn_to_hva_many(memslot, gfn, &numpages);
> > > +	end = start + npages * PAGE_SIZE;
> > > +
> > > +	/* XXX: Share range across memory slots? */
> > > +	if (WARN_ON(numpages < npages))
> > > +		return -EINVAL;
> > > +
> > > +	return protect_memory(start, end, protect);
> > > +}
> > > +EXPORT_SYMBOL_GPL(kvm_protect_memory);
> > > +
> > > +int kvm_protect_all_memory(struct kvm *kvm)
> > > +{
> > > +	struct kvm_memslots *slots;
> > > +	struct kvm_memory_slot *memslot;
> > > +	unsigned long start, end;
> > > +	int i, ret = 0;
> > > +
> > > +	if (!VM_KVM_PROTECTED)
> > > +		return -KVM_ENOSYS;
> > > +
> > > +	mutex_lock(&kvm->slots_lock);
> > > +	kvm->mem_protected = true;
> > 
> > What will happen upon guest reboot? Do we need to unprotect everything
> > to make sure we'll be able to boot? Also, after the reboot how will the
> > guest know that it is protected and needs to unprotect things? -> see my
> > idea about converting KVM_HC_ENABLE_MEM_PROTECTED to a stateful MSR (but
> > we'll likely have to reset it upon reboot anyway).
> 
> That's an extremely good question. I have not considered reboot. I tend to
> use -no-reboot in my setup.
> 
> I'll think about how to deal with reboot. I don't know enough about how it
> works now to give a good answer.
> 
> There may not be a good solution: unprotecting memory on reboot means we
> expose user data. We could wipe the data before unprotecting, but we should
> not wipe the BIOS or anything else that is required on reboot. I don't know.

If you let Qemu protect guest memory when it creates the VM, rather than
asking the guest kernel to enable protection when it boots, you won't have
this problem. The guest kernel then *queries* whether its memory is
protected during boot. This is consistent with how SEV works as well. A
rough sketch of what that boot-time query could look like is below.
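
The hypercall number and helper below are made up for illustration and are
not part of this RFC (which only defines KVM_HC_ENABLE_MEM_PROTECTED);
kvm_para_available() and kvm_hypercall0() are the existing guest-side
paravirt helpers. A minimal sketch, assuming Qemu has already protected the
memory before the guest first runs:

#include <asm/kvm_para.h>

/* Hypothetical status-query hypercall, for illustration only. */
#define KVM_HC_MEM_PROTECTED_STATUS	13

static bool mem_protected __ro_after_init;

/*
 * Query the host at boot instead of enabling protection from inside
 * the guest. The state is set by Qemu before the guest first runs, so
 * a reboot cannot leave it stale -- the same property SEV gets from
 * discovering memory encryption via CPUID/MSR early in boot.
 */
static bool __init kvm_mem_protected_check(void)
{
	if (!kvm_para_available())
		return false;

	mem_protected = kvm_hypercall0(KVM_HC_MEM_PROTECTED_STATUS) == 1;
	return mem_protected;
}

With that split, KVM_HC_ENABLE_MEM_PROTECTED would not need to be callable
from the guest at all, and there is no protected-to-unprotected transition
for the host to handle on reboot.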

