From: "Kirill A. Shutemov" <kirill@shutemov.name>
To: Dave Hansen <dave.hansen@linux.intel.com>,
Andy Lutomirski <luto@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Paolo Bonzini <pbonzini@redhat.com>,
Sean Christopherson <sean.j.christopherson@intel.com>,
Vitaly Kuznetsov <vkuznets@redhat.com>,
Wanpeng Li <wanpengli@tencent.com>,
Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>
Cc: David Rientjes <rientjes@google.com>,
Andrea Arcangeli <aarcange@redhat.com>,
Kees Cook <keescook@chromium.org>, Will Drewry <wad@chromium.org>,
"Edgecombe, Rick P" <rick.p.edgecombe@intel.com>,
"Kleen, Andi" <andi.kleen@intel.com>,
Liran Alon <liran.alon@oracle.com>,
Mike Rapoport <rppt@kernel.org>,
x86@kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [RFCv2 09/16] KVM: mm: Introduce VM_KVM_PROTECTED
Date: Tue, 20 Oct 2020 09:18:52 +0300
Message-ID: <20201020061859.18385-10-kirill.shutemov@linux.intel.com>
In-Reply-To: <20201020061859.18385-1-kirill.shutemov@linux.intel.com>
Add a new VMA flag, VM_KVM_PROTECTED, that marks a VMA as not accessible
to userspace but still usable by the kernel through GUP when FOLL_KVM is
specified. FOLL_KVM is only used in KVM code, which has to know how to
deal with such pages.
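For illustration only (not part of this patch), a minimal sketch of how a
KVM-side caller is expected to read from a protected VMA, modelled on
copy_from_guest() below; kvm_read_protected_page() is a hypothetical
helper and error handling is trimmed:

	static int kvm_read_protected_page(unsigned long hva, void *data,
					   int offset, int len)
	{
		struct page *page;
		int npages;

		/*
		 * FOLL_KVM lets GUP look up pages in a VM_KVM_PROTECTED
		 * VMA; without it the lookup fails like a userspace
		 * access would.
		 */
		npages = get_user_pages_unlocked(hva, 1, &page, FOLL_KVM);
		if (npages != 1)
			return -EFAULT;

		memcpy(data, page_address(page) + offset, len);
		put_page(page);
		return 0;
	}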
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
include/linux/mm.h | 8 ++++++++
mm/gup.c | 20 ++++++++++++++++----
mm/huge_memory.c | 20 ++++++++++++++++----
mm/memory.c | 3 +++
mm/mmap.c | 3 +++
virt/kvm/async_pf.c | 2 +-
virt/kvm/kvm_main.c | 9 +++++----
7 files changed, 52 insertions(+), 13 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 16b799a0522c..c8d8cdcbc425 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -342,6 +342,8 @@ extern unsigned int kobjsize(const void *objp);
# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
#endif
+#define VM_KVM_PROTECTED 0
+
#ifndef VM_GROWSUP
# define VM_GROWSUP VM_NONE
#endif
@@ -658,6 +660,11 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
return vma->vm_flags & VM_ACCESS_FLAGS;
}
+static inline bool vma_is_kvm_protected(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_KVM_PROTECTED;
+}
+
#ifdef CONFIG_SHMEM
/*
* The vma_is_shmem is not inline because it is used only by slow
@@ -2766,6 +2773,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
#define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */
#define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */
+#define FOLL_KVM 0x100000 /* access to VM_KVM_PROTECTED VMAs */
/*
* FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
diff --git a/mm/gup.c b/mm/gup.c
index e869c634cc9a..accf6db0c06f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -384,10 +384,19 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
* FOLL_FORCE can write to even unwritable pte's, but only
* after we've gone through a COW cycle and they are dirty.
*/
-static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+static inline bool can_follow_write_pte(struct vm_area_struct *vma,
+ pte_t pte, unsigned int flags)
{
- return pte_write(pte) ||
- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+ if (pte_write(pte))
+ return true;
+
+ if ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte))
+ return true;
+
+ if (!vma_is_kvm_protected(vma) || !(vma->vm_flags & VM_WRITE))
+ return false;
+
+ return (vma->vm_flags & VM_SHARED) || page_mapcount(pte_page(pte)) == 1;
}
static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -430,7 +439,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
}
if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
- if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
+ if ((flags & FOLL_WRITE) && !can_follow_write_pte(vma, pte, flags)) {
pte_unmap_unlock(ptep, ptl);
return NULL;
}
@@ -750,6 +759,9 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
ctx->page_mask = 0;
+ if (vma_is_kvm_protected(vma) && (flags & FOLL_KVM))
+ flags &= ~FOLL_NUMA;
+
/* make this handle hugepd */
page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
if (!IS_ERR(page)) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index da397779a6d4..ec8cf9a40cfd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1322,10 +1322,19 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
* FOLL_FORCE can write to even unwritable pmd's, but only
* after we've gone through a COW cycle and they are dirty.
*/
-static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+static inline bool can_follow_write_pmd(struct vm_area_struct *vma,
+ pmd_t pmd, unsigned int flags)
{
- return pmd_write(pmd) ||
- ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+ if (pmd_write(pmd))
+ return true;
+
+ if ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd))
+ return true;
+
+ if (!vma_is_kvm_protected(vma) || !(vma->vm_flags & VM_WRITE))
+ return false;
+
+ return (vma->vm_flags & VM_SHARED) || page_mapcount(pmd_page(pmd)) == 1;
}
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
@@ -1338,7 +1347,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
assert_spin_locked(pmd_lockptr(mm, pmd));
- if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
+ if (flags & FOLL_WRITE && !can_follow_write_pmd(vma, *pmd, flags))
goto out;
/* Avoid dumping huge zero page */
@@ -1412,6 +1421,9 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
bool was_writable;
int flags = 0;
+ if (vma_is_kvm_protected(vma))
+ return VM_FAULT_SIGBUS;
+
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(pmd, *vmf->pmd)))
goto out_unlock;
diff --git a/mm/memory.c b/mm/memory.c
index eeae590e526a..2c9756b4e52f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4165,6 +4165,9 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
bool was_writable = pte_savedwrite(vmf->orig_pte);
int flags = 0;
+ if (vma_is_kvm_protected(vma))
+ return VM_FAULT_SIGBUS;
+
/*
* The "pte" at this point cannot be used safely without
* validation through pte_unmap_same(). It's of NUMA type but
diff --git a/mm/mmap.c b/mm/mmap.c
index bdd19f5b994e..be699f688b6c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -112,6 +112,9 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
pgprot_val(arch_vm_get_page_prot(vm_flags)));
+ if (vm_flags & VM_KVM_PROTECTED)
+ ret = PAGE_NONE;
+
return arch_filter_pgprot(ret);
}
EXPORT_SYMBOL(vm_get_page_prot);
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index dd777688d14a..85a2f99f6e9b 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -61,7 +61,7 @@ static void async_pf_execute(struct work_struct *work)
* access remotely.
*/
mmap_read_lock(mm);
- get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL,
+ get_user_pages_remote(mm, addr, 1, FOLL_WRITE | FOLL_KVM, NULL, NULL,
&locked);
if (locked)
mmap_read_unlock(mm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a9884cb8c867..125db5a73e10 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1794,7 +1794,7 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
static inline int check_user_page_hwpoison(unsigned long addr)
{
- int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
+ int rc, flags = FOLL_HWPOISON | FOLL_WRITE | FOLL_KVM;
rc = get_user_pages(addr, 1, flags, NULL, NULL);
return rc == -EHWPOISON;
@@ -1836,7 +1836,7 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
bool *writable, kvm_pfn_t *pfn)
{
- unsigned int flags = FOLL_HWPOISON;
+ unsigned int flags = FOLL_HWPOISON | FOLL_KVM;
struct page *page;
int npages = 0;
@@ -2327,7 +2327,7 @@ int copy_from_guest(void *data, unsigned long hva, int len, bool protected)
check_object_size(data, len, false);
while ((seg = next_segment(len, offset)) != 0) {
- npages = get_user_pages_unlocked(hva, 1, &page, 0);
+ npages = get_user_pages_unlocked(hva, 1, &page, FOLL_KVM);
if (npages != 1)
return -EFAULT;
memcpy(data, page_address(page) + offset, seg);
@@ -2354,7 +2354,8 @@ int copy_to_guest(unsigned long hva, const void *data, int len, bool protected)
check_object_size(data, len, true);
while ((seg = next_segment(len, offset)) != 0) {
- npages = get_user_pages_unlocked(hva, 1, &page, FOLL_WRITE);
+ npages = get_user_pages_unlocked(hva, 1, &page,
+ FOLL_WRITE | FOLL_KVM);
if (npages != 1)
return -EFAULT;
memcpy(page_address(page) + offset, data, seg);
--
2.26.2