From: Yuan Yao <yuan.yao@linux.intel.com>
To: Chao Peng <chao.p.peng@linux.intel.com>
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
linux-arch@vger.kernel.org, linux-api@vger.kernel.org,
linux-doc@vger.kernel.org, qemu-devel@nongnu.org,
Paolo Bonzini <pbonzini@redhat.com>,
Jonathan Corbet <corbet@lwn.net>,
Sean Christopherson <seanjc@google.com>,
Vitaly Kuznetsov <vkuznets@redhat.com>,
Wanpeng Li <wanpengli@tencent.com>,
Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Arnd Bergmann <arnd@arndb.de>,
Naoya Horiguchi <naoya.horiguchi@nec.com>,
Miaohe Lin <linmiaohe@huawei.com>,
x86@kernel.org, "H . Peter Anvin" <hpa@zytor.com>,
Hugh Dickins <hughd@google.com>, Jeff Layton <jlayton@kernel.org>,
"J . Bruce Fields" <bfields@fieldses.org>,
Andrew Morton <akpm@linux-foundation.org>,
Shuah Khan <shuah@kernel.org>, Mike Rapoport <rppt@kernel.org>,
Steven Price <steven.price@arm.com>,
"Maciej S . Szmigiero" <mail@maciej.szmigiero.name>,
Vlastimil Babka <vbabka@suse.cz>,
Vishal Annapurve <vannapurve@google.com>,
Yu Zhang <yu.c.zhang@linux.intel.com>,
"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
luto@kernel.org, jun.nakajima@intel.com, dave.hansen@intel.com,
ak@linux.intel.com, david@redhat.com, aarcange@redhat.com,
ddutile@redhat.com, dhildenb@redhat.com,
Quentin Perret <qperret@google.com>,
tabba@google.com, Michael Roth <michael.roth@amd.com>,
mhocko@suse.com, wei.w.wang@intel.com
Subject: Re: [PATCH v10 6/9] KVM: Unmap existing mappings when change the memory attributes
Date: Wed, 7 Dec 2022 16:13:14 +0800 [thread overview]
Message-ID: <20221207081314.hfyavisybcraezrh@yy-desk-7060> (raw)
In-Reply-To: <20221202061347.1070246-7-chao.p.peng@linux.intel.com>
On Fri, Dec 02, 2022 at 02:13:44PM +0800, Chao Peng wrote:
> Unmap the existing guest mappings when the memory attribute is changed
> between shared and private. This is needed because shared pages and
> private pages are from different backends; unmapping the existing ones
> gives the page fault handler a chance to re-populate the mappings
> according to the new attribute.
>
> Only architectures that have private memory support need this, and
> such architectures are expected to override the weak
> kvm_arch_has_private_mem().
>
> Also, during the time frame in which the memory attribute is being
> changed and the mappings are being unmapped, page faults may occur in
> the same memory range and could cause incorrect page state; invoke the
> kvm_mmu_invalidate_* helpers so the page fault handler retries during
> this time frame.
>
> Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
> ---
> include/linux/kvm_host.h | 7 +-
> virt/kvm/kvm_main.c | 168 ++++++++++++++++++++++++++-------------
> 2 files changed, 116 insertions(+), 59 deletions(-)
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 3d69484d2704..3331c0c92838 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -255,7 +255,6 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
> int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
> #endif
>
> -#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
> struct kvm_gfn_range {
> struct kvm_memory_slot *slot;
> gfn_t start;
> @@ -264,6 +263,8 @@ struct kvm_gfn_range {
> bool may_block;
> };
> bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
> +
> +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
> bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
> bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
> bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
> @@ -785,11 +786,12 @@ struct kvm {
>
> #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
> struct mmu_notifier mmu_notifier;
> +#endif
> unsigned long mmu_invalidate_seq;
> long mmu_invalidate_in_progress;
> gfn_t mmu_invalidate_range_start;
> gfn_t mmu_invalidate_range_end;
> -#endif
> +
> struct list_head devices;
> u64 manual_dirty_log_protect;
> struct dentry *debugfs_dentry;
> @@ -1480,6 +1482,7 @@ bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
> int kvm_arch_post_init_vm(struct kvm *kvm);
> void kvm_arch_pre_destroy_vm(struct kvm *kvm);
> int kvm_arch_create_vm_debugfs(struct kvm *kvm);
> +bool kvm_arch_has_private_mem(struct kvm *kvm);
>
> #ifndef __KVM_HAVE_ARCH_VM_ALLOC
> /*
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index ad55dfbc75d7..4e1e1e113bf0 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -520,6 +520,62 @@ void kvm_destroy_vcpus(struct kvm *kvm)
> }
> EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
>
> +void kvm_mmu_invalidate_begin(struct kvm *kvm)
> +{
> + /*
> + * The count increase must become visible at unlock time as no
> + * spte can be established without taking the mmu_lock and
> + * count is also read inside the mmu_lock critical section.
> + */
> + kvm->mmu_invalidate_in_progress++;
> +
> + if (likely(kvm->mmu_invalidate_in_progress == 1)) {
> + kvm->mmu_invalidate_range_start = INVALID_GPA;
> + kvm->mmu_invalidate_range_end = INVALID_GPA;
> + }
> +}
> +
> +void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
> +{
> + WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
> +
> + if (likely(kvm->mmu_invalidate_in_progress == 1)) {
> + kvm->mmu_invalidate_range_start = start;
> + kvm->mmu_invalidate_range_end = end;
> + } else {
> + /*
> + * Fully tracking multiple concurrent ranges has diminishing
> + * returns. Keep things simple and just find the minimal range
> + * which includes the current and new ranges. As there won't be
> + * enough information to subtract a range after its invalidate
> + * completes, any ranges invalidated concurrently will
> + * accumulate and persist until all outstanding invalidates
> + * complete.
> + */
> + kvm->mmu_invalidate_range_start =
> + min(kvm->mmu_invalidate_range_start, start);
> + kvm->mmu_invalidate_range_end =
> + max(kvm->mmu_invalidate_range_end, end);
> + }
> +}
> +
> +void kvm_mmu_invalidate_end(struct kvm *kvm)
> +{
> + /*
> + * This sequence increase will notify the kvm page fault that
> + * the page that is going to be mapped in the spte could have
> + * been freed.
> + */
> + kvm->mmu_invalidate_seq++;
> + smp_wmb();
> + /*
> + * The above sequence increase must be visible before the
> + * below count decrease, which is ensured by the smp_wmb above
> + * in conjunction with the smp_rmb in mmu_invalidate_retry().
> + */
> + kvm->mmu_invalidate_in_progress--;
> +}
> +
> #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
> static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
> {
> @@ -714,45 +770,6 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
> kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
> }
>
> -void kvm_mmu_invalidate_begin(struct kvm *kvm)
> -{
> - /*
> - * The count increase must become visible at unlock time as no
> - * spte can be established without taking the mmu_lock and
> - * count is also read inside the mmu_lock critical section.
> - */
> - kvm->mmu_invalidate_in_progress++;
> -
> - if (likely(kvm->mmu_invalidate_in_progress == 1)) {
> - kvm->mmu_invalidate_range_start = INVALID_GPA;
> - kvm->mmu_invalidate_range_end = INVALID_GPA;
> - }
> -}
> -
> -void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
> -{
> - WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
> -
> - if (likely(kvm->mmu_invalidate_in_progress == 1)) {
> - kvm->mmu_invalidate_range_start = start;
> - kvm->mmu_invalidate_range_end = end;
> - } else {
> - /*
> - * Fully tracking multiple concurrent ranges has diminishing
> - * returns. Keep things simple and just find the minimal range
> - * which includes the current and new ranges. As there won't be
> - * enough information to subtract a range after its invalidate
> - * completes, any ranges invalidated concurrently will
> - * accumulate and persist until all outstanding invalidates
> - * complete.
> - */
> - kvm->mmu_invalidate_range_start =
> - min(kvm->mmu_invalidate_range_start, start);
> - kvm->mmu_invalidate_range_end =
> - max(kvm->mmu_invalidate_range_end, end);
> - }
> -}
> -
> static bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
> {
> kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
> @@ -806,23 +823,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
> return 0;
> }
>
> -void kvm_mmu_invalidate_end(struct kvm *kvm)
> -{
> - /*
> - * This sequence increase will notify the kvm page fault that
> - * the page that is going to be mapped in the spte could have
> - * been freed.
> - */
> - kvm->mmu_invalidate_seq++;
> - smp_wmb();
> - /*
> - * The above sequence increase must be visible before the
> - * below count decrease, which is ensured by the smp_wmb above
> - * in conjunction with the smp_rmb in mmu_invalidate_retry().
> - */
> - kvm->mmu_invalidate_in_progress--;
> -}
> -
> static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
> const struct mmu_notifier_range *range)
> {
> @@ -1140,6 +1140,11 @@ int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
> return 0;
> }
>
> +bool __weak kvm_arch_has_private_mem(struct kvm *kvm)
> +{
> + return false;
> +}
> +
> static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
> {
> struct kvm *kvm = kvm_arch_alloc_vm();
> @@ -2349,15 +2354,47 @@ static u64 kvm_supported_mem_attributes(struct kvm *kvm)
> return 0;
> }
>
> +static void kvm_unmap_mem_range(struct kvm *kvm, gfn_t start, gfn_t end)
> +{
> + struct kvm_gfn_range gfn_range;
> + struct kvm_memory_slot *slot;
> + struct kvm_memslots *slots;
> + struct kvm_memslot_iter iter;
> + int i;
> + int r = 0;
> +
> + gfn_range.pte = __pte(0);
> + gfn_range.may_block = true;
> +
> + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
> + slots = __kvm_memslots(kvm, i);
> +
> + kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
> + slot = iter.slot;
> + gfn_range.start = max(start, slot->base_gfn);
> + gfn_range.end = min(end, slot->base_gfn + slot->npages);
> + if (gfn_range.start >= gfn_range.end)
> + continue;
> + gfn_range.slot = slot;
> +
> + r |= kvm_unmap_gfn_range(kvm, &gfn_range);
> + }
> + }
> +
> + if (r)
> + kvm_flush_remote_tlbs(kvm);
> +}
> +
> static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
> struct kvm_memory_attributes *attrs)
> {
> gfn_t start, end;
> unsigned long i;
> void *entry;
> + int idx;
> u64 supported_attrs = kvm_supported_mem_attributes(kvm);
>
> - /* flags is currently not used. */
> + /* 'flags' is currently not used. */
> if (attrs->flags)
> return -EINVAL;
> if (attrs->attributes & ~supported_attrs)
> @@ -2372,6 +2409,13 @@ static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
>
> entry = attrs->attributes ? xa_mk_value(attrs->attributes) : NULL;
>
> + if (kvm_arch_has_private_mem(kvm)) {
> + KVM_MMU_LOCK(kvm);
> + kvm_mmu_invalidate_begin(kvm);
> + kvm_mmu_invalidate_range_add(kvm, start, end);
Nit: this works for KVM_MEMORY_ATTRIBUTE_PRIVATE, but the
invalidation shouldn't be necessary for attribute changes of:
KVM_MEMORY_ATTRIBUTE_READ
KVM_MEMORY_ATTRIBUTE_WRITE
KVM_MEMORY_ATTRIBUTE_EXECUTE
> + KVM_MMU_UNLOCK(kvm);
> + }
> +
> mutex_lock(&kvm->lock);
> for (i = start; i < end; i++)
> if (xa_err(xa_store(&kvm->mem_attr_array, i, entry,
> @@ -2379,6 +2423,16 @@ static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
> break;
> mutex_unlock(&kvm->lock);
>
> + if (kvm_arch_has_private_mem(kvm)) {
> + idx = srcu_read_lock(&kvm->srcu);
> + KVM_MMU_LOCK(kvm);
> + if (i > start)
> + kvm_unmap_mem_range(kvm, start, i);
> + kvm_mmu_invalidate_end(kvm);
Ditto.
> + KVM_MMU_UNLOCK(kvm);
> + srcu_read_unlock(&kvm->srcu, idx);
> + }
> +
> attrs->address = i << PAGE_SHIFT;
> attrs->size = (end - i) << PAGE_SHIFT;
>
> --
> 2.25.1
>
>
next prev parent reply other threads:[~2022-12-07 8:13 UTC|newest]
Thread overview: 398+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-02 6:13 [PATCH v10 0/9] KVM: mm: fd-based approach for supporting KVM Chao Peng
2022-12-02 6:13 ` [PATCH v10 1/9] mm: Introduce memfd_restricted system call to create restricted user memory Chao Peng
2022-12-06 14:57 ` Fuad Tabba
2022-12-07 13:50 ` Chao Peng
2022-12-13 23:49 ` Huang, Kai
2022-12-19 7:53 ` Chao Peng
2022-12-19 8:48 ` Huang, Kai
2022-12-20 7:22 ` Chao Peng
2022-12-20 8:33 ` Huang, Kai
2022-12-21 13:39 ` Chao Peng
2022-12-22 0:37 ` Huang, Kai
2022-12-23 8:20 ` Chao Peng
2023-01-23 14:03 ` Vlastimil Babka
2023-01-23 15:18 ` Kirill A. Shutemov
2023-02-13 14:23 ` Vlastimil Babka
2023-01-23 23:01 ` Huang, Kai
2023-01-23 23:38 ` Sean Christopherson
2023-01-24 7:51 ` Vlastimil Babka
2022-12-22 18:15 ` Sean Christopherson
2022-12-23 0:50 ` Huang, Kai
2022-12-23 8:24 ` Chao Peng
2023-01-23 15:43 ` Kirill A. Shutemov
2023-02-13 11:43 ` Vlastimil Babka
2023-02-13 13:10 ` Michael Roth
2023-01-13 21:54 ` Sean Christopherson
2023-01-17 12:41 ` Chao Peng
2023-01-17 16:34 ` Sean Christopherson
2023-01-18 8:16 ` Chao Peng
2023-01-18 10:17 ` Isaku Yamahata
2023-02-22 2:07 ` Alexey Kardashevskiy
2023-02-24 5:42 ` Chao Peng
2023-01-30 5:26 ` Ackerley Tng
2023-01-30 6:04 ` Wang, Wei W
2023-02-16 9:51 ` Nikunj A. Dadhania
2023-03-20 19:08 ` Michael Roth
2023-04-13 15:25 ` [PATCH v7 00/14] KVM: mm: fd-based approach for supporting KVM guest private memory Christian Brauner
2023-04-13 22:28 ` Sean Christopherson
2023-04-14 22:38 ` Ackerley Tng
2023-04-14 23:26 ` Sean Christopherson
2023-04-15 0:06 ` Sean Christopherson
2023-04-19 8:29 ` Christian Brauner
2023-04-20 0:49 ` Sean Christopherson
2023-04-20 8:35 ` Christian Brauner
2023-04-13 17:22 ` [PATCH v10 1/9] mm: Introduce memfd_restricted system call to create restricted user memory Ackerley Tng
2022-12-02 6:13 ` [PATCH v10 2/9] KVM: Introduce per-page memory attributes Chao Peng
2022-12-06 13:34 ` Fabiano Rosas
2022-12-07 14:31 ` Chao Peng
2022-12-06 15:07 ` Fuad Tabba
2022-12-07 14:51 ` Chao Peng
2022-12-16 15:09 ` Borislav Petkov
2022-12-19 8:15 ` Chao Peng
2022-12-19 10:17 ` Borislav Petkov
2022-12-20 7:24 ` Chao Peng
2022-12-28 8:28 ` Chenyi Qiang
2023-01-03 1:39 ` Chao Peng
2023-01-03 3:32 ` Wang, Wei W
2023-01-03 23:06 ` Sean Christopherson
2023-01-05 4:39 ` Chao Peng
2023-01-13 22:02 ` Sean Christopherson
2023-01-17 3:21 ` Binbin Wu
2023-01-17 13:30 ` Chao Peng
2023-01-17 17:25 ` Sean Christopherson
2023-02-09 7:25 ` Isaku Yamahata
2023-02-10 0:35 ` Sean Christopherson
2023-02-13 23:53 ` Isaku Yamahata
2023-02-14 18:07 ` Sean Christopherson
2023-05-19 17:32 ` Nicolas Saenz Julienne
2023-05-19 18:23 ` Sean Christopherson
2023-05-19 19:49 ` Nicolas Saenz Julienne
2023-05-19 19:57 ` Sean Christopherson
2023-05-23 18:59 ` Nicolas Saenz Julienne
2022-12-02 6:13 ` [PATCH v10 3/9] KVM: Extend the memslot to support fd-based private memory Chao Peng
2022-12-05 9:03 ` Fuad Tabba
2022-12-06 11:53 ` Chao Peng
2022-12-06 12:39 ` Fuad Tabba
2022-12-07 15:10 ` Chao Peng
2022-12-08 8:37 ` Xiaoyao Li
2022-12-08 11:30 ` Chao Peng
2022-12-13 12:04 ` Xiaoyao Li
2022-12-19 7:50 ` Chao Peng
2022-12-19 14:36 ` Borislav Petkov
2022-12-20 7:43 ` Chao Peng
2022-12-20 9:55 ` Borislav Petkov
2022-12-21 13:42 ` Chao Peng
2023-01-05 11:23 ` Jarkko Sakkinen
2023-01-06 9:40 ` Chao Peng
2023-01-09 19:32 ` Sean Christopherson
2023-01-10 9:14 ` Chao Peng
2023-01-10 22:51 ` Vishal Annapurve
2023-01-13 22:37 ` Sean Christopherson
2023-01-17 12:42 ` Chao Peng
2023-01-20 23:42 ` Jarkko Sakkinen
2023-01-20 23:28 ` Jarkko Sakkinen
2022-12-02 6:13 ` [PATCH v10 4/9] KVM: Add KVM_EXIT_MEMORY_FAULT exit Chao Peng
2022-12-06 15:47 ` Fuad Tabba
2022-12-07 15:11 ` Chao Peng
2023-01-13 23:13 ` Sean Christopherson
2022-12-02 6:13 ` [PATCH v10 5/9] KVM: Use gfn instead of hva for mmu_notifier_retry Chao Peng
2022-12-05 9:23 ` Fuad Tabba
2022-12-06 11:56 ` Chao Peng
2022-12-06 15:48 ` Fuad Tabba
2022-12-09 6:24 ` Chao Peng
2022-12-07 6:34 ` Isaku Yamahata
2022-12-07 15:14 ` Chao Peng
2022-12-02 6:13 ` [PATCH v10 6/9] KVM: Unmap existing mappings when change the memory attributes Chao Peng
2022-12-07 8:13 ` Yuan Yao [this message]
2022-12-08 11:20 ` Chao Peng
2022-12-09 5:43 ` Yuan Yao
2022-12-07 17:16 ` Fuad Tabba
2022-12-08 11:13 ` Chao Peng
2022-12-09 8:57 ` Fuad Tabba
2022-12-12 7:22 ` Chao Peng
2022-12-13 23:51 ` Huang, Kai
2022-12-19 7:54 ` Chao Peng
2023-01-13 22:50 ` Sean Christopherson
2022-12-02 6:13 ` [PATCH v10 7/9] KVM: Update lpage info when private/shared memory are mixed Chao Peng
2022-12-05 22:49 ` Isaku Yamahata
2022-12-06 12:02 ` Chao Peng
2022-12-07 6:42 ` Isaku Yamahata
2022-12-08 11:17 ` Chao Peng
2023-01-13 23:12 ` Sean Christopherson
2023-01-13 23:16 ` Sean Christopherson
2023-01-28 13:54 ` Chao Peng
2022-12-02 6:13 ` [PATCH v10 8/9] KVM: Handle page fault for private memory Chao Peng
2022-12-08 2:29 ` Yuan Yao
2022-12-08 11:23 ` Chao Peng
2022-12-09 5:45 ` Yuan Yao
2022-12-09 9:01 ` Fuad Tabba
2022-12-12 7:23 ` Chao Peng
2023-01-13 23:29 ` Sean Christopherson
2022-12-02 6:13 ` [PATCH v10 9/9] KVM: Enable and expose KVM_MEM_PRIVATE Chao Peng
2022-12-09 9:11 ` Fuad Tabba
2023-01-05 20:38 ` Vishal Annapurve
2023-01-06 4:13 ` Chao Peng
2023-01-14 0:01 ` Sean Christopherson
2023-01-17 13:12 ` Chao Peng
2023-01-17 19:35 ` Sean Christopherson
2023-01-18 8:23 ` Chao Peng
2023-01-28 14:00 ` Chao Peng
2023-03-08 0:13 ` Ackerley Tng
2023-03-08 7:40 ` Chao Peng
2023-03-23 0:41 ` Isaku Yamahata
2023-03-24 2:10 ` Chao Peng
2023-03-24 2:29 ` Xiaoyao Li
2023-03-28 10:41 ` Chao Peng
2023-04-14 21:08 ` Sean Christopherson
2023-04-18 23:38 ` Ackerley Tng
2023-04-25 23:01 ` Sean Christopherson
2023-03-07 19:14 ` Ackerley Tng
2023-03-07 20:27 ` Sean Christopherson
2023-01-14 0:37 ` [PATCH v10 0/9] KVM: mm: fd-based approach for supporting KVM Sean Christopherson
2023-01-16 13:48 ` Kirill A. Shutemov
2023-01-17 13:19 ` Chao Peng
2023-01-17 14:32 ` Fuad Tabba
2023-01-19 11:13 ` Isaku Yamahata
2023-01-19 15:25 ` Sean Christopherson
2023-01-19 22:37 ` Isaku Yamahata
2023-01-24 1:27 ` Sean Christopherson
2023-02-08 12:24 ` Isaku Yamahata
2023-02-13 13:01 ` Michael Roth
2023-02-21 12:11 ` Chao Peng
2023-03-23 1:27 ` Michael Roth
2023-03-24 2:13 ` Chao Peng
2023-04-12 22:01 ` Sean Christopherson
2023-04-17 14:37 ` Chao Peng
2023-04-17 15:01 ` Sean Christopherson
2023-01-24 16:08 ` Liam Merwick
2023-01-25 0:20 ` Sean Christopherson
2023-01-25 12:53 ` Kirill A. Shutemov
2023-01-25 16:01 ` Liam Merwick
2023-04-13 1:07 ` Sean Christopherson
2023-04-13 16:04 ` Kirill A. Shutemov
2023-02-16 5:13 ` Mike Rapoport
2023-02-16 9:41 ` David Hildenbrand
2023-02-22 21:53 ` Sean Christopherson
2023-04-17 15:40 ` Rename restrictedmem => guardedmem? (was: Re: [PATCH v10 0/9] KVM: mm: fd-based approach for supporting KVM) Sean Christopherson
2023-04-17 15:48 ` David Hildenbrand
2023-04-17 16:40 ` Sean Christopherson
2023-04-17 17:09 ` David Hildenbrand
2023-04-17 19:16 ` Sean Christopherson
2023-04-18 8:53 ` Fuad Tabba
2023-04-18 9:10 ` David Hildenbrand
2023-04-19 0:47 ` Sean Christopherson
2023-04-19 7:21 ` David Hildenbrand
2023-04-19 15:17 ` Sean Christopherson
2023-04-19 15:27 ` David Hildenbrand
2023-04-22 1:33 ` Sean Christopherson
2023-05-05 19:39 ` Ackerley Tng
2023-05-06 0:55 ` Sean Christopherson
2023-05-06 1:17 ` Vishal Annapurve
2023-05-15 23:46 ` Sean Christopherson
2023-07-13 22:46 ` Ackerley Tng
2023-07-14 19:29 ` Sean Christopherson
2023-07-14 23:09 ` Vishal Annapurve
2023-07-15 0:30 ` Sean Christopherson
2023-05-09 12:44 ` Chao Peng
2023-05-10 17:26 ` Vishal Annapurve
2023-05-10 20:23 ` Vishal Annapurve
2023-05-10 21:39 ` Sean Christopherson
2023-05-10 23:03 ` Vishal Annapurve
2023-05-11 20:22 ` Sean Christopherson
2023-05-19 1:07 ` Vishal Annapurve
2023-05-12 0:21 ` Michael Roth
2023-05-12 18:01 ` Sean Christopherson
2023-05-22 13:50 ` Michael Roth
2023-05-22 17:09 ` Sean Christopherson
2023-05-22 23:58 ` Michael Roth
2023-05-23 0:21 ` Sean Christopherson
2023-06-06 19:14 ` Ackerley Tng
2023-06-06 23:25 ` Sean Christopherson
2023-06-08 17:13 ` Ackerley Tng
2023-04-17 17:11 ` Ackerley Tng
2023-04-17 18:17 ` Sean Christopherson
2023-04-18 17:01 ` Ackerley Tng
2023-04-23 13:28 ` Jarkko Sakkinen
2023-05-05 20:00 ` David Hildenbrand
2023-05-06 7:44 ` Vlastimil Babka
2023-05-06 9:16 ` David Hildenbrand
2023-04-23 13:14 ` Jarkko Sakkinen
-- strict thread matches above, loose matches on Subject: below --
2023-03-31 23:50 [RFC PATCH v3 0/2] Providing mount in memfd_restricted() syscall Ackerley Tng
2023-03-31 23:50 ` [RFC PATCH v3 1/2] mm: restrictedmem: Allow userspace to specify mount for memfd_restricted Ackerley Tng
2023-04-03 8:21 ` David Hildenbrand
2023-04-05 22:29 ` Ackerley Tng
2023-04-04 8:25 ` Kirill A. Shutemov
2023-04-05 22:32 ` Ackerley Tng
2023-04-04 13:53 ` Christian Brauner
2023-04-04 14:58 ` Christian Brauner
2023-04-05 21:58 ` Ackerley Tng
2023-04-12 9:59 ` Christian Brauner
2023-04-13 22:53 ` Ackerley Tng
2023-04-13 23:07 ` Sean Christopherson
2023-03-31 23:50 ` [RFC PATCH v3 2/2] selftests: restrictedmem: Check hugepage-ness of shmem file backing restrictedmem fd Ackerley Tng
2023-04-03 8:24 ` David Hildenbrand
2023-04-11 1:35 ` Ackerley Tng
2022-07-06 8:20 [PATCH v7 00/14] KVM: mm: fd-based approach for supporting KVM guest private memory Chao Peng
2022-07-06 8:20 ` [PATCH v7 01/14] mm: Add F_SEAL_AUTO_ALLOCATE seal to memfd Chao Peng
2022-07-21 9:44 ` David Hildenbrand
2022-07-21 9:50 ` David Hildenbrand
2022-07-21 15:05 ` Sean Christopherson
2022-07-25 13:46 ` Chao Peng
2022-07-21 10:27 ` Gupta, Pankaj
2022-07-25 13:54 ` Chao Peng
2022-07-25 14:49 ` Gupta, Pankaj
2022-07-25 13:42 ` Chao Peng
2022-08-05 17:55 ` Paolo Bonzini
2022-08-05 18:06 ` David Hildenbrand
2022-08-10 9:40 ` Chao Peng
2022-08-10 9:38 ` Chao Peng
2022-08-17 23:41 ` Kirill A. Shutemov
2022-08-18 9:09 ` Paolo Bonzini
2022-08-23 7:36 ` David Hildenbrand
2022-08-24 10:20 ` Chao Peng
2022-08-26 15:19 ` Fuad Tabba
2022-08-29 15:18 ` Chao Peng
2022-07-06 8:20 ` [PATCH v7 02/14] selftests/memfd: Add tests for F_SEAL_AUTO_ALLOCATE Chao Peng
2022-08-05 13:11 ` David Hildenbrand
2022-07-06 8:20 ` [PATCH v7 03/14] mm: Introduce memfile_notifier Chao Peng
2022-08-05 13:22 ` David Hildenbrand
2022-08-10 9:22 ` Chao Peng
2022-08-10 10:05 ` David Hildenbrand
2022-08-10 14:38 ` Sean Christopherson
2022-08-11 12:27 ` Quentin Perret
2022-08-11 13:39 ` Chao Peng
2022-07-06 8:20 ` [PATCH v7 04/14] mm/shmem: Support memfile_notifier Chao Peng
2022-07-12 18:02 ` Gupta, Pankaj
2022-07-13 7:44 ` Chao Peng
2022-07-13 10:01 ` Gupta, Pankaj
2022-07-13 23:49 ` Chao Peng
2022-07-14 4:15 ` Gupta, Pankaj
2022-08-05 13:26 ` David Hildenbrand
2022-08-10 9:25 ` Chao Peng
2022-07-06 8:20 ` [PATCH v7 05/14] mm/memfd: Introduce MFD_INACCESSIBLE flag Chao Peng
2022-08-05 13:28 ` David Hildenbrand
2022-08-10 9:37 ` Chao Peng
2022-08-10 9:55 ` David Hildenbrand
2022-08-11 13:17 ` Chao Peng
2022-09-07 16:18 ` Kirill A. Shutemov
2022-07-06 8:20 ` [PATCH v7 06/14] KVM: Rename KVM_PRIVATE_MEM_SLOTS to KVM_INTERNAL_MEM_SLOTS Chao Peng
2022-07-06 8:20 ` [PATCH v7 07/14] KVM: Use gfn instead of hva for mmu_notifier_retry Chao Peng
2022-07-15 11:36 ` Gupta, Pankaj
2022-07-18 13:29 ` Chao Peng
2022-07-18 15:26 ` Sean Christopherson
2022-07-19 14:02 ` Chao Peng
2022-08-04 7:10 ` Isaku Yamahata
2022-08-10 8:19 ` Chao Peng
2022-07-06 8:20 ` [PATCH v7 08/14] KVM: Rename mmu_notifier_* Chao Peng
2022-07-29 19:02 ` Sean Christopherson
2022-08-03 10:13 ` Chao Peng
2022-08-05 19:54 ` Paolo Bonzini
2022-08-10 8:09 ` Chao Peng
2023-05-23 7:19 ` Kautuk Consul
2023-05-23 14:19 ` Sean Christopherson
2023-05-24 6:12 ` Kautuk Consul
2023-05-24 20:16 ` Sean Christopherson
2023-05-24 20:33 ` Peter Zijlstra
2023-05-24 21:39 ` Sean Christopherson
2023-05-25 8:54 ` Peter Zijlstra
2023-05-25 3:52 ` Kautuk Consul
2023-05-24 20:28 ` Peter Zijlstra
2022-07-06 8:20 ` [PATCH v7 09/14] KVM: Extend the memslot to support fd-based private memory Chao Peng
2022-07-29 19:51 ` Sean Christopherson
2022-08-03 10:08 ` Chao Peng
2022-08-03 14:42 ` Sean Christopherson
2022-07-06 8:20 ` [PATCH v7 10/14] KVM: Add KVM_EXIT_MEMORY_FAULT exit Chao Peng
2022-07-06 8:20 ` [PATCH v7 11/14] KVM: Register/unregister the guest private memory regions Chao Peng
2022-07-19 8:00 ` Gupta, Pankaj
2022-07-19 14:08 ` Chao Peng
2022-07-19 14:23 ` Gupta, Pankaj
2022-07-20 15:07 ` Chao Peng
2022-07-20 15:31 ` Gupta, Pankaj
2022-07-20 16:21 ` Sean Christopherson
2022-07-20 17:41 ` Gupta, Pankaj
2022-07-21 7:34 ` Wei Wang
2022-07-21 9:29 ` Chao Peng
2022-07-21 17:58 ` Sean Christopherson
2022-07-25 13:04 ` Chao Peng
2022-07-29 19:54 ` Sean Christopherson
2022-08-02 0:49 ` Sean Christopherson
2022-08-02 16:38 ` Sean Christopherson
2022-08-03 9:48 ` Chao Peng
2022-08-03 15:51 ` Sean Christopherson
2022-08-04 7:58 ` Chao Peng
2022-07-20 16:44 ` Sean Christopherson
2022-07-21 9:37 ` Chao Peng
2022-08-19 19:37 ` Vishal Annapurve
2022-08-24 10:37 ` Chao Peng
2022-08-26 15:19 ` Fuad Tabba
2022-08-29 15:21 ` Chao Peng
2022-07-06 8:20 ` [PATCH v7 12/14] KVM: Handle page fault for private memory Chao Peng
2022-07-29 20:58 ` Sean Christopherson
2022-08-03 9:52 ` Chao Peng
2022-07-06 8:20 ` [PATCH v7 13/14] KVM: Enable and expose KVM_MEM_PRIVATE Chao Peng
2022-07-19 9:55 ` Gupta, Pankaj
2022-07-19 14:12 ` Chao Peng
2022-07-06 8:20 ` [PATCH v7 14/14] memfd_create.2: Describe MFD_INACCESSIBLE flag Chao Peng
2022-08-01 14:40 ` Dave Hansen
2022-08-03 9:53 ` Chao Peng
2022-07-13 3:58 ` [PATCH v7 00/14] KVM: mm: fd-based approach for supporting KVM guest private memory Gupta, Pankaj
2022-07-13 7:57 ` Chao Peng
2022-07-13 10:35 ` Gupta, Pankaj
2022-07-13 23:59 ` Chao Peng
2022-07-14 4:39 ` Gupta, Pankaj
2022-07-14 5:06 ` Gupta, Pankaj
2022-07-14 4:29 ` Andy Lutomirski
2022-07-14 5:13 ` Gupta, Pankaj
2022-08-11 10:02 ` Nikunj A. Dadhania
2022-08-11 11:30 ` Gupta, Pankaj
2022-08-11 13:32 ` Chao Peng
2022-08-11 17:28 ` Nikunj A. Dadhania
2022-08-12 3:22 ` Nikunj A. Dadhania
2022-08-11 17:18 ` Nikunj A. Dadhania
2022-08-11 23:02 ` Gupta, Pankaj
2022-08-12 6:02 ` Gupta, Pankaj
2022-08-12 7:18 ` Gupta, Pankaj
2022-08-12 8:48 ` Nikunj A. Dadhania
2022-08-12 9:33 ` Gupta, Pankaj
2022-08-15 13:04 ` Chao Peng
2022-08-16 4:28 ` Nikunj A. Dadhania
2022-08-16 11:33 ` Gupta, Pankaj
2022-08-16 12:24 ` Kirill A . Shutemov
2022-08-16 13:03 ` Gupta, Pankaj
2022-08-16 15:38 ` Sean Christopherson
2022-08-17 15:27 ` Michael Roth
2022-08-23 1:25 ` Isaku Yamahata
2022-08-23 17:41 ` Gupta, Pankaj
2022-08-18 5:40 ` Hugh Dickins
2022-08-18 13:24 ` Kirill A . Shutemov
2022-08-19 0:20 ` Sean Christopherson
2022-08-19 3:38 ` Hugh Dickins
2022-08-19 22:53 ` Sean Christopherson
2022-08-23 7:55 ` David Hildenbrand
2022-08-23 16:05 ` Sean Christopherson
2022-08-24 9:41 ` Chao Peng
2022-09-09 4:55 ` Andy Lutomirski
2022-08-19 3:00 ` Hugh Dickins
2022-08-20 0:27 ` Kirill A. Shutemov
2022-08-21 5:15 ` Hugh Dickins
2022-08-31 14:24 ` Kirill A . Shutemov
2022-09-02 10:27 ` Chao Peng
2022-09-02 12:30 ` Kirill A . Shutemov
2022-09-08 1:10 ` Kirill A. Shutemov
2022-09-13 9:44 ` Sean Christopherson
2022-09-13 13:28 ` Kirill A. Shutemov
2022-09-13 14:53 ` Sean Christopherson
2022-09-13 16:00 ` Kirill A. Shutemov
2022-09-13 16:12 ` Sean Christopherson
2022-09-09 4:48 ` Andy Lutomirski
2022-09-09 14:32 ` Kirill A . Shutemov
2022-09-09 19:11 ` Andy Lutomirski
2022-09-09 23:02 ` Kirill A . Shutemov
2022-08-21 10:27 ` Matthew Wilcox
2022-08-24 10:27 ` Chao Peng
2022-09-09 4:44 ` Andy Lutomirski
2022-08-26 15:19 ` Fuad Tabba
2022-08-29 15:17 ` Chao Peng
2022-08-31 9:12 ` Fuad Tabba
2022-09-02 10:19 ` Chao Peng
2022-09-09 15:35 ` Michael Roth
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221207081314.hfyavisybcraezrh@yy-desk-7060 \
--to=yuan.yao@linux.intel.com \
--cc=aarcange@redhat.com \
--cc=ak@linux.intel.com \
--cc=akpm@linux-foundation.org \
--cc=arnd@arndb.de \
--cc=bfields@fieldses.org \
--cc=bp@alien8.de \
--cc=chao.p.peng@linux.intel.com \
--cc=corbet@lwn.net \
--cc=dave.hansen@intel.com \
--cc=david@redhat.com \
--cc=ddutile@redhat.com \
--cc=dhildenb@redhat.com \
--cc=hpa@zytor.com \
--cc=hughd@google.com \
--cc=jlayton@kernel.org \
--cc=jmattson@google.com \
--cc=joro@8bytes.org \
--cc=jun.nakajima@intel.com \
--cc=kirill.shutemov@linux.intel.com \
--cc=kvm@vger.kernel.org \
--cc=linmiaohe@huawei.com \
--cc=linux-api@vger.kernel.org \
--cc=linux-arch@vger.kernel.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=luto@kernel.org \
--cc=mail@maciej.szmigiero.name \
--cc=mhocko@suse.com \
--cc=michael.roth@amd.com \
--cc=mingo@redhat.com \
--cc=naoya.horiguchi@nec.com \
--cc=pbonzini@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=qperret@google.com \
--cc=rppt@kernel.org \
--cc=seanjc@google.com \
--cc=shuah@kernel.org \
--cc=steven.price@arm.com \
--cc=tabba@google.com \
--cc=tglx@linutronix.de \
--cc=vannapurve@google.com \
--cc=vbabka@suse.cz \
--cc=vkuznets@redhat.com \
--cc=wanpengli@tencent.com \
--cc=wei.w.wang@intel.com \
--cc=x86@kernel.org \
--cc=yu.c.zhang@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).