From: Yan Zhao <yan.y.zhao@intel.com>
To: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>,
	Marc Zyngier <maz@kernel.org>,
	Oliver Upton <oliver.upton@linux.dev>,
	Huacai Chen <chenhuacai@kernel.org>,
	Michael Ellerman <mpe@ellerman.id.au>,
	Anup Patel <anup@brainfault.org>,
	Paul Walmsley <paul.walmsley@sifive.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	Albert Ou <aou@eecs.berkeley.edu>,
	"Matthew Wilcox (Oracle)" <willy@infradead.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Paul Moore <paul@paul-moore.com>,
	James Morris <jmorris@namei.org>,
	"Serge E. Hallyn" <serge@hallyn.com>, <kvm@vger.kernel.org>,
	<linux-arm-kernel@lists.infradead.org>, <kvmarm@lists.linux.dev>,
	<linux-mips@vger.kernel.org>, <linuxppc-dev@lists.ozlabs.org>,
	<kvm-riscv@lists.infradead.org>,
	<linux-riscv@lists.infradead.org>,
	<linux-fsdevel@vger.kernel.org>, <linux-mm@kvack.org>,
	<linux-security-module@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>,
	Chao Peng <chao.p.peng@linux.intel.com>,
	Fuad Tabba <tabba@google.com>,
	Jarkko Sakkinen <jarkko@kernel.org>,
	Yu Zhang <yu.c.zhang@linux.intel.com>,
	Vishal Annapurve <vannapurve@google.com>,
	Ackerley Tng <ackerleytng@google.com>,
	Maciej Szmigiero <mail@maciej.szmigiero.name>,
	Vlastimil Babka <vbabka@suse.cz>,
	David Hildenbrand <david@redhat.com>,
	Quentin Perret <qperret@google.com>,
	Michael Roth <michael.roth@amd.com>, Wang <wei.w.wang@intel.com>,
	Liam Merwick <liam.merwick@oracle.com>,
	"Isaku Yamahata" <isaku.yamahata@gmail.com>,
	"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Subject: Re: [RFC PATCH v11 01/29] KVM: Wrap kvm_gfn_range.pte in a per-action union
Date: Fri, 21 Jul 2023 14:26:11 +0800	[thread overview]
Message-ID: <ZLolA2U83tP75Qdd@yzhao56-desk.sh.intel.com> (raw)
In-Reply-To: <20230718234512.1690985-2-seanjc@google.com>

On Tue, Jul 18, 2023 at 04:44:44PM -0700, Sean Christopherson wrote:

May I know why KVM still needs to register the .change_pte() callback?
As the comment in kvm_mmu_notifier_change_pte() also notes, .change_pte() must
be surrounded by .invalidate_range_{start,end}().

Since kvm_mmu_notifier_invalidate_range_start() has already called
kvm_unmap_gfn_range() to zap all leaf SPTEs, and the page fault path cannot
successfully install new SPTEs before kvm_mmu_notifier_invalidate_range_end(),
kvm_set_spte_gfn() should not be able to find any shadow-present leaf entries
whose PFN it could update.

Or could we just delete
"kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);"
entirely from kvm_mmu_notifier_change_pte()?
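
To illustrate, here is a rough sketch of the ordering I have in mind
(simplified; names are taken from the current notifier paths, this is not
the literal kvm_main.c code):

/*
 * mmu_notifier_invalidate_range_start()
 *   -> kvm_mmu_notifier_invalidate_range_start()
 *        kvm->mmu_invalidate_in_progress++;
 *        kvm_unmap_gfn_range(kvm, range);      // zaps all leaf SPTEs
 *
 * set_pte_at_notify()
 *   -> kvm_mmu_notifier_change_pte()
 *        kvm_handle_hva_range(mn, address, address + 1, pte,
 *                             kvm_set_spte_gfn);
 *        // no shadow-present leaf SPTE should exist at this point:
 *        // the zap above removed them, and the fault path refuses to
 *        // install new SPTEs while mmu_invalidate_in_progress is
 *        // elevated
 *
 * mmu_notifier_invalidate_range_end()
 *   -> kvm_mmu_notifier_invalidate_range_end()
 *        kvm->mmu_invalidate_in_progress--;     // faults may resume
 */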

> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 6db9ef288ec3..55f03a68f1cd 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -1721,7 +1721,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
>  
>  bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>  {
> -	kvm_pfn_t pfn = pte_pfn(range->pte);
> +	kvm_pfn_t pfn = pte_pfn(range->arg.pte);
>  
>  	if (!kvm->arch.mmu.pgt)
>  		return false;
> diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
> index e8c08988ed37..7b2ac1319d70 100644
> --- a/arch/mips/kvm/mmu.c
> +++ b/arch/mips/kvm/mmu.c
> @@ -447,7 +447,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
>  bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>  {
>  	gpa_t gpa = range->start << PAGE_SHIFT;
> -	pte_t hva_pte = range->pte;
> +	pte_t hva_pte = range->arg.pte;
>  	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
>  	pte_t old_pte;
>  
> diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
> index f2eb47925806..857f4312b0f8 100644
> --- a/arch/riscv/kvm/mmu.c
> +++ b/arch/riscv/kvm/mmu.c
> @@ -559,7 +559,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
>  bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>  {
>  	int ret;
> -	kvm_pfn_t pfn = pte_pfn(range->pte);
> +	kvm_pfn_t pfn = pte_pfn(range->arg.pte);
>  
>  	if (!kvm->arch.pgd)
>  		return false;
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index ec169f5c7dce..d72f2b20f430 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1588,7 +1588,7 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
>  	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
>  				 range->start, range->end - 1, &iterator)
>  		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
> -			       iterator.level, range->pte);
> +			       iterator.level, range->arg.pte);
>  
>  	return ret;
>  }
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 512163d52194..6250bd3d20c1 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -1241,7 +1241,7 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
>  	u64 new_spte;
>  
>  	/* Huge pages aren't expected to be modified without first being zapped. */
> -	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
> +	WARN_ON(pte_huge(range->arg.pte) || range->start + 1 != range->end);
>  
>  	if (iter->level != PG_LEVEL_4K ||
>  	    !is_shadow_present_pte(iter->old_spte))
> @@ -1255,9 +1255,9 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
>  	 */
>  	tdp_mmu_iter_set_spte(kvm, iter, 0);
>  
> -	if (!pte_write(range->pte)) {
> +	if (!pte_write(range->arg.pte)) {
>  		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
> -								  pte_pfn(range->pte));
> +								  pte_pfn(range->arg.pte));
>  
>  		tdp_mmu_iter_set_spte(kvm, iter, new_spte);
>  	}
 

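For reference, the include/linux/kvm_host.h change implied by the hunks above,
as I read it (sketch only, inferred from the range->arg.pte usage; the actual
hunk in this patch may differ):

struct kvm_gfn_range {
	struct kvm_memory_slot *slot;
	gfn_t start;
	gfn_t end;
	union {
		pte_t pte;	/* used by .change_pte() / kvm_set_spte_gfn() */
	} arg;
	bool may_block;
};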