From: Paolo Bonzini <pbonzini@redhat.com>
To: Sean Christopherson <seanjc@google.com>,
	Marc Zyngier <maz@kernel.org>,
	Huacai Chen <chenhuacai@kernel.org>,
	Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>,
	Paul Mackerras <paulus@ozlabs.org>
Cc: James Morse <james.morse@arm.com>,
	Julien Thierry <julien.thierry.kdev@gmail.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, linux-mips@vger.kernel.org,
	kvm@vger.kernel.org, kvm-ppc@vger.kernel.org,
	linux-kernel@vger.kernel.org, Ben Gardon <bgardon@google.com>
Subject: Re: [PATCH 12/18] KVM: MIPS/MMU: Convert to the gfn-based MMU notifier callbacks
Date: Wed, 31 Mar 2021 09:41:34 +0200
Message-ID: <26c87b3e-7a89-6cfa-1410-25486b114f32@redhat.com>
In-Reply-To: <20210326021957.1424875-13-seanjc@google.com>

On 26/03/21 03:19, Sean Christopherson wrote:
> Move MIPS to the gfn-based MMU notifier APIs, which do the hva->gfn
> lookup in common code, and whose code is nearly identical to MIPS'
> lookup.
> 
> No meaningful functional change intended, though the exact order of
> operations is slightly different since the memslot lookups occur before
> calling into arch code.
> 
> Signed-off-by: Sean Christopherson <seanjc@google.com>

I'll post a couple patches to enable more coalescing of the flushes, but 
this particular patch is okay.
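
To sketch what I mean by coalescing (not part of this patch, just a rough
illustration): if the common notifier code issues the remote TLB flush
whenever a handler returns true, the MIPS handler can drop its own
flush_shadow_all() call and let overlapping invalidations share a single
flush.  This assumes kvm_flush_remote_tlbs() is an acceptable substitute
for flush_shadow_all() on MIPS, which would need checking:

/*
 * Hypothetical coalesced variant, not in this patch: report that a
 * flush is needed instead of flushing here, so the caller can issue
 * one flush for the whole invalidation.
 */
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	kvm_mips_flush_gpa_pt(kvm, range->start, range->end);

	/* Pages were unmapped, ask the common code to flush. */
	return true;
}

That way a series of back-to-back invalidations costs one remote flush
instead of one per handler call.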

Paolo

> ---
>   arch/mips/include/asm/kvm_host.h |  1 +
>   arch/mips/kvm/mmu.c              | 97 ++++++--------------------------
>   2 files changed, 17 insertions(+), 81 deletions(-)
> 
> diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
> index feaa77036b67..374a3c8806e8 100644
> --- a/arch/mips/include/asm/kvm_host.h
> +++ b/arch/mips/include/asm/kvm_host.h
> @@ -967,6 +967,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
>   						   bool write);
>   
>   #define KVM_ARCH_WANT_MMU_NOTIFIER
> +#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
>   
>   /* Emulation */
>   int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
> diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
> index 3dabeda82458..3dc885df2e32 100644
> --- a/arch/mips/kvm/mmu.c
> +++ b/arch/mips/kvm/mmu.c
> @@ -439,85 +439,36 @@ static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
>   				  end_gfn << PAGE_SHIFT);
>   }
>   
> -static int handle_hva_to_gpa(struct kvm *kvm,
> -			     unsigned long start,
> -			     unsigned long end,
> -			     int (*handler)(struct kvm *kvm, gfn_t gfn,
> -					    gpa_t gfn_end,
> -					    struct kvm_memory_slot *memslot,
> -					    void *data),
> -			     void *data)
> +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
>   {
> -	struct kvm_memslots *slots;
> -	struct kvm_memory_slot *memslot;
> -	int ret = 0;
> -
> -	slots = kvm_memslots(kvm);
> -
> -	/* we only care about the pages that the guest sees */
> -	kvm_for_each_memslot(memslot, slots) {
> -		unsigned long hva_start, hva_end;
> -		gfn_t gfn, gfn_end;
> -
> -		hva_start = max(start, memslot->userspace_addr);
> -		hva_end = min(end, memslot->userspace_addr +
> -					(memslot->npages << PAGE_SHIFT));
> -		if (hva_start >= hva_end)
> -			continue;
> -
> -		/*
> -		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
> -		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
> -		 */
> -		gfn = hva_to_gfn_memslot(hva_start, memslot);
> -		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
> -
> -		ret |= handler(kvm, gfn, gfn_end, memslot, data);
> -	}
> -
> -	return ret;
> -}
> -
> -
> -static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
> -				 struct kvm_memory_slot *memslot, void *data)
> -{
> -	kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
> -	return 1;
> -}
> -
> -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
> -			unsigned flags)
> -{
> -	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
> +	kvm_mips_flush_gpa_pt(kvm, range->start, range->end);
>   
>   	kvm_mips_callbacks->flush_shadow_all(kvm);
>   	return 0;
>   }
>   
> -static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
> -				struct kvm_memory_slot *memslot, void *data)
> +static bool __kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>   {
> -	gpa_t gpa = gfn << PAGE_SHIFT;
> -	pte_t hva_pte = *(pte_t *)data;
> +	gpa_t gpa = range->start << PAGE_SHIFT;
> +	pte_t hva_pte = range->pte;
>   	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
>   	pte_t old_pte;
>   
>   	if (!gpa_pte)
> -		return 0;
> +		return false;
>   
>   	/* Mapping may need adjusting depending on memslot flags */
>   	old_pte = *gpa_pte;
> -	if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
> +	if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
>   		hva_pte = pte_mkclean(hva_pte);
> -	else if (memslot->flags & KVM_MEM_READONLY)
> +	else if (range->slot->flags & KVM_MEM_READONLY)
>   		hva_pte = pte_wrprotect(hva_pte);
>   
>   	set_pte(gpa_pte, hva_pte);
>   
>   	/* Replacing an absent or old page doesn't need flushes */
>   	if (!pte_present(old_pte) || !pte_young(old_pte))
> -		return 0;
> +		return false;
>   
>   	/* Pages swapped, aged, moved, or cleaned require flushes */
>   	return !pte_present(hva_pte) ||
> @@ -526,27 +477,21 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
>   	       (pte_dirty(old_pte) && !pte_dirty(hva_pte));
>   }
>   
> -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
> +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>   {
> -	unsigned long end = hva + PAGE_SIZE;
> -	int ret;
> -
> -	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
> -	if (ret)
> +	if (__kvm_set_spte_gfn(kvm, range))
>   		kvm_mips_callbacks->flush_shadow_all(kvm);
> -	return 0;
> +	return false;
>   }
>   
> -static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
> -			       struct kvm_memory_slot *memslot, void *data)
> +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>   {
> -	return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
> +	return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
>   }
>   
> -static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
> -				    struct kvm_memory_slot *memslot, void *data)
> +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>   {
> -	gpa_t gpa = gfn << PAGE_SHIFT;
> +	gpa_t gpa = range->start << PAGE_SHIFT;
>   	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
>   
>   	if (!gpa_pte)
> @@ -554,16 +499,6 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
>   	return pte_young(*gpa_pte);
>   }
>   
> -int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
> -{
> -	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
> -}
> -
> -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
> -{
> -	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
> -}
> -
>   /**
>    * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
>    * @vcpu:		VCPU pointer.
> 



Thread overview: 44+ messages
2021-03-26  2:19 [PATCH 00/18] KVM: Consolidate and optimize MMU notifiers Sean Christopherson
2021-03-26  2:19 ` [PATCH 01/18] KVM: x86/mmu: Coalesce TDP MMU TLB flushes when zapping collapsible SPTEs Sean Christopherson
2021-03-26  2:19 ` [PATCH 02/18] KVM: x86/mmu: Move flushing for "slot" handlers to caller for legacy MMU Sean Christopherson
2021-03-26  2:19 ` [PATCH 03/18] KVM: x86/mmu: Coalesce TLB flushes when zapping collapsible SPTEs Sean Christopherson
2021-03-26  2:19 ` [PATCH 04/18] KVM: x86/mmu: Coalesce TLB flushes across address spaces for gfn range zap Sean Christopherson
2021-03-26  2:19 ` [PATCH 05/18] KVM: x86/mmu: Pass address space ID to __kvm_tdp_mmu_zap_gfn_range() Sean Christopherson
2021-03-26  2:19 ` [PATCH 06/18] KVM: x86/mmu: Pass address space ID to TDP MMU root walkers Sean Christopherson
2021-03-26  2:19 ` [PATCH 07/18] KVM: x86/mmu: Use leaf-only loop for walking TDP SPTEs when changing SPTE Sean Christopherson
2021-03-26  2:19 ` [PATCH 08/18] KVM: Move prototypes for MMU notifier callbacks to generic code Sean Christopherson
2021-03-26  2:19 ` [PATCH 09/18] KVM: Move arm64's MMU notifier trace events " Sean Christopherson
2021-03-26  2:19 ` [PATCH 10/18] KVM: Move x86's MMU notifier memslot walkers " Sean Christopherson
2021-03-31  7:52   ` Paolo Bonzini
2021-03-31 16:20     ` Sean Christopherson
2021-03-31 16:36       ` Paolo Bonzini
2021-03-26  2:19 ` [PATCH 11/18] KVM: arm64: Convert to the gfn-based MMU notifier callbacks Sean Christopherson
2021-03-26  2:19 ` [PATCH 12/18] KVM: MIPS/MMU: " Sean Christopherson
2021-03-31  7:41   ` Paolo Bonzini [this message]
2021-03-26  2:19 ` [PATCH 13/18] KVM: PPC: " Sean Christopherson
2021-03-26  2:19 ` [PATCH 14/18] KVM: Kill off the old hva-based " Sean Christopherson
2021-03-26  2:19 ` [PATCH 15/18] KVM: Take mmu_lock when handling MMU notifier iff the hva hits a memslot Sean Christopherson
2021-03-26  2:19 ` [PATCH 16/18] KVM: Don't take mmu_lock for range invalidation unless necessary Sean Christopherson
2021-03-31  7:52   ` Paolo Bonzini
2021-03-31  8:35   ` Paolo Bonzini
2021-03-31 16:41     ` Sean Christopherson
2021-03-31 16:47       ` Paolo Bonzini
2021-03-31 19:47         ` Sean Christopherson
2021-03-31 20:42           ` Paolo Bonzini
2021-03-31 21:05             ` Sean Christopherson
2021-03-31 21:22               ` Sean Christopherson
2021-03-31 21:36                 ` Paolo Bonzini
2021-03-31 21:35               ` Paolo Bonzini
2021-03-31 21:47                 ` Sean Christopherson
2021-03-31 20:15     ` Sean Christopherson
2021-03-31 20:30       ` Paolo Bonzini
2021-03-31 20:52     ` Sean Christopherson
2021-03-31 21:00       ` Paolo Bonzini
2021-03-26  2:19 ` [PATCH 17/18] KVM: x86/mmu: Allow yielding during MMU notifier unmap/zap, if possible Sean Christopherson
2021-03-26  2:19 ` [PATCH 18/18] KVM: x86/mmu: Drop trace_kvm_age_page() tracepoint Sean Christopherson
2021-03-30 18:32 ` [PATCH 00/18] KVM: Consolidate and optimize MMU notifiers Ben Gardon
2021-03-30 19:48   ` Paolo Bonzini
2021-03-30 19:58   ` Sean Christopherson
2021-03-31  7:57 ` Paolo Bonzini
2021-03-31  9:34   ` Marc Zyngier
2021-03-31  9:41     ` Paolo Bonzini
