[PATCH] KVM: async_pf: change kvm_setup_async_pf()/kvm_arch_setup_async_pf() return type to bool
From: Vitaly Kuznetsov @ 2020-06-15 12:13 UTC
  To: kvm, Paolo Bonzini
  Cc: Sean Christopherson, Wanpeng Li, Jim Mattson, Vivek Goyal, linux-kernel

Unlike normal 'int' functions returning '0' on success, kvm_setup_async_pf()/
kvm_arch_setup_async_pf() return '1' when a job to handle the page fault
asynchronously has been scheduled and '0' otherwise. To avoid confusion,
change the return type to 'bool'.

No functional change intended.

Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/s390/kvm/kvm-s390.c | 20 +++++++++-----------
 arch/x86/kvm/mmu/mmu.c   |  4 ++--
 include/linux/kvm_host.h |  4 ++--
 virt/kvm/async_pf.c      | 16 ++++++++++------
 4 files changed, 23 insertions(+), 21 deletions(-)

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d47c19718615..7fd4fdb165fc 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3954,33 +3954,31 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
 	return true;
 }
 
-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
+static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
 {
 	hva_t hva;
 	struct kvm_arch_async_pf arch;
-	int rc;
 
 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
-		return 0;
+		return false;
 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
 	    vcpu->arch.pfault_compare)
-		return 0;
+		return false;
 	if (psw_extint_disabled(vcpu))
-		return 0;
+		return false;
 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
-		return 0;
+		return false;
 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
-		return 0;
+		return false;
 	if (!vcpu->arch.gmap->pfault_enabled)
-		return 0;
+		return false;
 
 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
 	hva += current->thread.gmap_addr & ~PAGE_MASK;
 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
-		return 0;
+		return false;
 
-	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
-	return rc;
+	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
 }
 
 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 979a7e1c263d..3dd0af7e7515 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4045,8 +4045,8 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
 	walk_shadow_page_lockless_end(vcpu);
 }
 
-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-				   gfn_t gfn)
+static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+				    gfn_t gfn)
 {
 	struct kvm_arch_async_pf arch;
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 62ec926c78a0..9edc6fc71a89 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -211,8 +211,8 @@ struct kvm_async_pf {
 
 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-		       unsigned long hva, struct kvm_arch_async_pf *arch);
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+			unsigned long hva, struct kvm_arch_async_pf *arch);
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 45799606bb3e..390f758d5a27 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -156,17 +156,21 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 	}
 }
 
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-		       unsigned long hva, struct kvm_arch_async_pf *arch)
+/*
+ * Try to schedule a job to handle page fault asynchronously. Returns 'true' on
+ * success, 'false' on failure (page fault has to be handled synchronously).
+ */
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+			unsigned long hva, struct kvm_arch_async_pf *arch)
 {
 	struct kvm_async_pf *work;
 
 	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
-		return 0;
+		return false;
 
 	/* Arch specific code should not do async PF in this case */
 	if (unlikely(kvm_is_error_hva(hva)))
-		return 0;
+		return false;
 
 	/*
 	 * do alloc nowait since if we are going to sleep anyway we
@@ -174,7 +178,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	 */
 	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
 	if (!work)
-		return 0;
+		return false;
 
 	work->wakeup_all = false;
 	work->vcpu = vcpu;
@@ -193,7 +197,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
 	schedule_work(&work->work);
 
-	return 1;
+	return true;
 }
 
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
-- 
2.25.4



Re: [PATCH] KVM: async_pf: change kvm_setup_async_pf()/kvm_arch_setup_async_pf() return type to bool
From: Vivek Goyal @ 2020-06-15 16:02 UTC
  To: Vitaly Kuznetsov
  Cc: kvm, Paolo Bonzini, Sean Christopherson, Wanpeng Li, Jim Mattson,
	linux-kernel

On Mon, Jun 15, 2020 at 02:13:34PM +0200, Vitaly Kuznetsov wrote:
> Unlike normal 'int' functions returning '0' on success, kvm_setup_async_pf()/
> kvm_arch_setup_async_pf() return '1' when a job to handle the page fault
> asynchronously has been scheduled and '0' otherwise. To avoid confusion,
> change the return type to 'bool'.
> 
> No functional change intended.
> 
> Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>

Looks good to me. Nobody checks the error code from
kvm_arch_setup_async_pf(). Callers only care about success or failure at
this point and fall back to a synchronous page fault upon failure. This
could be changed back if somebody ever needs a specific error code.
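
To illustrate (a rough sketch of the caller pattern, not the actual
call site -- the surrounding logic is simplified, only the signature
comes from the patch):

	/*
	 * Try to queue a job to handle the fault asynchronously; if
	 * that fails for any reason, fall back to handling the fault
	 * synchronously.
	 */
	if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
		return true;	/* async job scheduled */
	/* fall through to synchronous handling */

With 'bool' the "scheduled vs. not scheduled" meaning is explicit in
the type, instead of relying on the reader to know this is not a
0-on-success 'int' function.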

Acked-by: Vivek Goyal <vgoyal@redhat.com>

Vivek


