kvm.vger.kernel.org archive mirror
* [PATCH v4] KVM: Check the allocation of pv cpu mask
@ 2020-10-17 17:54 lihaiwei.kernel
  2020-10-19 11:23 ` Vitaly Kuznetsov
  0 siblings, 1 reply; 4+ messages in thread
From: lihaiwei.kernel @ 2020-10-17 17:54 UTC (permalink / raw)
  To: kvm, linux-kernel
  Cc: pbonzini, sean.j.christopherson, vkuznets, wanpengli, jmattson,
	joro, Haiwei Li

From: Haiwei Li <lihaiwei@tencent.com>

Check the allocation of the per-cpu __pv_cpu_mask. Initialize
'send_IPI_mask_allbutself' only when the allocation succeeds, and check
the allocation of __pv_cpu_mask in 'kvm_flush_tlb_others'.

Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Haiwei Li <lihaiwei@tencent.com>
---
v1 -> v2:
 * add CONFIG_SMP for kvm_send_ipi_mask_allbutself to prevent build error
v2 -> v3:
 * always check the allocation of __pv_cpu_mask in kvm_flush_tlb_others
v3 -> v4:
 * move kvm_setup_pv_ipi into kvm_alloc_cpumask and get rid of kvm_apic_init

 arch/x86/kernel/kvm.c | 53 +++++++++++++++++++++++++++++--------------
 1 file changed, 36 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 42c6e0deff9e..be28203cc098 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -547,16 +547,6 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 	__send_ipi_mask(local_mask, vector);
 }
 
-/*
- * Set the IPI entry points
- */
-static void kvm_setup_pv_ipi(void)
-{
-	apic->send_IPI_mask = kvm_send_ipi_mask;
-	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
-	pr_info("setup PV IPIs\n");
-}
-
 static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
 {
 	int cpu;
@@ -619,6 +609,11 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 	struct kvm_steal_time *src;
 	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 
+	if (unlikely(!flushmask)) {
+		native_flush_tlb_others(cpumask, info);
+		return;
+	}
+
 	cpumask_copy(flushmask, cpumask);
 	/*
 	 * We have to call flush only on online vCPUs. And
@@ -732,10 +727,6 @@ static uint32_t __init kvm_detect(void)
 
 static void __init kvm_apic_init(void)
 {
-#if defined(CONFIG_SMP)
-	if (pv_ipi_supported())
-		kvm_setup_pv_ipi();
-#endif
 }
 
 static void __init kvm_init_platform(void)
@@ -765,10 +756,18 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
+static void kvm_free_cpumask(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
+}
+
 static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
-	bool alloc = false;
+	bool alloc = false, alloced = true;
 
 	if (!kvm_para_available() || nopv)
 		return 0;
@@ -783,10 +782,30 @@ static __init int kvm_alloc_cpumask(void)
 
 	if (alloc)
 		for_each_possible_cpu(cpu) {
-			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
-				GFP_KERNEL, cpu_to_node(cpu));
+			if (!zalloc_cpumask_var_node(
+				per_cpu_ptr(&__pv_cpu_mask, cpu),
+				GFP_KERNEL, cpu_to_node(cpu))) {
+				alloced = false;
+				break;
+			}
 		}
 
+#if defined(CONFIG_SMP)
+	/* Set the IPI entry points */
+	if (pv_ipi_supported()) {
+		apic->send_IPI_mask = kvm_send_ipi_mask;
+		if (alloced)
+			apic->send_IPI_mask_allbutself =
+				kvm_send_ipi_mask_allbutself;
+		pr_info("setup PV IPIs\n");
+	}
+#endif
+
+	if (!alloced) {
+		kvm_free_cpumask();
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 arch_initcall(kvm_alloc_cpumask);
-- 
2.18.4



* Re: [PATCH v4] KVM: Check the allocation of pv cpu mask
  2020-10-17 17:54 [PATCH v4] KVM: Check the allocation of pv cpu mask lihaiwei.kernel
@ 2020-10-19 11:23 ` Vitaly Kuznetsov
  2020-10-19 12:36   ` Haiwei Li
  0 siblings, 1 reply; 4+ messages in thread
From: Vitaly Kuznetsov @ 2020-10-19 11:23 UTC (permalink / raw)
  To: lihaiwei.kernel
  Cc: pbonzini, sean.j.christopherson, wanpengli, jmattson, joro,
	Haiwei Li, kvm, linux-kernel

lihaiwei.kernel@gmail.com writes:

> From: Haiwei Li <lihaiwei@tencent.com>
>
> Check the allocation of the per-cpu __pv_cpu_mask. Initialize
> 'send_IPI_mask_allbutself' only when the allocation succeeds, and check
> the allocation of __pv_cpu_mask in 'kvm_flush_tlb_others'.
>
> Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
> Signed-off-by: Haiwei Li <lihaiwei@tencent.com>
> ---
> v1 -> v2:
>  * add CONFIG_SMP for kvm_send_ipi_mask_allbutself to prevent build error
> v2 -> v3:
>  * always check the allocation of __pv_cpu_mask in kvm_flush_tlb_others
> v3 -> v4:
>  * move kvm_setup_pv_ipi into kvm_alloc_cpumask and get rid of kvm_apic_init
>
>  arch/x86/kernel/kvm.c | 53 +++++++++++++++++++++++++++++--------------
>  1 file changed, 36 insertions(+), 17 deletions(-)
>
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index 42c6e0deff9e..be28203cc098 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -547,16 +547,6 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
>  	__send_ipi_mask(local_mask, vector);
>  }
>  
> -/*
> - * Set the IPI entry points
> - */
> -static void kvm_setup_pv_ipi(void)
> -{
> -	apic->send_IPI_mask = kvm_send_ipi_mask;
> -	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
> -	pr_info("setup PV IPIs\n");
> -}
> -
>  static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
>  {
>  	int cpu;
> @@ -619,6 +609,11 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
>  	struct kvm_steal_time *src;
>  	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
>  
> +	if (unlikely(!flushmask)) {
> +		native_flush_tlb_others(cpumask, info);
> +		return;
> +	}
> +
>  	cpumask_copy(flushmask, cpumask);
>  	/*
>  	 * We have to call flush only on online vCPUs. And
> @@ -732,10 +727,6 @@ static uint32_t __init kvm_detect(void)
>  
>  static void __init kvm_apic_init(void)
>  {
> -#if defined(CONFIG_SMP)
> -	if (pv_ipi_supported())
> -		kvm_setup_pv_ipi();
> -#endif
>  }

Do we still need the now-empty function?
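
If it can go away, the 'x86_platform.apic_post_init = kvm_apic_init;'
assignment in kvm_init_platform() can presumably be dropped as well
(untested sketch, assuming that hook is kvm_apic_init()'s only remaining
user):

 static void __init kvm_init_platform(void)
 {
 	kvmclock_init();
-	x86_platform.apic_post_init = kvm_apic_init;
 }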

>  
>  static void __init kvm_init_platform(void)
> @@ -765,10 +756,18 @@ static __init int activate_jump_labels(void)
>  }
>  arch_initcall(activate_jump_labels);
>  
> +static void kvm_free_cpumask(void)
> +{
> +	unsigned int cpu;
> +
> +	for_each_possible_cpu(cpu)
> +		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
> +}
> +
>  static __init int kvm_alloc_cpumask(void)
>  {
>  	int cpu;
> -	bool alloc = false;
> +	bool alloc = false, alloced = true;
>  
>  	if (!kvm_para_available() || nopv)
>  		return 0;
> @@ -783,10 +782,30 @@ static __init int kvm_alloc_cpumask(void)
>  
>  	if (alloc)
>  		for_each_possible_cpu(cpu) {
> -			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
> -				GFP_KERNEL, cpu_to_node(cpu));
> +			if (!zalloc_cpumask_var_node(
> +				per_cpu_ptr(&__pv_cpu_mask, cpu),
> +				GFP_KERNEL, cpu_to_node(cpu))) {
> +				alloced = false;
> +				break;
> +			}
>  		}
>  
> +#if defined(CONFIG_SMP)
> +	/* Set the IPI entry points */
> +	if (pv_ipi_supported()) {

What if we define pv_ipi_supported() as 'false' in the !CONFIG_SMP case?

The code we have above:

        if (pv_tlb_flush_supported())
		alloc = true;

#if defined(CONFIG_SMP)
        if (pv_ipi_supported())
		alloc = true;
#endif

      	if (alloc)
...

will transform into 'if (pv_tlb_flush_supported() ||
pv_ipi_supported())' and we'll get rid of the 'alloc' variable.

Also, we can probably get rid of this new 'alloced' variable and switch
to checking if the cpumask for the last CPU in cpu_possible_mask is not
NULL.
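
Completely untested, but something along these lines is what I mean. The
stub assumes pv_ipi_supported() stays inside the existing
'#if defined(CONFIG_SMP)' section, so the !CONFIG_SMP variant can live in
a new '#else' branch:

#else /* !CONFIG_SMP */
static bool pv_ipi_supported(void)
{
	return false;
}
#endif

and then kvm_alloc_cpumask() becomes:

static __init int kvm_alloc_cpumask(void)
{
	int cpu;

	if (!kvm_para_available() || nopv)
		return 0;

	if (!(pv_tlb_flush_supported() || pv_ipi_supported()))
		return 0;

	for_each_possible_cpu(cpu) {
		if (!zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
					     GFP_KERNEL, cpu_to_node(cpu))) {
			kvm_free_cpumask();
			return -ENOMEM;
		}
	}

	return 0;
}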
 
> +		apic->send_IPI_mask = kvm_send_ipi_mask;
> +		if (alloced)
> +			apic->send_IPI_mask_allbutself =
> +				kvm_send_ipi_mask_allbutself;
> +		pr_info("setup PV IPIs\n");

I'd rather not set 'apic->send_IPI_mask = kvm_send_ipi_mask' either in
case we failed to allocate the cpumask. It is weird that, in case of an
allocation failure, *some* IPIs will use the PV path and some won't.
It's going to be a nightmare to debug.
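
I.e. don't patch any of the apic hooks unless every mask was
successfully allocated, e.g. (again untested; the NULL check on the last
possible CPU's mask is the allocation-success test mentioned above):

#if defined(CONFIG_SMP)
	/* Set the IPI entry points */
	if (pv_ipi_supported() &&
	    per_cpu(__pv_cpu_mask, cpumask_last(cpu_possible_mask))) {
		apic->send_IPI_mask = kvm_send_ipi_mask;
		apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
		pr_info("setup PV IPIs\n");
	}
#endif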

> +	}
> +#endif
> +
> +	if (!alloced) {
> +		kvm_free_cpumask();
> +		return -ENOMEM;
> +	}
> +
>  	return 0;
>  }
>  arch_initcall(kvm_alloc_cpumask);

-- 
Vitaly



* Re: [PATCH v4] KVM: Check the allocation of pv cpu mask
  2020-10-19 11:23 ` Vitaly Kuznetsov
@ 2020-10-19 12:36   ` Haiwei Li
  2020-10-19 12:50     ` Vitaly Kuznetsov
  0 siblings, 1 reply; 4+ messages in thread
From: Haiwei Li @ 2020-10-19 12:36 UTC (permalink / raw)
  To: Vitaly Kuznetsov
  Cc: pbonzini, sean.j.christopherson, wanpengli, jmattson, joro,
	Haiwei Li, kvm, linux-kernel

On 20/10/19 19:23, Vitaly Kuznetsov wrote:
> lihaiwei.kernel@gmail.com writes:
> 
>> From: Haiwei Li <lihaiwei@tencent.com>
>>
>> Check the allocation of the per-cpu __pv_cpu_mask. Initialize
>> 'send_IPI_mask_allbutself' only when the allocation succeeds, and check
>> the allocation of __pv_cpu_mask in 'kvm_flush_tlb_others'.
>>
>> Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
>> Signed-off-by: Haiwei Li <lihaiwei@tencent.com>
>> ---
>> v1 -> v2:
>>   * add CONFIG_SMP for kvm_send_ipi_mask_allbutself to prevent build error
>> v2 -> v3:
>>   * always check the allocation of __pv_cpu_mask in kvm_flush_tlb_others
>> v3 -> v4:
>>   * move kvm_setup_pv_ipi into kvm_alloc_cpumask and get rid of kvm_apic_init
>>
>>   arch/x86/kernel/kvm.c | 53 +++++++++++++++++++++++++++++--------------
>>   1 file changed, 36 insertions(+), 17 deletions(-)
>>
>> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
>> index 42c6e0deff9e..be28203cc098 100644
>> --- a/arch/x86/kernel/kvm.c
>> +++ b/arch/x86/kernel/kvm.c
>> @@ -547,16 +547,6 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
>>   	__send_ipi_mask(local_mask, vector);
>>   }
>>   
>> -/*
>> - * Set the IPI entry points
>> - */
>> -static void kvm_setup_pv_ipi(void)
>> -{
>> -	apic->send_IPI_mask = kvm_send_ipi_mask;
>> -	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
>> -	pr_info("setup PV IPIs\n");
>> -}
>> -
>>   static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
>>   {
>>   	int cpu;
>> @@ -619,6 +609,11 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
>>   	struct kvm_steal_time *src;
>>   	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
>>   
>> +	if (unlikely(!flushmask)) {
>> +		native_flush_tlb_others(cpumask, info);
>> +		return;
>> +	}
>> +
>>   	cpumask_copy(flushmask, cpumask);
>>   	/*
>>   	 * We have to call flush only on online vCPUs. And
>> @@ -732,10 +727,6 @@ static uint32_t __init kvm_detect(void)
>>   
>>   static void __init kvm_apic_init(void)
>>   {
>> -#if defined(CONFIG_SMP)
>> -	if (pv_ipi_supported())
>> -		kvm_setup_pv_ipi();
>> -#endif
>>   }
> 
> Do we still need the now-empty function?

It's not necessary. I will remove it.

> 
>>   
>>   static void __init kvm_init_platform(void)
>> @@ -765,10 +756,18 @@ static __init int activate_jump_labels(void)
>>   }
>>   arch_initcall(activate_jump_labels);
>>   
>> +static void kvm_free_cpumask(void)
>> +{
>> +	unsigned int cpu;
>> +
>> +	for_each_possible_cpu(cpu)
>> +		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
>> +}
>> +
>>   static __init int kvm_alloc_cpumask(void)
>>   {
>>   	int cpu;
>> -	bool alloc = false;
>> +	bool alloc = false, alloced = true;
>>   
>>   	if (!kvm_para_available() || nopv)
>>   		return 0;
>> @@ -783,10 +782,30 @@ static __init int kvm_alloc_cpumask(void)
>>   
>>   	if (alloc)
>>   		for_each_possible_cpu(cpu) {
>> -			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
>> -				GFP_KERNEL, cpu_to_node(cpu));
>> +			if (!zalloc_cpumask_var_node(
>> +				per_cpu_ptr(&__pv_cpu_mask, cpu),
>> +				GFP_KERNEL, cpu_to_node(cpu))) {
>> +				alloced = false;
>> +				break;
>> +			}
>>   		}
>>   
>> +#if defined(CONFIG_SMP)
>> +	/* Set the IPI entry points */
>> +	if (pv_ipi_supported()) {
> 
> What if we define pv_ipi_supported() as 'false' in the !CONFIG_SMP case?
> 
> The code we have above:
> 
>          if (pv_tlb_flush_supported())
> 		alloc = true;
> 
> #if defined(CONFIG_SMP)
>          if (pv_ipi_supported())
> 		alloc = true;
> #endif
> 
>        	if (alloc)
> ...
> 
> will transform into 'if (pv_tlb_flush_supported() ||
> pv_ipi_supported())' and we'll get rid of the 'alloc' variable.
> 
> Also, we can probably get rid of this new 'alloced' variable and switch
> to checking if the cpumask for the last CPU in cpu_possible_mask is not
> NULL.

Got it, that's a good point. I will do it. Thanks for your patience and
kindness.

>   
>> +		apic->send_IPI_mask = kvm_send_ipi_mask;
>> +		if (alloced)
>> +			apic->send_IPI_mask_allbutself =
>> +				kvm_send_ipi_mask_allbutself;
>> +		pr_info("setup PV IPIs\n");
> 
> I'd rather not set 'apic->send_IPI_mask = kvm_send_ipi_mask' either in
> case we failed to allocate the cpumask. It is weird that, in case of an
> allocation failure, *some* IPIs will use the PV path and some won't.
> It's going to be a nightmare to debug.

Agreed. And 'pv_ops.mmu.tlb_remove_table = tlb_remove_table' should not
be set either. What do you think? Thanks.

     Haiwei Li


* Re: [PATCH v4] KVM: Check the allocation of pv cpu mask
  2020-10-19 12:36   ` Haiwei Li
@ 2020-10-19 12:50     ` Vitaly Kuznetsov
  0 siblings, 0 replies; 4+ messages in thread
From: Vitaly Kuznetsov @ 2020-10-19 12:50 UTC (permalink / raw)
  To: Haiwei Li
  Cc: pbonzini, sean.j.christopherson, wanpengli, jmattson, joro,
	Haiwei Li, kvm, linux-kernel

Haiwei Li <lihaiwei.kernel@gmail.com> writes:

> And 'pv_ops.mmu.tlb_remove_table = tlb_remove_table' should not 
> be set either.

AFAIU by looking at the commit which added it (48a8b97cfd80 "x86/mm:
Only use tlb_remove_table() for paravirt") it shouldn't hurt much. We
could've avoided the assignment, but it happens much earlier, in
kvm_guest_init(), and there's no good way to un-patch pv_ops back.
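
For reference, IIRC the assignment in question happens together with the
flush_tlb_others patching in kvm_guest_init():

	if (pv_tlb_flush_supported()) {
		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
		pr_info("KVM setup pv remote TLB flush\n");
	}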

-- 
Vitaly


