xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/2] xen/x86: Move irq allocation from Xen smp_op.cpu_up()
       [not found] <1458221613-21563-1-git-send-email-boris.ostrovsky@oracle.com>
@ 2016-03-17 13:33 ` Boris Ostrovsky
  2016-03-17 13:33 ` [PATCH 2/2] hotplug: Prevent alloc/free of irq descriptors during cpu up/down (again) Boris Ostrovsky
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Boris Ostrovsky @ 2016-03-17 13:33 UTC (permalink / raw)
  To: david.vrabel, konrad.wilk, tglx
  Cc: xen-devel, Boris Ostrovsky, x86, linux-kernel

Commit ce0d3c0a6fb1 ("genirq: Revert sparse irq locking around
__cpu_up() and move it to x86 for now") reverted irq locking
introduced by commit a89941816726 ("hotplug: Prevent alloc/free
of irq descriptors during cpu up/down") because of Xen allocating
irqs in both of its cpu_up ops.

We can move those allocations into CPU notifiers so that the original
patch can be reinstated.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
 arch/x86/xen/enlighten.c |   53 ++++++++++++++++++++++++++++++++++++++-------
 arch/x86/xen/smp.c       |   45 +-------------------------------------
 arch/x86/xen/smp.h       |    3 ++
 3 files changed, 49 insertions(+), 52 deletions(-)

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2c26108..d1a86db 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -137,6 +137,8 @@ RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
 __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
+static struct notifier_block xen_cpu_notifier;
+
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
@@ -1596,6 +1598,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	xen_initial_gdt = &per_cpu(gdt_page, 0);
 
 	xen_smp_init();
+	register_cpu_notifier(&xen_cpu_notifier);
 
 #ifdef CONFIG_ACPI_NUMA
 	/*
@@ -1783,17 +1786,49 @@ static void __init init_hvm_pv_info(void)
 	xen_domain_type = XEN_HVM_DOMAIN;
 }
 
-static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
-			      void *hcpu)
+static int xen_cpu_notify(struct notifier_block *self, unsigned long action,
+			  void *hcpu)
 {
 	int cpu = (long)hcpu;
+	int rc;
+
 	switch (action) {
 	case CPU_UP_PREPARE:
-		xen_vcpu_setup(cpu);
-		if (xen_have_vector_callback) {
-			if (xen_feature(XENFEAT_hvm_safe_pvclock))
-				xen_setup_timer(cpu);
+		if (xen_hvm_domain()) {
+			/*
+			 * This can happen if CPU was offlined earlier and
+			 * offlining timed out in common_cpu_die().
+			 */
+			if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+				xen_smp_intr_free(cpu);
+				xen_uninit_lock_cpu(cpu);
+			}
+
+			xen_vcpu_setup(cpu);
 		}
+
+		if (xen_pv_domain() ||
+		    (xen_have_vector_callback &&
+		     xen_feature(XENFEAT_hvm_safe_pvclock)))
+			xen_setup_timer(cpu);
+
+		rc = xen_smp_intr_init(cpu);
+		if (rc) {
+			WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
+			     cpu, rc);
+			return NOTIFY_BAD;
+		}
+
+		break;
+	case CPU_ONLINE:
+		xen_init_lock_cpu(cpu);
+		break;
+	case CPU_UP_CANCELED:
+		xen_smp_intr_free(cpu);
+		if (xen_pv_domain() ||
+		    (xen_have_vector_callback &&
+		     xen_feature(XENFEAT_hvm_safe_pvclock)))
+			xen_teardown_timer(cpu);
 		break;
 	default:
 		break;
@@ -1801,8 +1836,8 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block xen_hvm_cpu_notifier = {
-	.notifier_call	= xen_hvm_cpu_notify,
+static struct notifier_block xen_cpu_notifier = {
+	.notifier_call	= xen_cpu_notify,
 };
 
 #ifdef CONFIG_KEXEC_CORE
@@ -1834,7 +1869,7 @@ static void __init xen_hvm_guest_init(void)
 	if (xen_feature(XENFEAT_hvm_callback_vector))
 		xen_have_vector_callback = 1;
 	xen_hvm_smp_init();
-	register_cpu_notifier(&xen_hvm_cpu_notifier);
+	register_cpu_notifier(&xen_cpu_notifier);
 	xen_unplug_emulated_devices();
 	x86_init.irqs.intr_init = xen_init_IRQ;
 	xen_hvm_init_time_ops();
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 719cf29..09d5cc0 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -115,7 +115,7 @@ asmlinkage __visible void cpu_bringup_and_idle(int cpu)
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
-static void xen_smp_intr_free(unsigned int cpu)
+void xen_smp_intr_free(unsigned int cpu)
 {
 	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
@@ -159,7 +159,7 @@ static void xen_smp_intr_free(unsigned int cpu)
 		per_cpu(xen_pmu_irq, cpu).name = NULL;
 	}
 };
-static int xen_smp_intr_init(unsigned int cpu)
+int xen_smp_intr_init(unsigned int cpu)
 {
 	int rc;
 	char *resched_name, *callfunc_name, *debug_name, *pmu_name;
@@ -468,8 +468,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	common_cpu_up(cpu, idle);
 
 	xen_setup_runstate_info(cpu);
-	xen_setup_timer(cpu);
-	xen_init_lock_cpu(cpu);
 
 	/*
 	 * PV VCPUs are always successfully taken down (see 'while' loop
@@ -488,10 +486,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 
 	xen_pmu_init(cpu);
 
-	rc = xen_smp_intr_init(cpu);
-	if (rc)
-		return rc;
-
 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
 	BUG_ON(rc);
 
@@ -761,47 +755,12 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 	xen_init_lock_cpu(0);
 }
 
-static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
-	int rc;
-
-	/*
-	 * This can happen if CPU was offlined earlier and
-	 * offlining timed out in common_cpu_die().
-	 */
-	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
-		xen_smp_intr_free(cpu);
-		xen_uninit_lock_cpu(cpu);
-	}
-
-	/*
-	 * xen_smp_intr_init() needs to run before native_cpu_up()
-	 * so that IPI vectors are set up on the booting CPU before
-	 * it is marked online in native_cpu_up().
-	*/
-	rc = xen_smp_intr_init(cpu);
-	WARN_ON(rc);
-	if (!rc)
-		rc =  native_cpu_up(cpu, tidle);
-
-	/*
-	 * We must initialize the slowpath CPU kicker _after_ the native
-	 * path has executed. If we initialized it before none of the
-	 * unlocker IPI kicks would reach the booting CPU as the booting
-	 * CPU had not set itself 'online' in cpu_online_mask. That mask
-	 * is checked when IPIs are sent (on HVM at least).
-	 */
-	xen_init_lock_cpu(cpu);
-	return rc;
-}
-
 void __init xen_hvm_smp_init(void)
 {
 	if (!xen_have_vector_callback)
 		return;
 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
 	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
-	smp_ops.cpu_up = xen_hvm_cpu_up;
 	smp_ops.cpu_die = xen_cpu_die;
 	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
 	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index 963d62a..45faaf3 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -8,6 +8,9 @@ extern void xen_send_IPI_allbutself(int vector);
 extern void xen_send_IPI_all(int vector);
 extern void xen_send_IPI_self(int vector);
 
+extern int xen_smp_intr_init(unsigned int cpu);
+extern void xen_smp_intr_free(unsigned int cpu);
+
 #ifdef CONFIG_XEN_PVH
 extern void xen_pvh_early_cpu_init(int cpu, bool entry);
 #else
-- 
1.7.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH 2/2] hotplug: Prevent alloc/free of irq descriptors during cpu up/down (again)
       [not found] <1458221613-21563-1-git-send-email-boris.ostrovsky@oracle.com>
  2016-03-17 13:33 ` [PATCH 1/2] xen/x86: Move irq allocation from Xen smp_op.cpu_up() Boris Ostrovsky
@ 2016-03-17 13:33 ` Boris Ostrovsky
       [not found] ` <1458221613-21563-2-git-send-email-boris.ostrovsky@oracle.com>
  2016-04-22 14:35 ` [PATCH 0/2] Reinstate irq alloc/dealloc locking patch Boris Ostrovsky
  3 siblings, 0 replies; 5+ messages in thread
From: Boris Ostrovsky @ 2016-03-17 13:33 UTC (permalink / raw)
  To: david.vrabel, konrad.wilk, tglx
  Cc: xen-devel, Boris Ostrovsky, x86, linux-kernel

Now that Xen no longer allocates irqs in _cpu_up(), we can restore
commit a89941816726 ("hotplug: Prevent alloc/free of irq descriptors
during cpu up/down")

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
 arch/x86/kernel/smpboot.c |   11 -----------
 kernel/cpu.c              |    8 ++++++++
 2 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 643dbdc..cabe21e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1083,17 +1083,8 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	common_cpu_up(cpu, tidle);
 
-	/*
-	 * We have to walk the irq descriptors to setup the vector
-	 * space for the cpu which comes online.  Prevent irq
-	 * alloc/free across the bringup.
-	 */
-	irq_lock_sparse();
-
 	err = do_boot_cpu(apicid, cpu, tidle);
-
 	if (err) {
-		irq_unlock_sparse();
 		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
 		return -EIO;
 	}
@@ -1111,8 +1102,6 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 		touch_nmi_watchdog();
 	}
 
-	irq_unlock_sparse();
-
 	return 0;
 }
 
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6ea42e8..2ff63b3 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -342,8 +342,16 @@ static int bringup_cpu(unsigned int cpu)
 	struct task_struct *idle = idle_thread_get(cpu);
 	int ret;
 
+	/*
+	 * Some architectures have to walk the irq descriptors to
+	 * setup the vector space for the cpu which comes online.
+	 * Prevent irq alloc/free across the bringup.
+	 */
+	irq_lock_sparse();
+
 	/* Arch-specific enabling code. */
 	ret = __cpu_up(cpu, idle);
+	irq_unlock_sparse();
 	if (ret) {
 		cpu_notify(CPU_UP_CANCELED, cpu);
 		return ret;
-- 
1.7.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH 1/2] xen/x86: Move irq allocation from Xen smp_op.cpu_up()
       [not found] ` <1458221613-21563-2-git-send-email-boris.ostrovsky@oracle.com>
@ 2016-03-25 15:10   ` Konrad Rzeszutek Wilk
  2016-03-25 15:23     ` Boris Ostrovsky
  0 siblings, 1 reply; 5+ messages in thread
From: Konrad Rzeszutek Wilk @ 2016-03-25 15:10 UTC (permalink / raw)
  To: Boris Ostrovsky; +Cc: xen-devel, tglx, x86, david.vrabel, linux-kernel

On Thu, Mar 17, 2016 at 09:33:32AM -0400, Boris Ostrovsky wrote:
> Commit ce0d3c0a6fb1 ("genirq: Revert sparse irq locking around
> __cpu_up() and move it to x86 for now") reverted irq locking
> introduced by commit a89941816726 ("hotplug: Prevent alloc/free
> of irq descriptors during cpu up/down") because of Xen allocating
> irqs in both of its cpu_up ops.
> 
> We can move those allocations into CPU notifiers so that original
> patch can be reinstated.

Original being "hotplug: Prevent alloc/free..." ?

> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> ---
>  arch/x86/xen/enlighten.c |   53 ++++++++++++++++++++++++++++++++++++++-------
>  arch/x86/xen/smp.c       |   45 +-------------------------------------
>  arch/x86/xen/smp.h       |    3 ++
>  3 files changed, 49 insertions(+), 52 deletions(-)
> 
> diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
> index 2c26108..d1a86db 100644
> --- a/arch/x86/xen/enlighten.c
> +++ b/arch/x86/xen/enlighten.c
> @@ -137,6 +137,8 @@ RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
>  __read_mostly int xen_have_vector_callback;
>  EXPORT_SYMBOL_GPL(xen_have_vector_callback);
>  
> +static struct notifier_block xen_cpu_notifier;
> +
>  /*
>   * Point at some empty memory to start with. We map the real shared_info
>   * page as soon as fixmap is up and running.
> @@ -1596,6 +1598,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
>  	xen_initial_gdt = &per_cpu(gdt_page, 0);
>  
>  	xen_smp_init();
> +	register_cpu_notifier(&xen_cpu_notifier);
>  
>  #ifdef CONFIG_ACPI_NUMA
>  	/*
> @@ -1783,17 +1786,49 @@ static void __init init_hvm_pv_info(void)
>  	xen_domain_type = XEN_HVM_DOMAIN;
>  }
>  
> -static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
> -			      void *hcpu)
> +static int xen_cpu_notify(struct notifier_block *self, unsigned long action,
> +			  void *hcpu)
>  {
>  	int cpu = (long)hcpu;
> +	int rc;
> +
>  	switch (action) {
>  	case CPU_UP_PREPARE:
> -		xen_vcpu_setup(cpu);
> -		if (xen_have_vector_callback) {
> -			if (xen_feature(XENFEAT_hvm_safe_pvclock))
> -				xen_setup_timer(cpu);
> +		if (xen_hvm_domain()) {
> +			/*
> +			 * This can happen if CPU was offlined earlier and
> +			 * offlining timed out in common_cpu_die().
> +			 */
> +			if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
> +				xen_smp_intr_free(cpu);
> +				xen_uninit_lock_cpu(cpu);
> +			}
> +
> +			xen_vcpu_setup(cpu);
>  		}
> +
> +		if (xen_pv_domain() ||
> +		    (xen_have_vector_callback &&
> +		     xen_feature(XENFEAT_hvm_safe_pvclock)))
> +			xen_setup_timer(cpu);
> +
> +		rc = xen_smp_intr_init(cpu);
> +		if (rc) {
> +			WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
> +			     cpu, rc);
> +			return NOTIFY_BAD;
> +		}
> +
> +		break;
> +	case CPU_ONLINE:
> +		xen_init_lock_cpu(cpu);
> +		break;
> +	case CPU_UP_CANCELED:
> +		xen_smp_intr_free(cpu);

xen_uninit_lock_cpu ?


> +		if (xen_pv_domain() ||
> +		    (xen_have_vector_callback &&
> +		     xen_feature(XENFEAT_hvm_safe_pvclock)))
> +			xen_teardown_timer(cpu);
>  		break;
>  	default:
>  		break;
> @@ -1801,8 +1836,8 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
>  	return NOTIFY_OK;
>  }
>  
> -static struct notifier_block xen_hvm_cpu_notifier = {
> -	.notifier_call	= xen_hvm_cpu_notify,
> +static struct notifier_block xen_cpu_notifier = {
> +	.notifier_call	= xen_cpu_notify,
>  };
>  
>  #ifdef CONFIG_KEXEC_CORE
> @@ -1834,7 +1869,7 @@ static void __init xen_hvm_guest_init(void)
>  	if (xen_feature(XENFEAT_hvm_callback_vector))
>  		xen_have_vector_callback = 1;
>  	xen_hvm_smp_init();
> -	register_cpu_notifier(&xen_hvm_cpu_notifier);
> +	register_cpu_notifier(&xen_cpu_notifier);
>  	xen_unplug_emulated_devices();
>  	x86_init.irqs.intr_init = xen_init_IRQ;
>  	xen_hvm_init_time_ops();
> diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
> index 719cf29..09d5cc0 100644
> --- a/arch/x86/xen/smp.c
> +++ b/arch/x86/xen/smp.c
> @@ -115,7 +115,7 @@ asmlinkage __visible void cpu_bringup_and_idle(int cpu)
>  	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
>  }
>  
> -static void xen_smp_intr_free(unsigned int cpu)
> +void xen_smp_intr_free(unsigned int cpu)
>  {
>  	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
>  		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
> @@ -159,7 +159,7 @@ static void xen_smp_intr_free(unsigned int cpu)
>  		per_cpu(xen_pmu_irq, cpu).name = NULL;
>  	}
>  };
> -static int xen_smp_intr_init(unsigned int cpu)
> +int xen_smp_intr_init(unsigned int cpu)
>  {
>  	int rc;
>  	char *resched_name, *callfunc_name, *debug_name, *pmu_name;
> @@ -468,8 +468,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
>  	common_cpu_up(cpu, idle);
>  
>  	xen_setup_runstate_info(cpu);
> -	xen_setup_timer(cpu);
> -	xen_init_lock_cpu(cpu);
>  
>  	/*
>  	 * PV VCPUs are always successfully taken down (see 'while' loop
> @@ -488,10 +486,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
>  
>  	xen_pmu_init(cpu);
>  
> -	rc = xen_smp_intr_init(cpu);
> -	if (rc)
> -		return rc;
> -
>  	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
>  	BUG_ON(rc);
>  
> @@ -761,47 +755,12 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
>  	xen_init_lock_cpu(0);
>  }
>  
> -static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
> -{
> -	int rc;
> -
> -	/*
> -	 * This can happen if CPU was offlined earlier and
> -	 * offlining timed out in common_cpu_die().
> -	 */
> -	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
> -		xen_smp_intr_free(cpu);
> -		xen_uninit_lock_cpu(cpu);
> -	}
> -
> -	/*
> -	 * xen_smp_intr_init() needs to run before native_cpu_up()
> -	 * so that IPI vectors are set up on the booting CPU before
> -	 * it is marked online in native_cpu_up().
> -	*/
> -	rc = xen_smp_intr_init(cpu);
> -	WARN_ON(rc);
> -	if (!rc)
> -		rc =  native_cpu_up(cpu, tidle);
> -
> -	/*
> -	 * We must initialize the slowpath CPU kicker _after_ the native
> -	 * path has executed. If we initialized it before none of the
> -	 * unlocker IPI kicks would reach the booting CPU as the booting
> -	 * CPU had not set itself 'online' in cpu_online_mask. That mask
> -	 * is checked when IPIs are sent (on HVM at least).
> -	 */
> -	xen_init_lock_cpu(cpu);
> -	return rc;
> -}
> -
>  void __init xen_hvm_smp_init(void)
>  {
>  	if (!xen_have_vector_callback)
>  		return;
>  	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
>  	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
> -	smp_ops.cpu_up = xen_hvm_cpu_up;
>  	smp_ops.cpu_die = xen_cpu_die;
>  	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
>  	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
> diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
> index 963d62a..45faaf3 100644
> --- a/arch/x86/xen/smp.h
> +++ b/arch/x86/xen/smp.h
> @@ -8,6 +8,9 @@ extern void xen_send_IPI_allbutself(int vector);
>  extern void xen_send_IPI_all(int vector);
>  extern void xen_send_IPI_self(int vector);
>  
> +extern int xen_smp_intr_init(unsigned int cpu);
> +extern void xen_smp_intr_free(unsigned int cpu);
> +
>  #ifdef CONFIG_XEN_PVH
>  extern void xen_pvh_early_cpu_init(int cpu, bool entry);
>  #else
> -- 
> 1.7.1
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH 1/2] xen/x86: Move irq allocation from Xen smp_op.cpu_up()
  2016-03-25 15:10   ` [PATCH 1/2] xen/x86: Move irq allocation from Xen smp_op.cpu_up() Konrad Rzeszutek Wilk
@ 2016-03-25 15:23     ` Boris Ostrovsky
  0 siblings, 0 replies; 5+ messages in thread
From: Boris Ostrovsky @ 2016-03-25 15:23 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk; +Cc: xen-devel, tglx, x86, david.vrabel, linux-kernel



On 03/25/2016 11:10 AM, Konrad Rzeszutek Wilk wrote:
> On Thu, Mar 17, 2016 at 09:33:32AM -0400, Boris Ostrovsky wrote:
>> Commit ce0d3c0a6fb1 ("genirq: Revert sparse irq locking around
>> __cpu_up() and move it to x86 for now") reverted irq locking
>> introduced by commit a89941816726 ("hotplug: Prevent alloc/free
>> of irq descriptors during cpu up/down") because of Xen allocating
>> irqs in both of its cpu_up ops.
>>
>> We can move those allocations into CPU notifiers so that original
>> patch can be reinstated.
> Original being "hotplug: Prevent alloc/free..." ?

Yes.

>
> -static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
> -			      void *hcpu)
> +static int xen_cpu_notify(struct notifier_block *self, unsigned long action,
> +			  void *hcpu)
>   {
>   	int cpu = (long)hcpu;
> +	int rc;
> +
>   	switch (action) {
>   	case CPU_UP_PREPARE:
> -		xen_vcpu_setup(cpu);
> -		if (xen_have_vector_callback) {
> -			if (xen_feature(XENFEAT_hvm_safe_pvclock))
> -				xen_setup_timer(cpu);
> +		if (xen_hvm_domain()) {
> +			/*
> +			 * This can happen if CPU was offlined earlier and
> +			 * offlining timed out in common_cpu_die().
> +			 */
> +			if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
> +				xen_smp_intr_free(cpu);
> +				xen_uninit_lock_cpu(cpu);
> +			}
> +
> +			xen_vcpu_setup(cpu);
>   		}
> +
> +		if (xen_pv_domain() ||
> +		    (xen_have_vector_callback &&
> +		     xen_feature(XENFEAT_hvm_safe_pvclock)))
> +			xen_setup_timer(cpu);
> +
> +		rc = xen_smp_intr_init(cpu);
> +		if (rc) {
> +			WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
> +			     cpu, rc);
> +			return NOTIFY_BAD;
> +		}
> +
> +		break;
> +	case CPU_ONLINE:
> +		xen_init_lock_cpu(cpu);
> +		break;
> +	case CPU_UP_CANCELED:
> +		xen_smp_intr_free(cpu);
> xen_uninit_lock_cpu ?

I don't think this is needed: we initialize the lock in the CPU_ONLINE 
notifier, which can only be called after CPU_UP_CANCELED would have run (in 
which case we'll never do CPU_ONLINE)

-boris

>
>
>> +		if (xen_pv_domain() ||
>> +		    (xen_have_vector_callback &&
>> +		     xen_feature(XENFEAT_hvm_safe_pvclock)))
>> +			xen_teardown_timer(cpu);
>>   		break;
>>   	default:
>>   		break;


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH 0/2] Reinstate irq alloc/dealloc locking patch
       [not found] <1458221613-21563-1-git-send-email-boris.ostrovsky@oracle.com>
                   ` (2 preceding siblings ...)
       [not found] ` <1458221613-21563-2-git-send-email-boris.ostrovsky@oracle.com>
@ 2016-04-22 14:35 ` Boris Ostrovsky
  3 siblings, 0 replies; 5+ messages in thread
From: Boris Ostrovsky @ 2016-04-22 14:35 UTC (permalink / raw)
  To: david.vrabel, konrad.wilk, tglx; +Cc: xen-devel, x86, linux-kernel

Ping?

On 03/17/2016 09:33 AM, Boris Ostrovsky wrote:
> Original version of that patch (commit a89941816726) had to be reverted
> due to Xen allocating irqs in its cpu_up ops.
>
> The first patch moves allocations into hotplug notifiers and the second
> one restores the original patch (with minor adjustments to new hotplug
> framework)
>
> Boris Ostrovsky (2):
>    xen/x86: Move irq allocation from Xen smp_op.cpu_up()
>    hotplug: Prevent alloc/free of irq descriptors during cpu up/down
>      (again)
>
>   arch/x86/kernel/smpboot.c |   11 ---------
>   arch/x86/xen/enlighten.c  |   53 +++++++++++++++++++++++++++++++++++++-------
>   arch/x86/xen/smp.c        |   45 +------------------------------------
>   arch/x86/xen/smp.h        |    3 ++
>   kernel/cpu.c              |    8 ++++++
>   5 files changed, 57 insertions(+), 63 deletions(-)
>


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2016-04-22 14:35 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <1458221613-21563-1-git-send-email-boris.ostrovsky@oracle.com>
2016-03-17 13:33 ` [PATCH 1/2] xen/x86: Move irq allocation from Xen smp_op.cpu_up() Boris Ostrovsky
2016-03-17 13:33 ` [PATCH 2/2] hotplug: Prevent alloc/free of irq descriptors during cpu up/down (again) Boris Ostrovsky
     [not found] ` <1458221613-21563-2-git-send-email-boris.ostrovsky@oracle.com>
2016-03-25 15:10   ` [PATCH 1/2] xen/x86: Move irq allocation from Xen smp_op.cpu_up() Konrad Rzeszutek Wilk
2016-03-25 15:23     ` Boris Ostrovsky
2016-04-22 14:35 ` [PATCH 0/2] Reinstate irq alloc/dealloc locking patch Boris Ostrovsky

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).