From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1755490AbcAVVhI (ORCPT ); Fri, 22 Jan 2016 16:37:08 -0500 Received: from aserp1040.oracle.com ([141.146.126.69]:27476 "EHLO aserp1040.oracle.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1755259AbcAVVge (ORCPT ); Fri, 22 Jan 2016 16:36:34 -0500 From: Boris Ostrovsky To: david.vrabel@citrix.com, konrad.wilk@oracle.com Cc: xen-devel@lists.xenproject.org, linux-kernel@vger.kernel.org, roger.pau@citrix.com, mcgrof@suse.com, Boris Ostrovsky Subject: [PATCH v1 11/12] xen/hvmlite: Boot secondary CPUs Date: Fri, 22 Jan 2016 16:35:57 -0500 Message-Id: <1453498558-6028-12-git-send-email-boris.ostrovsky@oracle.com> X-Mailer: git-send-email 1.7.1 In-Reply-To: <1453498558-6028-1-git-send-email-boris.ostrovsky@oracle.com> References: <1453498558-6028-1-git-send-email-boris.ostrovsky@oracle.com> X-Source-IP: userv0021.oracle.com [156.151.31.71] Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org HVMlite secondary VCPUs use the baremetal bringup path (i.e. native_* smp_ops) but need to do some preparation in PV code. 
Signed-off-by: Boris Ostrovsky --- arch/x86/xen/enlighten.c | 2 + arch/x86/xen/pmu.c | 4 +- arch/x86/xen/smp.c | 60 +++++++++++++++++++++++++++++++++------------- 3 files changed, 47 insertions(+), 19 deletions(-) diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 2ed8b2b..850ce66 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1937,6 +1937,8 @@ static void __init xen_hvm_guest_init(void) xen_have_vector_callback = 1; xen_hvm_smp_init(); register_cpu_notifier(&xen_hvm_cpu_notifier); + if (xen_hvmlite) + smp_found_config = 1; xen_unplug_emulated_devices(); x86_init.irqs.intr_init = xen_init_IRQ; xen_hvm_init_time_ops(); diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c index 724a087..7bc209b 100644 --- a/arch/x86/xen/pmu.c +++ b/arch/x86/xen/pmu.c @@ -518,7 +518,7 @@ void xen_pmu_init(int cpu) BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE); - if (xen_hvm_domain()) + if (xen_hvm_domain() && !xen_hvmlite) return; xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL); @@ -556,7 +556,7 @@ void xen_pmu_finish(int cpu) { struct xen_pmu_params xp; - if (xen_hvm_domain()) + if (xen_hvm_domain() && !xen_hvmlite) return; xp.vcpu = cpu; diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index fb085ef..fbad829 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -348,26 +348,31 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) } xen_init_lock_cpu(0); - smp_store_boot_cpu_info(); - cpu_data(0).x86_max_cores = 1; + if (!xen_hvmlite) { + smp_store_boot_cpu_info(); + cpu_data(0).x86_max_cores = 1; + + for_each_possible_cpu(i) { + zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), + GFP_KERNEL); + zalloc_cpumask_var(&per_cpu(cpu_core_map, i), + GFP_KERNEL); + zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), + GFP_KERNEL); + } + set_cpu_sibling_map(0); - for_each_possible_cpu(i) { - zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); - zalloc_cpumask_var(&per_cpu(cpu_core_map, i), 
GFP_KERNEL); - zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); + if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL)) + panic("could not allocate xen_cpu_initialized_map\n"); + + cpumask_copy(xen_cpu_initialized_map, cpumask_of(0)); } - set_cpu_sibling_map(0); xen_pmu_init(0); if (xen_smp_intr_init(0)) BUG(); - if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL)) - panic("could not allocate xen_cpu_initialized_map\n"); - - cpumask_copy(xen_cpu_initialized_map, cpumask_of(0)); - /* Restrict the possible_map according to max_cpus. */ while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) @@ -375,8 +380,11 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) set_cpu_possible(cpu, false); } - for_each_possible_cpu(cpu) + for_each_possible_cpu(cpu) { set_cpu_present(cpu, true); + if (xen_hvmlite) + physid_set(cpu, phys_cpu_present_map); + } } static int @@ -810,10 +818,15 @@ void __init xen_smp_init(void) static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) { + if (xen_hvmlite) + xen_smp_prepare_cpus(max_cpus); + native_smp_prepare_cpus(max_cpus); - WARN_ON(xen_smp_intr_init(0)); - xen_init_lock_cpu(0); + if (!xen_hvmlite) { + WARN_ON(xen_smp_intr_init(0)); + xen_init_lock_cpu(0); + } } static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) @@ -836,8 +849,21 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) */ rc = xen_smp_intr_init(cpu); WARN_ON(rc); - if (!rc) - rc = native_cpu_up(cpu, tidle); + + if (xen_hvmlite) { + rc = cpu_initialize_context(cpu, tidle); + if (rc) { + xen_smp_intr_free(cpu); + return rc; + } + xen_pmu_init(cpu); + } + + if (!rc) { + rc = native_cpu_up(cpu, tidle); + if (rc && xen_hvmlite) + xen_pmu_finish(cpu); + } /* * We must initialize the slowpath CPU kicker _after_ the native -- 1.7.1