From: Juergen Gross <jgross@suse.com>
To: Vitaly Kuznetsov <vkuznets@redhat.com>, xen-devel@lists.xenproject.org
Cc: x86@kernel.org, linux-kernel@vger.kernel.org,
Boris Ostrovsky <boris.ostrovsky@oracle.com>,
Andrew Jones <drjones@redhat.com>
Subject: Re: [PATCH v2 01/21] x86/xen: separate PV and HVM hypervisors
Date: Wed, 8 Mar 2017 16:06:41 +0100
Message-ID: <46b3ed57-187a-427e-774e-ec6caaae83c8@suse.com>
In-Reply-To: <20170302175357.8222-2-vkuznets@redhat.com>
On 02/03/17 18:53, Vitaly Kuznetsov wrote:
> As a preparation to splitting the code we need to untangle it:
>
> x86_hyper_xen -> x86_hyper_xen_hvm and x86_hyper_xen_pv
> xen_platform() -> xen_platform_hvm() and xen_platform_pv()
> xen_cpu_up_prepare() -> xen_cpu_up_prepare_pv() and xen_cpu_up_prepare_hvm()
> xen_cpu_dead() -> xen_cpu_dead_pv() and xen_cpu_dead_hvm()
>
> Add two parameters to xen_cpuhp_setup() to pass proper cpu_up_prepare and
> cpu_dead hooks. xen_set_cpu_features() is now PV-only so the redundant
> xen_pv_domain() check can be dropped.
>
> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
> ---
> arch/x86/include/asm/hypervisor.h |   3 +-
> arch/x86/kernel/cpu/hypervisor.c  |   3 +-
> arch/x86/xen/enlighten.c          | 113 +++++++++++++++++++++++++-------------
> 3 files changed, 78 insertions(+), 41 deletions(-)
>
> diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
> index 67942b6..6f7545c6 100644
> --- a/arch/x86/include/asm/hypervisor.h
> +++ b/arch/x86/include/asm/hypervisor.h
> @@ -53,7 +53,8 @@ extern const struct hypervisor_x86 *x86_hyper;
> /* Recognized hypervisors */
> extern const struct hypervisor_x86 x86_hyper_vmware;
> extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
> -extern const struct hypervisor_x86 x86_hyper_xen;
> +extern const struct hypervisor_x86 x86_hyper_xen_pv;
> +extern const struct hypervisor_x86 x86_hyper_xen_hvm;
> extern const struct hypervisor_x86 x86_hyper_kvm;
>
> extern void init_hypervisor(struct cpuinfo_x86 *c);
> diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
> index 35691a6..a77f18d 100644
> --- a/arch/x86/kernel/cpu/hypervisor.c
> +++ b/arch/x86/kernel/cpu/hypervisor.c
> @@ -29,7 +29,8 @@
> static const __initconst struct hypervisor_x86 * const hypervisors[] =
> {
> #ifdef CONFIG_XEN
> -        &x86_hyper_xen,
> +        &x86_hyper_xen_pv,
> +        &x86_hyper_xen_hvm,
> #endif
>         &x86_hyper_vmware,
>         &x86_hyper_ms_hyperv,
> diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
> index ec1d5c4..4c1a582 100644
> --- a/arch/x86/xen/enlighten.c
> +++ b/arch/x86/xen/enlighten.c
> @@ -139,9 +139,11 @@ void *xen_initial_gdt;
>
> RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
>
> -static int xen_cpu_up_prepare(unsigned int cpu);
> +static int xen_cpu_up_prepare_pv(unsigned int cpu);
> +static int xen_cpu_up_prepare_hvm(unsigned int cpu);
> static int xen_cpu_up_online(unsigned int cpu);
> -static int xen_cpu_dead(unsigned int cpu);
> +static int xen_cpu_dead_pv(unsigned int cpu);
> +static int xen_cpu_dead_hvm(unsigned int cpu);
>
> /*
>  * Point at some empty memory to start with. We map the real shared_info
> @@ -1447,13 +1449,14 @@ static void __init xen_dom0_set_legacy_features(void)
>         x86_platform.legacy.rtc = 1;
> }
>
> -static int xen_cpuhp_setup(void)
> +static int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
> +                           int (*cpu_dead_cb)(unsigned int))
> {
>         int rc;
>
>         rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
>                                        "x86/xen/hvm_guest:prepare",
> -                                      xen_cpu_up_prepare, xen_cpu_dead);
> +                                      cpu_up_prepare_cb, cpu_dead_cb);
>         if (rc >= 0) {
>                 rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
>                                                "x86/xen/hvm_guest:online",
> @@ -1559,7 +1562,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
>            possible map and a non-dummy shared_info. */
>         per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
>
> -        WARN_ON(xen_cpuhp_setup());
> +        WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
>
>         local_irq_disable();
>         early_boot_irqs_disabled = true;
> @@ -1840,28 +1843,41 @@ static void __init init_hvm_pv_info(void)
> }
> #endif
>
> -static int xen_cpu_up_prepare(unsigned int cpu)
> +static int xen_cpu_up_prepare_pv(unsigned int cpu)
> {
>         int rc;
>
> -        if (xen_hvm_domain()) {
> -                /*
> -                 * This can happen if CPU was offlined earlier and
> -                 * offlining timed out in common_cpu_die().
> -                 */
> -                if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
> -                        xen_smp_intr_free(cpu);
> -                        xen_uninit_lock_cpu(cpu);
> -                }
> +        xen_setup_timer(cpu);
>
> -                if (cpu_acpi_id(cpu) != U32_MAX)
> -                        per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
> -                else
> -                        per_cpu(xen_vcpu_id, cpu) = cpu;
> -                xen_vcpu_setup(cpu);
> +        rc = xen_smp_intr_init(cpu);
> +        if (rc) {
> +                WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
> +                     cpu, rc);
> +                return rc;
> +        }
> +        return 0;
> +}
> +
> +static int xen_cpu_up_prepare_hvm(unsigned int cpu)
> +{
> +        int rc;
> +
> +        /*
> +         * This can happen if CPU was offlined earlier and
> +         * offlining timed out in common_cpu_die().
> +         */
> +        if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
> +                xen_smp_intr_free(cpu);
> +                xen_uninit_lock_cpu(cpu);
>         }
>
> -        if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
> +        if (cpu_acpi_id(cpu) != U32_MAX)
> +                per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
> +        else
> +                per_cpu(xen_vcpu_id, cpu) = cpu;
> +        xen_vcpu_setup(cpu);
> +
> +        if (xen_feature(XENFEAT_hvm_safe_pvclock))
>                 xen_setup_timer(cpu);
>
>         rc = xen_smp_intr_init(cpu);
> @@ -1873,16 +1889,25 @@ static int xen_cpu_up_prepare(unsigned int cpu)
>         return 0;
> }
>
> -static int xen_cpu_dead(unsigned int cpu)
> +static int xen_cpu_dead_pv(unsigned int cpu)
> {
>         xen_smp_intr_free(cpu);
>
> -        if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
> -                xen_teardown_timer(cpu);
> +        xen_teardown_timer(cpu);
>
>         return 0;
> }
>
> +static int xen_cpu_dead_hvm(unsigned int cpu)
> +{
> +        xen_smp_intr_free(cpu);
> +
> +        if (xen_feature(XENFEAT_hvm_safe_pvclock))
> +                xen_teardown_timer(cpu);
> +
> +        return 0;
> +}
> +
> static int xen_cpu_up_online(unsigned int cpu)
> {
>         xen_init_lock_cpu(cpu);
> @@ -1919,7 +1944,7 @@ static void __init xen_hvm_guest_init(void)
>         BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
>
>         xen_hvm_smp_init();
> -        WARN_ON(xen_cpuhp_setup());
> +        WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
>         xen_unplug_emulated_devices();
>         x86_init.irqs.intr_init = xen_init_IRQ;
>         xen_hvm_init_time_ops();
> @@ -1942,9 +1967,17 @@ static __init int xen_parse_nopv(char *arg)
> }
> early_param("xen_nopv", xen_parse_nopv);
>
> -static uint32_t __init xen_platform(void)
> +static uint32_t __init xen_platform_pv(void)
> {
> -        if (xen_nopv)
> +        if (xen_pv_domain())
> +                return xen_cpuid_base();
> +
> +        return 0;
> +}
> +
> +static uint32_t __init xen_platform_hvm(void)
> +{
> +        if (xen_pv_domain() || xen_nopv)
>                 return 0;
>
>         return xen_cpuid_base();
> @@ -1966,10 +1999,8 @@ EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
>
> static void xen_set_cpu_features(struct cpuinfo_x86 *c)
> {
> -        if (xen_pv_domain()) {
> -                clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
> -                set_cpu_cap(c, X86_FEATURE_XENPV);
> -        }
> +        clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
> +        set_cpu_cap(c, X86_FEATURE_XENPV);
> }
>
> static void xen_pin_vcpu(int cpu)
> @@ -2011,17 +2042,21 @@ static void xen_pin_vcpu(int cpu)
>         }
> }
>
> -const struct hypervisor_x86 x86_hyper_xen = {
> -        .name = "Xen",
> -        .detect = xen_platform,
> -#ifdef CONFIG_XEN_PVHVM
> -        .init_platform = xen_hvm_guest_init,
> -#endif
> -        .x2apic_available = xen_x2apic_para_available,
> +const struct hypervisor_x86 x86_hyper_xen_pv = {
> +        .name = "Xen PV",
> +        .detect = xen_platform_pv,
>         .set_cpu_features = xen_set_cpu_features,
>         .pin_vcpu = xen_pin_vcpu,
> };
> -EXPORT_SYMBOL(x86_hyper_xen);
> +EXPORT_SYMBOL(x86_hyper_xen_pv);
> +
> +const struct hypervisor_x86 x86_hyper_xen_hvm = {
> +        .name = "Xen HVM",
> +        .detect = xen_platform_hvm,
> +        .init_platform = xen_hvm_guest_init,
> +        .x2apic_available = xen_x2apic_para_available,

Please keep the .pin_vcpu member. It will be needed for a PVH dom0.
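
Something along these lines would do, i.e. just mirror what the PV
variant above does (untested sketch only; the EXPORT_SYMBOL() at the
end is an assumption, matching how the old x86_hyper_xen was exported):

const struct hypervisor_x86 x86_hyper_xen_hvm = {
        .name = "Xen HVM",
        .detect = xen_platform_hvm,
        .init_platform = xen_hvm_guest_init,
        .x2apic_available = xen_x2apic_para_available,
        .pin_vcpu = xen_pin_vcpu,       /* keep: needed for a PVH dom0 */
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);
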
Juergen