All of lore.kernel.org
 help / color / mirror / Atom feed
From: Jan Beulich <jbeulich@suse.com>
To: Roger Pau Monne <roger.pau@citrix.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>, Wei Liu <wl@xen.org>,
	xen-devel@lists.xenproject.org
Subject: Re: [PATCH 3/3] amd/msr: implement VIRT_SPEC_CTRL for HVM guests using legacy SSBD
Date: Mon, 14 Feb 2022 17:44:01 +0100	[thread overview]
Message-ID: <63da71fb-820f-bab5-4cec-f9ec54ffbce1@suse.com> (raw)
In-Reply-To: <20220201164651.6369-4-roger.pau@citrix.com>

On 01.02.2022 17:46, Roger Pau Monne wrote:
> @@ -716,26 +702,117 @@ void amd_init_ssbd(const struct cpuinfo_x86 *c)
>  		if (rdmsr_safe(MSR_AMD64_LS_CFG, val) ||
>  		    ({
>  			    val &= ~mask;
> -			    if (opt_ssbd)
> +			    if (enable)
>  				    val |= mask;
>  			    false;
>  		    }) ||
>  		    wrmsr_safe(MSR_AMD64_LS_CFG, val) ||
>  		    ({
>  			    rdmsrl(MSR_AMD64_LS_CFG, val);
> -			    (val & mask) != (opt_ssbd * mask);
> +			    (val & mask) != (enable * mask);
>  		    }))
>  			bit = -1;
>  	}
>  
> -	if (bit < 0)
> +	return bit >= 0;
> +}
> +
> +void amd_init_ssbd(const struct cpuinfo_x86 *c)
> +{
> +	struct cpu_info *info = get_cpu_info();
> +
> +	if (cpu_has_ssb_no)
> +		return;
> +
> +	if (cpu_has_amd_ssbd) {
> +		/* Handled by common MSR_SPEC_CTRL logic */
> +		return;
> +	}
> +
> +	if (cpu_has_virt_ssbd) {
> +		wrmsrl(MSR_VIRT_SPEC_CTRL, opt_ssbd ? SPEC_CTRL_SSBD : 0);
> +		goto out;
> +	}
> +
> +	if (!set_legacy_ssbd(c, opt_ssbd)) {
>  		printk_once(XENLOG_ERR "No SSBD controls available\n");
> +		return;
> +	}
> +
> +	if (!smp_processor_id())
> +		setup_force_cpu_cap(X86_FEATURE_LEGACY_SSBD);

I don't think you need a new feature flag here: You only ever use it
with boot_cpu_has() and there's no alternatives patching keyed to it,
so a single global flag will likely do.

>   out:
>  	info->last_spec_ctrl = info->xen_spec_ctrl = opt_ssbd ? SPEC_CTRL_SSBD
>  							      : 0;
>  }
>  
> +static struct ssbd_core {
> +    spinlock_t lock;
> +    unsigned int count;
> +} *ssbd_core;
> +static unsigned int __read_mostly ssbd_max_cores;

__ro_after_init?

> +bool __init amd_setup_legacy_ssbd(void)
> +{
> +	unsigned int i;
> +
> +	if (boot_cpu_data.x86 != 0x17 || boot_cpu_data.x86_num_siblings == 1)

Maybe better "<= 1", not least ...

> +		return true;
> +
> +	/*
> +	 * One could be forgiven for thinking that c->x86_max_cores is the
> +	 * correct value to use here.
> +	 *
> +	 * However, that value is derived from the current configuration, and
> +	 * c->cpu_core_id is sparse on all but the top end CPUs.  Derive
> +	 * max_cpus from ApicIdCoreIdSize which will cover any sparseness.
> +	 */
> +	if (boot_cpu_data.extended_cpuid_level >= 0x80000008) {
> +		ssbd_max_cores = 1u << MASK_EXTR(cpuid_ecx(0x80000008), 0xf000);
> +		ssbd_max_cores /= boot_cpu_data.x86_num_siblings;

... because of this division. I don't know whether we're also susceptible
to this, but I've seen Linux (on top of Xen) being confused enough about
the topology-related CPUID data we expose that it ended up running with
the value set to zero (and then exploding e.g. on a similar use).

> +	}
> +	if (!ssbd_max_cores)
> +		return false;
> +
> +	/* Max is two sockets for Fam17h hardware. */
> +	ssbd_core = xzalloc_array(struct ssbd_core, ssbd_max_cores * 2);
> +	if (!ssbd_core)
> +		return false;
> +
> +	for (i = 0; i < ssbd_max_cores * 2; i++) {
> +		spin_lock_init(&ssbd_core[i].lock);
> +		/* Record the current state. */
> +		ssbd_core[i].count = opt_ssbd ?
> +				     boot_cpu_data.x86_num_siblings : 0;
> +	}
> +
> +	return true;
> +}
> +
> +void amd_set_legacy_ssbd(bool enable)
> +{
> +	const struct cpuinfo_x86 *c = &current_cpu_data;
> +	struct ssbd_core *core;
> +	unsigned long flags;
> +
> +	if (c->x86 != 0x17 || c->x86_num_siblings == 1) {
> +		set_legacy_ssbd(c, enable);
> +		return;
> +	}
> +
> +	ASSERT(c->phys_proc_id < 2);
> +	ASSERT(c->cpu_core_id < ssbd_max_cores);
> +	core = &ssbd_core[c->phys_proc_id * ssbd_max_cores + c->cpu_core_id];
> +	spin_lock_irqsave(&core->lock, flags);

May I suggest a brief comment on the irqsave aspect here? Aiui when
called from vmexit_virt_spec_ctrl() while we're still in a GIF=0
section, IF is 1 and hence check_lock() would be unhappy (albeit in
a false positive way).

> +	core->count += enable ? 1 : -1;
> +	ASSERT(core->count <= c->x86_num_siblings);
> +	if ((enable  && core->count == 1) ||
> +	    (!enable && core->count == 0))

Maybe simply "if ( core->count == enable )"? Or do compilers not like
comparisons with booleans?

> --- a/xen/arch/x86/spec_ctrl.c
> +++ b/xen/arch/x86/spec_ctrl.c
> @@ -22,6 +22,7 @@
>  #include <xen/param.h>
>  #include <xen/warning.h>
>  
> +#include <asm/amd.h>
>  #include <asm/hvm/svm/svm.h>
>  #include <asm/microcode.h>
>  #include <asm/msr.h>
> @@ -1056,7 +1057,8 @@ void __init init_speculation_mitigations(void)
>              setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM);
>      }
>  
> -    if ( opt_msr_sc_hvm && cpu_has_virt_ssbd )
> +    if ( opt_msr_sc_hvm && (cpu_has_virt_ssbd ||
> +         (boot_cpu_has(X86_FEATURE_LEGACY_SSBD) && amd_setup_legacy_ssbd())) )

Nit: I think such expressions are better wrapped such that
indentation expresses the number of pending open parentheses.

Jan



  reply	other threads:[~2022-02-14 16:44 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-02-01 16:46 [PATCH 0/3] amd/msr: implement MSR_VIRT_SPEC_CTRL for HVM guests Roger Pau Monne
2022-02-01 16:46 ` [PATCH 1/3] amd/msr: implement VIRT_SPEC_CTRL for HVM guests on top of SPEC_CTRL Roger Pau Monne
2022-02-14 15:07   ` Jan Beulich
2022-03-09 15:03     ` Roger Pau Monné
2022-03-09 15:40       ` Jan Beulich
2022-03-09 16:31         ` Roger Pau Monné
2022-03-09 17:04           ` Jan Beulich
2022-02-01 16:46 ` [PATCH 2/3] amd/msr: allow passthrough of VIRT_SPEC_CTRL for HVM guests Roger Pau Monne
2022-02-14 16:02   ` Jan Beulich
2022-03-10 16:41     ` Roger Pau Monné
2022-03-11  7:31       ` Jan Beulich
2022-02-01 16:46 ` [PATCH 3/3] amd/msr: implement VIRT_SPEC_CTRL for HVM guests using legacy SSBD Roger Pau Monne
2022-02-14 16:44   ` Jan Beulich [this message]
2022-03-14 15:32     ` Roger Pau Monné
2022-03-14 15:52       ` Jan Beulich
2022-02-14 20:46 ` [PATCH 0/3] amd/msr: implement MSR_VIRT_SPEC_CTRL for HVM guests Andrew Cooper

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=63da71fb-820f-bab5-4cec-f9ec54ffbce1@suse.com \
    --to=jbeulich@suse.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=roger.pau@citrix.com \
    --cc=wl@xen.org \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.