Subject: [patch V11 00/16] SSB 0
From: Thomas Gleixner @ 2018-05-02 21:51 UTC
To: speck

Changes since V10:

  - Addressed Ingo's review feedback

  - Picked up Reviewed-bys

Delta patch below. The bundle is coming in a separate mail. The git repo
branches are updated as well. The master branch also contains the fix for
the lost IBRS issue Tim was seeing.

If there are no further issues or nitpicks, I'm going to make these
changes immutable; anything further will need to go incrementally on top.

Thanks,

	tglx

8<--------------------
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 29984fd3dd18..a8d2ae1e335b 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4051,11 +4051,12 @@
 
 			on     - Unconditionally disable Speculative Store Bypass
 			off    - Unconditionally enable Speculative Store Bypass
-			auto   - Kernel detects whether the CPU model contains a
+			auto   - Kernel detects whether the CPU model contains an
 				 implementation of Speculative Store Bypass and
-				 picks the most appropriate mitigation
-			prctl  - Control Speculative Store Bypass for a thread
-				 via prctl. By default it is enabled. The state
+				 picks the most appropriate mitigation.
+			prctl  - Control Speculative Store Bypass per thread
+				 via prctl. Speculative Store Bypass is enabled
+				 for a process by default. The state of the control
 				 is inherited on fork.
 
 			Not specifying this option is equivalent to
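
For illustration, a minimal userspace sketch that reads the resulting
mitigation state. The sysfs vulnerabilities file is added elsewhere in
this series; the exact output string depends on the selected mode:

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

	if (!f)
		return 1;
	/* Prints e.g. "Mitigation: Speculative Store Bypass disabled" */
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}
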
diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
index 8ff39a26a992..ddbebcd01208 100644
--- a/Documentation/userspace-api/spec_ctrl.rst
+++ b/Documentation/userspace-api/spec_ctrl.rst
@@ -10,7 +10,7 @@ The kernel provides mitigation for such vulnerabilities in various
 forms. Some of these mitigations are compile time configurable and some on
 the kernel command line.
 
-There is also a class of mitigations which is very expensive, but they can
+There is also a class of mitigations which are very expensive, but they can
 be restricted to a certain set of processes or tasks in controlled
 environments. The mechanism to control these mitigations is via
 :manpage:`prctl(2)`.
@@ -25,7 +25,7 @@ PR_GET_SPECULATION_CTRL
 -----------------------
 
 PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
-which is selected with arg2 of prctl(2). The return value uses bit 0-2 with
+which is selected with arg2 of prctl(2). The return value uses bits 0-2 with
 the following meaning:
 
 ==== ================ ===================================================
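
A minimal userspace sketch of this interface, assuming the PR_SPEC_*
bit values from the uapi header in this series (error handling elided):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_SPECULATION_CTRL
# define PR_GET_SPECULATION_CTRL	52
# define PR_SET_SPECULATION_CTRL	53
# define PR_SPEC_STORE_BYPASS		0
# define PR_SPEC_PRCTL			(1UL << 0)
# define PR_SPEC_ENABLE			(1UL << 1)
# define PR_SPEC_DISABLE		(1UL << 2)
#endif

int main(void)
{
	/* Bits 0-2 of the return value encode the current state */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0)
		return 1;

	/* If per-task control is available, disable the misfeature */
	if (state & PR_SPEC_PRCTL)
		prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		      PR_SPEC_DISABLE, 0, 0);

	printf("speculation control state: 0x%x\n", state);
	return 0;
}
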
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 5bee7a2ca4ff..810f50bb338d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -70,7 +70,11 @@
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
 #define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
-#define ARCH_CAP_RDS_NO			(1 << 4)   /* Not susceptible to speculative store bypass */
+#define ARCH_CAP_RDS_NO			(1 << 4)   /*
+						    * Not susceptible to Speculative Store Bypass
+						    * attack, so no Reduced Data Speculation control
+						    * required.
+						    */
 
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 #define MSR_IA32_BBL_CR_CTL3		0x0000011e
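
As a hedged sketch, these capability bits could be decoded at boot like
this; the reporting helper is illustrative and not part of this series:

/* Illustrative only: report the architectural capability bits above */
static void report_arch_capabilities(void)
{
	u64 cap;

	if (!boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
		return;

	rdmsrl(MSR_IA32_ARCH_CAPABILITIES, cap);
	pr_info("RDCL_NO=%d IBRS_ALL=%d RDS_NO=%d\n",
		!!(cap & ARCH_CAP_RDCL_NO),
		!!(cap & ARCH_CAP_IBRS_ALL),
		!!(cap & ARCH_CAP_RDS_NO));
}
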
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 023e2edc0f3c..71ad01422655 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -225,8 +225,8 @@ enum spectre_v2_mitigation {
  * ourselves and always use this as the base for SPEC_CTRL.
  * We also use this when handling guest entry/exit as below.
  */
-extern void x86_set_spec_ctrl(u64);
-extern u64 x86_get_default_spec_ctrl(void);
+extern void x86_spec_ctrl_set(u64);
+extern u64 x86_spec_ctrl_get_default(void);
 
 /* The Speculative Store Bypass disable variants */
 enum ssb_mitigation {
@@ -285,7 +285,7 @@ static inline void indirect_branch_prediction_barrier(void)
  */
 #define firmware_restrict_branch_speculation_start()			\
 do {									\
-	u64 val = x86_get_default_spec_ctrl() | SPEC_CTRL_IBRS;		\
+	u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS;		\
 									\
 	preempt_disable();						\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
@@ -294,7 +294,7 @@ do {									\
 
 #define firmware_restrict_branch_speculation_end()			\
 do {									\
-	u64 val = x86_get_default_spec_ctrl();				\
+	u64 val = x86_spec_ctrl_get_default();				\
 									\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
 			      X86_FEATURE_USE_IBRS_FW);			\
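
The intended call pattern for this pair, sketched around a hypothetical
firmware call (efi_call_dummy() is a placeholder, not a real wrapper):

/* Sketch: restrict indirect branch speculation across a firmware call */
static long guarded_firmware_call(void)
{
	long ret;

	firmware_restrict_branch_speculation_start();	/* IBRS on, preemption off */
	ret = efi_call_dummy();				/* placeholder */
	firmware_restrict_branch_speculation_end();	/* back to the default value */

	return ret;
}
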
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index 607236af4008..45ef00ad5105 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -12,8 +12,8 @@
  * shadowable for guests but this is not (currently) the case.
  * Takes the guest view of SPEC_CTRL MSR as a parameter.
  */
-extern void x86_set_guest_spec_ctrl(u64);
-extern void x86_restore_host_spec_ctrl(u64);
+extern void x86_spec_ctrl_set_guest(u64);
+extern void x86_spec_ctrl_restore_host(u64);
 
 /* AMD specific Speculative Store Bypass MSR data */
 extern u64 x86_amd_ls_cfg_base;
@@ -30,7 +30,7 @@ static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
 
 static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
 {
-	return tifn & _TIF_RDS ? x86_amd_ls_cfg_rds_mask : 0ULL;
+	return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
 }
 
 extern void speculative_store_bypass_update(void);
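
For context, a sketch of how these TIF conversion helpers feed the
per-task MSR update on a context switch; this follows the approach of
the series but is reproduced from memory, not from this delta:

/* Sketch: apply the per-task TIF_RDS state to the appropriate MSR */
static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
{
	if (static_cpu_has(X86_FEATURE_AMD_RDS))
		wrmsrl(MSR_AMD64_LS_CFG,
		       x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn));
	else
		wrmsrl(MSR_IA32_SPEC_CTRL,
		       x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn));
}
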
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 50c6ba6d031b..18efc33a8d2e 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -572,7 +572,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
 			setup_force_cpu_cap(X86_FEATURE_RDS);
 			setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
-			x86_amd_ls_cfg_rds_mask = (1ULL << bit);
+			x86_amd_ls_cfg_rds_mask = 1ULL << bit;
 		}
 	}
 }
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c28856e475c8..15f77d4518c7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -32,7 +32,7 @@ static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 
 /*
- * Our boot-time value of SPEC_CTRL MSR. We read it once so that any
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
  * writes to SPEC_CTRL contain whatever reserved bits have been set.
  */
 u64 __ro_after_init x86_spec_ctrl_base;
@@ -41,11 +41,11 @@ u64 __ro_after_init x86_spec_ctrl_base;
  * The vendor and possibly platform specific bits which can be modified in
  * x86_spec_ctrl_base.
  */
-static u64 __ro_after_init x86_spec_ctrl_mask = ~(SPEC_CTRL_IBRS);
+static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
 
 /*
- * AMD specific MSR info for Store Bypass control.  x86_amd_ls_cfg_rds_mask
- * is initialized in identify_boot_cpu().
+ * AMD specific MSR info for Speculative Store Bypass control.
+ * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
  */
 u64 __ro_after_init x86_amd_ls_cfg_base;
 u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
@@ -61,7 +61,7 @@ void __init check_bugs(void)
 
 	/*
 	 * Read the SPEC_CTRL MSR to account for reserved bits which may
-	 * have unknown values. AMD64_LS_CFG msr is cached in the early AMD
+	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
 	 * init code as it is not enumerated and depends on the family.
 	 */
 	if (boot_cpu_has(X86_FEATURE_IBRS))
@@ -131,22 +131,22 @@ static const char *spectre_v2_strings[] = {
 
 static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
 
-void x86_set_spec_ctrl(u64 val)
+void x86_spec_ctrl_set(u64 val)
 {
 	if (val & x86_spec_ctrl_mask)
 		WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
 	else
 		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
 }
-EXPORT_SYMBOL_GPL(x86_set_spec_ctrl);
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
 
-u64 x86_get_default_spec_ctrl(void)
+u64 x86_spec_ctrl_get_default(void)
 {
 	return x86_spec_ctrl_base;
 }
-EXPORT_SYMBOL_GPL(x86_get_default_spec_ctrl);
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
 
-void x86_set_guest_spec_ctrl(u64 guest_spec_ctrl)
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
 {
 	u64 host = x86_spec_ctrl_base;
 
@@ -159,9 +159,9 @@ void x86_set_guest_spec_ctrl(u64 guest_spec_ctrl)
 	if (host != guest_spec_ctrl)
 		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
 }
-EXPORT_SYMBOL_GPL(x86_set_guest_spec_ctrl);
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
 
-void x86_restore_host_spec_ctrl(u64 guest_spec_ctrl)
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 {
 	u64 host = x86_spec_ctrl_base;
 
@@ -174,7 +174,7 @@ void x86_restore_host_spec_ctrl(u64 guest_spec_ctrl)
 	if (host != guest_spec_ctrl)
 		wrmsrl(MSR_IA32_SPEC_CTRL, host);
 }
-EXPORT_SYMBOL_GPL(x86_restore_host_spec_ctrl);
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
 
 static void x86_amd_rds_enable(void)
 {
@@ -504,8 +504,8 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 		switch (boot_cpu_data.x86_vendor) {
 		case X86_VENDOR_INTEL:
 			x86_spec_ctrl_base |= SPEC_CTRL_RDS;
-			x86_spec_ctrl_mask &= ~(SPEC_CTRL_RDS);
-			x86_set_spec_ctrl(SPEC_CTRL_RDS);
+			x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
+			x86_spec_ctrl_set(SPEC_CTRL_RDS);
 			break;
 		case X86_VENDOR_AMD:
 			x86_amd_rds_enable();
@@ -560,7 +560,7 @@ static int ssb_prctl_get(void)
 	}
 }
 
-int arch_prctl_set_spec_ctrl(unsigned long which, unsigned long ctrl)
+int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
 {
 	if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
 		return -ERANGE;
@@ -573,7 +573,7 @@ int arch_prctl_set_spec_ctrl(unsigned long which, unsigned long ctrl)
 	}
 }
 
-int arch_prctl_get_spec_ctrl(unsigned long which)
+int arch_prctl_spec_ctrl_get(unsigned long which)
 {
 	switch (which) {
 	case PR_SPEC_STORE_BYPASS:
@@ -583,10 +583,10 @@ int arch_prctl_get_spec_ctrl(unsigned long which)
 	}
 }
 
-void x86_setup_ap_spec_ctrl(void)
+void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_IBRS))
-		x86_set_spec_ctrl(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_rds_enable();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f3dbdde978a4..e0517bcee446 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -848,6 +848,11 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_power = edx;
 	}
 
+	if (c->extended_cpuid_level >= 0x80000008) {
+		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
+		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
+	}
+
 	if (c->extended_cpuid_level >= 0x8000000a)
 		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 
@@ -871,7 +876,6 @@ static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
 
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
-		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
 	}
 #ifdef CONFIG_X86_32
 	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
@@ -924,26 +928,26 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
 };
 
 static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
-	{ X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_PINEVIEW },
-	{ X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_LINCROFT },
-	{ X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_PENWELL },
-	{ X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_CLOVERVIEW },
-	{ X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_CEDARVIEW },
-	{ X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_SILVERMONT1 },
-	{ X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_AIRMONT },
-	{ X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_SILVERMONT2 },
-	{ X86_VENDOR_INTEL,     6, INTEL_FAM6_ATOM_MERRIFIELD },
-	{ X86_VENDOR_INTEL,     6, INTEL_FAM6_CORE_YONAH },
-	{ X86_VENDOR_INTEL,     6, INTEL_FAM6_XEON_PHI_KNL },
-	{ X86_VENDOR_INTEL,     6, INTEL_FAM6_XEON_PHI_KNM },
-	{ X86_VENDOR_CENTAUR,	5 },
-	{ X86_VENDOR_INTEL,	5 },
-	{ X86_VENDOR_NSC,	5 },
-	{ X86_VENDOR_AMD,	0xf },
-	{ X86_VENDOR_AMD,	0x10 },
-	{ X86_VENDOR_AMD,	0x11 },
-	{ X86_VENDOR_AMD,	0x12 },
-	{ X86_VENDOR_ANY,	4 },
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_PINEVIEW	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_LINCROFT	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_PENWELL		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_CLOVERVIEW	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_CEDARVIEW	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_CORE_YONAH		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
+	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
+	{ X86_VENDOR_CENTAUR,	5,					},
+	{ X86_VENDOR_INTEL,	5,					},
+	{ X86_VENDOR_NSC,	5,					},
+	{ X86_VENDOR_AMD,	0x12,					},
+	{ X86_VENDOR_AMD,	0x11,					},
+	{ X86_VENDOR_AMD,	0x10,					},
+	{ X86_VENDOR_AMD,	0xf,					},
+	{ X86_VENDOR_ANY,	4,					},
 	{}
 };
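
A sketch of how a whitelist table like this is typically consumed during
early bug detection; x86_match_cpu() and setup_force_cpu_bug() are
existing kernel helpers, the surrounding function is illustrative and
the bug bit name follows this series:

/* Sketch: mark CPUs not on the whitelist as affected by SSB */
static void __init check_spec_store_bypass(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = 0;

	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
	    !(ia32_cap & ARCH_CAP_RDS_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
}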
 
@@ -1384,7 +1388,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
 #endif
 	mtrr_ap_init();
 	validate_apic_and_package_id(c);
-	x86_setup_ap_spec_ctrl();
+	x86_spec_ctrl_setup_ap();
 }
 
 static __init int setup_noclflush(char *arg)
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index faaabc160293..37672d299e35 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -50,6 +50,6 @@ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
 
 unsigned int aperfmperf_get_khz(int cpu);
 
-extern void x86_setup_ap_spec_ctrl(void);
+extern void x86_spec_ctrl_setup_ap(void);
 
 #endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ba4763e9a285..437c1b371129 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5557,7 +5557,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	x86_set_guest_spec_ctrl(svm->spec_ctrl);
+	x86_spec_ctrl_set_guest(svm->spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5669,7 +5669,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	x86_restore_host_spec_ctrl(svm->spec_ctrl);
+	x86_spec_ctrl_restore_host(svm->spec_ctrl);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9744e48457d6..16a111e44691 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9722,7 +9722,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	x86_set_guest_spec_ctrl(vmx->spec_ctrl);
+	x86_spec_ctrl_set_guest(vmx->spec_ctrl);
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 
@@ -9870,7 +9870,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	x86_restore_host_spec_ctrl(vmx->spec_ctrl);
+	x86_spec_ctrl_restore_host(vmx->spec_ctrl);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index 1e63a0a90e96..700bb8a4e4ea 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -57,7 +57,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 })
 
 /* Speculation control prctl */
-int arch_prctl_set_spec_ctrl(unsigned long which, unsigned long ctrl);
-int arch_prctl_get_spec_ctrl(unsigned long which);
+int arch_prctl_spec_ctrl_get(unsigned long which);
+int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl);
 
 #endif /* _LINUX_NOSPEC_H */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 4e7a160d3b28..ebf057ac1346 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -208,8 +208,8 @@ struct prctl_mm_map {
 # define PR_SVE_VL_INHERIT		(1 << 17) /* inherit across exec */
 
 /* Per task speculation control */
-#define PR_SET_SPECULATION_CTRL		52
-#define PR_GET_SPECULATION_CTRL		53
+#define PR_GET_SPECULATION_CTRL		52
+#define PR_SET_SPECULATION_CTRL		53
 /* Speculation control variants */
 # define PR_SPEC_STORE_BYPASS		0
 /* Return and control values for PR_SET/GET_SPECULATION_CTRL */
diff --git a/kernel/sys.c b/kernel/sys.c
index d7afe29319f1..b76dee23bdc9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2244,12 +2244,12 @@ static int propagate_has_child_subreaper(struct task_struct *p, void *data)
 	return 1;
 }
 
-int __weak arch_prctl_set_spec_ctrl(unsigned long which, unsigned long ctrl)
+int __weak arch_prctl_spec_ctrl_get(unsigned long which)
 {
 	return -EINVAL;
 }
 
-int __weak arch_prctl_get_spec_ctrl(unsigned long which)
+int __weak arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
 {
 	return -EINVAL;
 }
@@ -2462,15 +2462,15 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 	case PR_SVE_GET_VL:
 		error = SVE_GET_VL();
 		break;
-	case PR_SET_SPECULATION_CTRL:
-		if (arg4 || arg5)
-			return -EINVAL;
-		error = arch_prctl_set_spec_ctrl(arg2, arg3);
-		break;
 	case PR_GET_SPECULATION_CTRL:
 		if (arg3 || arg4 || arg5)
 			return -EINVAL;
-		error = arch_prctl_get_spec_ctrl(arg2);
+		error = arch_prctl_spec_ctrl_get(arg2);
+		break;
+	case PR_SET_SPECULATION_CTRL:
+		if (arg4 || arg5)
+			return -EINVAL;
+		error = arch_prctl_spec_ctrl_set(arg2, arg3);
 		break;
 	default:
 		error = -EINVAL;

