From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
        id S1750825AbdAWJnG (ORCPT );
        Mon, 23 Jan 2017 04:43:06 -0500
Received: from terminus.zytor.com ([65.50.211.136]:36188 "EHLO mail.zytor.com"
        rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
        id S1750724AbdAWJmz (ORCPT );
        Mon, 23 Jan 2017 04:42:55 -0500
Date: Mon, 23 Jan 2017 01:13:22 -0800
From: tip-bot for Borislav Petkov 
Message-ID: 
Cc: mingo@kernel.org, tglx@linutronix.de, linux-kernel@vger.kernel.org,
    hpa@zytor.com, bp@suse.de
Reply-To: tglx@linutronix.de, mingo@kernel.org, bp@suse.de,
    linux-kernel@vger.kernel.org, hpa@zytor.com
In-Reply-To: <20170120202955.4091-14-bp@alien8.de>
References: <20170120202955.4091-14-bp@alien8.de>
To: linux-tip-commits@vger.kernel.org
Subject: [tip:x86/mce] x86/microcode/AMD: Unify load_ucode_amd_ap()
Git-Commit-ID: e71bb4ec073901ad50bfa86fed74fce7ac3210fe
X-Mailer: tip-git-log-daemon
Robot-ID: 
Robot-Unsubscribe: Contact to get blacklisted from these emails
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Content-Type: text/plain; charset=UTF-8
Content-Disposition: inline
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org

Commit-ID:  e71bb4ec073901ad50bfa86fed74fce7ac3210fe
Gitweb:     http://git.kernel.org/tip/e71bb4ec073901ad50bfa86fed74fce7ac3210fe
Author:     Borislav Petkov 
AuthorDate: Fri, 20 Jan 2017 21:29:52 +0100
Committer:  Thomas Gleixner 
CommitDate: Mon, 23 Jan 2017 10:02:50 +0100

x86/microcode/AMD: Unify load_ucode_amd_ap()

Use a single version for both bitnesses by adding a helper which does
the actual container finding and parsing and which can be used on any
CPU - BSP or AP. This streamlines the paths more.

Signed-off-by: Borislav Petkov 
Link: http://lkml.kernel.org/r/20170120202955.4091-14-bp@alien8.de
Signed-off-by: Thomas Gleixner 

---
 arch/x86/kernel/cpu/microcode/amd.c | 81 ++++++++++++++-----------------------
 1 file changed, 31 insertions(+), 50 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 6174347..fe9e865 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -261,7 +261,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 #endif
 }
 
-void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
+void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
 {
         struct ucode_cpu_info *uci;
         struct cpio_data cp;
@@ -281,89 +281,71 @@ void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
         if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
                 cp = find_microcode_in_initrd(path, use_pa);
 
-        if (!(cp.data && cp.size))
-                return;
-
         /* Needed in load_microcode_amd() */
         uci->cpu_sig.sig = cpuid_1_eax;
 
-        apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
+        *ret = cp;
 }
 
-#ifdef CONFIG_X86_32
-/*
- * On 32-bit, since AP's early load occurs before paging is turned on, we
- * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
- * So during cold boot, AP will apply_ucode_in_initrd() just like the BSP.
- * In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
- * which is used upon resume from suspend.
- */
-void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 {
-        struct microcode_amd *mc;
-        struct cpio_data cp;
+        struct cpio_data cp = { };
 
-        mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
-        if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
-                __apply_microcode_amd(mc);
-                return;
-        }
-
-        if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
-                cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);
+        __load_ucode_amd(cpuid_1_eax, &cp);
 
         if (!(cp.data && cp.size))
                 return;
 
-        /*
-         * This would set amd_ucode_patch above so that the following APs can
-         * use it directly instead of going down this path again.
-         */
         apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
 }
 
-#else
+
 void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 {
         struct equiv_cpu_entry *eq;
         struct microcode_amd *mc;
+        struct cont_desc *desc;
         u16 eq_id;
 
+        if (IS_ENABLED(CONFIG_X86_32)) {
+                mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
+                desc = (struct cont_desc *)__pa_nodebug(&cont);
+        } else {
+                mc = (struct microcode_amd *)amd_ucode_patch;
+                desc = &cont;
+        }
+
         /* First AP hasn't cached it yet, go through the blob. */
-        if (!cont.data) {
-                struct cpio_data cp;
+        if (!desc->data) {
+                struct cpio_data cp = { };
 
-                if (cont.size == -1)
+                if (desc->size == -1)
                         return;
 
 reget:
-                if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) {
-                        cp = find_microcode_in_initrd(ucode_path, false);
-
-                        if (!(cp.data && cp.size)) {
-                                /*
-                                 * Mark it so that other APs do not scan again
-                                 * for no real reason and slow down boot
-                                 * needlessly.
-                                 */
-                                cont.size = -1;
-                                return;
-                        }
+                __load_ucode_amd(cpuid_1_eax, &cp);
+                if (!(cp.data && cp.size)) {
+                        /*
+                         * Mark it so that other APs do not scan again for no
+                         * real reason and slow down boot needlessly.
+                         */
+                        desc->size = -1;
+                        return;
                 }
 
-                if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, &cont)) {
-                        cont.data = NULL;
-                        cont.size = -1;
+                if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, desc)) {
+                        desc->data = NULL;
+                        desc->size = -1;
                         return;
                 }
         }
 
-        eq = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);
+        eq = (struct equiv_cpu_entry *)(desc->data + CONTAINER_HDR_SZ);
 
         eq_id = find_equiv_id(eq, cpuid_1_eax);
         if (!eq_id)
                 return;
 
-        if (eq_id == cont.eq_id) {
+        if (eq_id == desc->eq_id) {
                 u32 rev, dummy;
 
                 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
@@ -384,7 +366,6 @@ reget:
                 goto reget;
         }
 }
-#endif /* CONFIG_X86_32 */
 
 static enum ucode_state load_microcode_amd(int cpu, u8 family,
                                            const u8 *data, size_t size);
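
For readers skimming the archive without the full file: below is a minimal,
userspace-only sketch of the control flow this patch establishes - both the
BSP and AP entry points funnel container lookup through one shared helper,
and the first AP caches the result (or a "nothing found" marker of -1) so
later APs skip the scan. Only the names load_ucode_amd_bsp(),
load_ucode_amd_ap() and __load_ucode_amd() mirror the patch; everything else
(fake_initrd_lookup(), apply_stub(), the main() driver) is invented purely
for illustration and is not kernel code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct cpio_data {
        void *data;
        size_t size;
};

struct cont_desc {
        void *data;
        long size;      /* -1 marks "no container found, stop scanning" */
};

static struct cont_desc cont;

/* Stand-in for get_builtin_microcode()/find_microcode_in_initrd(). */
static struct cpio_data fake_initrd_lookup(void)
{
        static char blob[16];

        return (struct cpio_data){ .data = blob, .size = sizeof(blob) };
}

/* Shared helper: one container-finding path for BSP and AP alike. */
static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
        (void)cpuid_1_eax;
        *ret = fake_initrd_lookup();
}

/* Stand-in for apply_microcode_early_amd(); caches the container if asked. */
static bool apply_stub(struct cpio_data *cp, struct cont_desc *desc)
{
        if (desc) {
                desc->data = cp->data;
                desc->size = (long)cp->size;
        }
        printf("applying %zu bytes of microcode\n", cp->size);
        return true;
}

static void load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{
        struct cpio_data cp = { };

        __load_ucode_amd(cpuid_1_eax, &cp);
        if (!(cp.data && cp.size))
                return;

        apply_stub(&cp, NULL);
}

static void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
        struct cont_desc *desc = &cont;

        /* First AP hasn't cached the container yet: go through the blob. */
        if (!desc->data) {
                struct cpio_data cp = { };

                if (desc->size == -1)           /* an earlier AP already failed */
                        return;

                __load_ucode_amd(cpuid_1_eax, &cp);
                if (!(cp.data && cp.size)) {
                        desc->size = -1;        /* don't rescan on later APs */
                        return;
                }

                apply_stub(&cp, desc);
                return;
        }

        /* Container already cached by an earlier AP: reuse it directly. */
        printf("reusing cached container of %ld bytes\n", desc->size);
}

int main(void)
{
        unsigned int fake_cpuid_1_eax = 0x00800f12;     /* made-up CPUID(1).EAX */

        load_ucode_amd_bsp(fake_cpuid_1_eax);
        load_ucode_amd_ap(fake_cpuid_1_eax);
        load_ucode_amd_ap(fake_cpuid_1_eax);
        return 0;
}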