From: David Carrillo-Cisneros
To: linux-kernel@vger.kernel.org
Cc: x86@kernel.org, Ingo Molnar, Andi Kleen, Kan Liang, Peter Zijlstra,
	David Carrillo-Cisneros
Subject: [PATCH v02 4/5] perf/x86/intel: MSR_LAST_BRANCH_FROM_x quirk for ctx switch
Date: Tue, 21 Jun 2016 11:31:13 -0700
Message-Id: <1466533874-52003-5-git-send-email-davidcc@google.com>
X-Mailer: git-send-email 2.8.0.rc3.226.g39d4020
In-Reply-To: <1466533874-52003-1-git-send-email-davidcc@google.com>
References: <1466533874-52003-1-git-send-email-davidcc@google.com>

Add a context-switch quirk to save/restore the value of
MSR_LAST_BRANCH_FROM_x when LBR is enabled and kernel addresses may be
present in the lbr_from registers.

To test this patch, use a perf tool and a kernel that include the next
patch in this series. That patch removes the workaround that masked the
hardware bug:

  $ ./lbr_perf record --call-graph lbr -e cycles:k sleep 1

where lbr_perf is the patched perf tool, which allows :k to be
specified in LBR mode. Without this quirk, the above command triggers
a #GP:

[  411.191445] ------------[ cut here ]------------
[  411.196015] WARNING: CPU: 28 PID: 14096 at arch/x86/mm/extable.c:65 ex_handler_wrmsr_unsafe+0x70/0x80
[  411.205123] unchecked MSR access error: WRMSR to 0x681 (tried to write 0x1fffffff81010794)
...
[  411.265962] Call Trace:
[  411.268384]  [] dump_stack+0x4d/0x63
[  411.273462]  [] __warn+0xe5/0x100
[  411.278278]  [] warn_slowpath_fmt+0x49/0x50
[  411.283955]  [] ex_handler_wrmsr_unsafe+0x70/0x80
[  411.290144]  [] fixup_exception+0x42/0x50
[  411.295658]  [] do_general_protection+0x8a/0x160
[  411.301764]  [] general_protection+0x22/0x30
[  411.307527]  [] ? intel_pmu_lbr_sched_task+0xc9/0x380
[  411.314063]  [] intel_pmu_sched_task+0x3c/0x60
[  411.319996]  [] x86_pmu_sched_task+0x1b/0x20
[  411.325762]  [] perf_pmu_sched_task+0x6b/0xb0
[  411.331610]  [] __perf_event_task_sched_in+0x7d/0x150
[  411.338145]  [] finish_task_switch+0x15c/0x200
[  411.344078]  [] __schedule+0x274/0x6cc
[  411.349325]  [] schedule+0x39/0x90
[  411.354229]  [] exit_to_usermode_loop+0x39/0x89
[  411.360246]  [] prepare_exit_to_usermode+0x2e/0x30
[  411.366524]  [] retint_user+0x8/0x10
[  411.371599] ---[ end trace 1ed61b8a551e95d3 ]---

Signed-off-by: David Carrillo-Cisneros
Reviewed-by: Stephane Eranian
---
 arch/x86/events/intel/lbr.c | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 2ee5dde..6cd7cc0 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -281,6 +281,21 @@ inline u64 lbr_from_signext_quirk_wr(u64 val)
 	return val;
 }
 
+/*
+ * If quirk is needed, ensure sign extension is 61 bits.
+ */
+
+u64 lbr_from_signext_quirk_rd(u64 val)
+{
+	if (static_branch_unlikely(&lbr_from_quirk_key))
+		/*
+		 * Quirk is on when TSX is not enabled. Therefore TSX
+		 * flags must be read as OFF.
+		 */
+		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
+	return val;
+}
+
 static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 {
 	int i;
@@ -297,7 +312,8 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 	tos = task_ctx->tos;
 	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
-		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
+		wrmsrl(x86_pmu.lbr_from + lbr_idx,
+		       lbr_from_signext_quirk_wr(task_ctx->lbr_from[i]));
 		wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
@@ -310,7 +326,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 {
 	int i;
 	unsigned lbr_idx, mask;
-	u64 tos;
+	u64 tos, val;
 
 	if (task_ctx->lbr_callstack_users == 0) {
 		task_ctx->lbr_stack_state = LBR_NONE;
@@ -321,7 +337,8 @@
 	tos = intel_pmu_lbr_tos();
 	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
-		rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
+		rdmsrl(x86_pmu.lbr_from + lbr_idx, val);
+		task_ctx->lbr_from[i] = lbr_from_signext_quirk_rd(val);
 		rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
@@ -499,6 +516,8 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 		int lbr_flags = lbr_desc[lbr_format];
 
 		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
+		from = lbr_from_signext_quirk_rd(from);
+
 		rdmsrl(x86_pmu.lbr_to + lbr_idx, to);
 
 		if (lbr_format == LBR_FORMAT_INFO && need_info) {
-- 
2.8.0.rc3.226.g39d4020
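
For illustration only, not part of the patch: a minimal user-space
sketch of the two transformations the quirk performs on the splat's
value. quirk_rd() is the read-side masking added by this patch;
quirk_wr() is assumed to mirror the lbr_from_signext_quirk_wr() helper
visible in the diff context (added by the previous patch in the
series); the standalone program, its names and the exact constants are
this sketch's assumptions, not kernel code.

	#include <stdint.h>
	#include <stdio.h>

	/* TSX flag bits in the LBR_FROM layout; names follow the
	 * kernel's definitions: bit 62 = in_tx, bit 61 = abort. */
	#define LBR_FROM_FLAG_IN_TX	(1ULL << 62)
	#define LBR_FROM_FLAG_ABORT	(1ULL << 61)
	/* Bits 60:59, used as the sign-extension source (assumed). */
	#define LBR_FROM_SIGNEXT_2MSB	((1ULL << 60) | (1ULL << 59))

	/* Read side (this patch): with TSX disabled the TSX flags can
	 * only legitimately be OFF, so clear bits 62:61 of the value
	 * saved at context switch out. */
	static uint64_t quirk_rd(uint64_t val)
	{
		return val & ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
	}

	/* Write side (assumed per the earlier patch): copy bits 60:59
	 * into bits 62:61, preserving bit 63, so the value written
	 * back at context switch in is sign-extended the way WRMSR on
	 * these parts expects. */
	static uint64_t quirk_wr(uint64_t val)
	{
		return val | ((val & LBR_FROM_SIGNEXT_2MSB) << 2);
	}

	int main(void)
	{
		/* Kernel text address from the splat; rdmsr on a
		 * no-TSX part already returned bits 62:61 as zero, so
		 * quirk_rd() is a no-op here. */
		uint64_t saved = 0x1fffffff81010794ULL;

		printf("saved:    %#llx\n", (unsigned long long)quirk_rd(saved));
		printf("restored: %#llx\n", (unsigned long long)quirk_wr(saved));
		return 0;
	}

For the splat's value this prints a restored value of
0x7fffffff81010794, i.e. bits 62:61 re-extended from bits 60:59, which
is the shape of value that no longer trips the #GP on write-back.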