From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1753376AbbBXXSw (ORCPT ); Tue, 24 Feb 2015 18:18:52 -0500 Received: from mga01.intel.com ([192.55.52.88]:5514 "EHLO mga01.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1753180AbbBXXSb (ORCPT ); Tue, 24 Feb 2015 18:18:31 -0500 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.09,641,1418112000"; d="scan'208";a="532360003" From: Vikas Shivappa To: linux-kernel@vger.kernel.org Cc: vikas.shivappa@intel.com, vikas.shivappa@linux.intel.com, matt.fleming@intel.com, hpa@zytor.com, tglx@linutronix.de, mingo@kernel.org, tj@kernel.org, peterz@infradead.org, will.auld@intel.com, dave.hansen@intel.com, andi.kleen@intel.com, tony.luck@intel.com, kanaka.d.juvva@intel.com Subject: [PATCH 5/7] x86/intel_rdt: Software Cache for IA32_PQR_MSR Date: Tue, 24 Feb 2015 15:16:42 -0800 Message-Id: <1424819804-4082-6-git-send-email-vikas.shivappa@linux.intel.com> X-Mailer: git-send-email 1.9.1 In-Reply-To: <1424819804-4082-1-git-send-email-vikas.shivappa@linux.intel.com> References: <1424819804-4082-1-git-send-email-vikas.shivappa@linux.intel.com> Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org This patch implements a common software cache for IA32_PQR_MSR (RMID 0:9, CLOSid 32:63) to be used by both CMT and CAT. CMT updates the RMID whereas CAT updates the CLOSid in the software cache. When the new RMID/CLOSid value is different from the cached values, IA32_PQR_MSR is updated. Since the measured rdmsr latency for IA32_PQR_MSR is very high (~250 cycles), this software cache is necessary to avoid reading the MSR to compare the current CLOSid value. 
Signed-off-by: Vikas Shivappa --- arch/x86/include/asm/intel_rdt.h | 31 +++++++++++++++--------------- arch/x86/include/asm/rdt_common.h | 13 +++++++++++++ arch/x86/kernel/cpu/perf_event_intel_cqm.c | 20 +++++++------------ 3 files changed, 36 insertions(+), 28 deletions(-) create mode 100644 arch/x86/include/asm/rdt_common.h diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h index bc57b56..27621c8 100644 --- a/arch/x86/include/asm/intel_rdt.h +++ b/arch/x86/include/asm/intel_rdt.h @@ -4,12 +4,13 @@ #ifdef CONFIG_CGROUP_RDT #include +#include -#define MSR_IA32_PQR_ASSOC 0xc8f #define MAX_CBM_LENGTH 32 #define IA32_L3_CBM_BASE 0xc90 #define CBM_FROM_INDEX(x) (IA32_L3_CBM_BASE + x) -DECLARE_PER_CPU(unsigned int, x86_cpu_clos); + +DECLARE_PER_CPU(struct intel_pqr_state, pqr_state); extern struct static_key rdt_enable_key; struct rdt_subsys_info { @@ -64,30 +65,30 @@ static inline struct intel_rdt *task_rdt(struct task_struct *task) static inline void rdt_sched_in(struct task_struct *task) { struct intel_rdt *ir; - unsigned int clos; + struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); + unsigned long flags; if (!rdt_enabled()) return; - /* - * This needs to be fixed after CQM code stabilizes - * to cache the whole PQR instead of just CLOSid. - * PQR has closid in high 32 bits and CQM-RMID in low 10 bits. - * Should not write a 0 to the low 10 bits of PQR - * and corrupt RMID. - */ - clos = this_cpu_read(x86_cpu_clos); - + raw_spin_lock_irqsave(&state->lock, flags); rcu_read_lock(); ir = task_rdt(task); - if (ir->clos == clos) { + if (ir->clos == state->clos) { rcu_read_unlock(); + raw_spin_unlock_irqrestore(&state->lock, flags); return; } - wrmsr(MSR_IA32_PQR_ASSOC, 0, ir->clos); - this_cpu_write(x86_cpu_clos, ir->clos); + /* + * PQR has closid in high 32 bits and CQM-RMID + * in low 10 bits. Rewrite the exsting rmid from + * software cache. 
+ */ + wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, ir->clos); + state->clos = ir->clos; rcu_read_unlock(); + raw_spin_unlock_irqrestore(&state->lock, flags); } #else diff --git a/arch/x86/include/asm/rdt_common.h b/arch/x86/include/asm/rdt_common.h new file mode 100644 index 0000000..c87f908 --- /dev/null +++ b/arch/x86/include/asm/rdt_common.h @@ -0,0 +1,13 @@ +#ifndef _X86_RDT_H_ +#define _X86_RDT_H_ + +#define MSR_IA32_PQR_ASSOC 0x0c8f + +struct intel_pqr_state { + raw_spinlock_t lock; + int rmid; + int clos; + int cnt; +}; + +#endif diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c index 596d1ec..63c52e0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c +++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c @@ -7,22 +7,16 @@ #include #include #include +#include #include "perf_event.h" -#define MSR_IA32_PQR_ASSOC 0x0c8f #define MSR_IA32_QM_CTR 0x0c8e #define MSR_IA32_QM_EVTSEL 0x0c8d static unsigned int cqm_max_rmid = -1; static unsigned int cqm_l3_scale; /* supposedly cacheline size */ -struct intel_cqm_state { - raw_spinlock_t lock; - int rmid; - int cnt; -}; - -static DEFINE_PER_CPU(struct intel_cqm_state, cqm_state); +DEFINE_PER_CPU(struct intel_pqr_state, pqr_state); /* * Protects cache_cgroups and cqm_rmid_free_lru and cqm_rmid_limbo_lru. 
@@ -931,7 +925,7 @@ out: static void intel_cqm_event_start(struct perf_event *event, int mode) { - struct intel_cqm_state *state = this_cpu_ptr(&cqm_state); + struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); unsigned int rmid = event->hw.cqm_rmid; unsigned long flags; @@ -948,14 +942,14 @@ static void intel_cqm_event_start(struct perf_event *event, int mode) WARN_ON_ONCE(state->rmid); state->rmid = rmid; - wrmsrl(MSR_IA32_PQR_ASSOC, state->rmid); + wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, state->clos); raw_spin_unlock_irqrestore(&state->lock, flags); } static void intel_cqm_event_stop(struct perf_event *event, int mode) { - struct intel_cqm_state *state = this_cpu_ptr(&cqm_state); + struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); unsigned long flags; if (event->hw.cqm_state & PERF_HES_STOPPED) @@ -968,7 +962,7 @@ static void intel_cqm_event_stop(struct perf_event *event, int mode) if (!--state->cnt) { state->rmid = 0; - wrmsrl(MSR_IA32_PQR_ASSOC, 0); + wrmsr(MSR_IA32_PQR_ASSOC, 0, state->clos); } else { WARN_ON_ONCE(!state->rmid); } @@ -1213,7 +1207,7 @@ static inline void cqm_pick_event_reader(int cpu) static void intel_cqm_cpu_prepare(unsigned int cpu) { - struct intel_cqm_state *state = &per_cpu(cqm_state, cpu); + struct intel_pqr_state *state = &per_cpu(pqr_state, cpu); struct cpuinfo_x86 *c = &cpu_data(cpu); raw_spin_lock_init(&state->lock); -- 1.9.1