Message-Id: <20160222221012.748151799@linutronix.de>
User-Agent: quilt/0.63-1
Date: Mon, 22 Feb 2016 22:19:25 -0000
From: Thomas Gleixner
To: LKML
Cc: Peter Zijlstra, x86@kernel.org, Borislav Petkov, Stephane Eranian,
    Harish Chegondi, Kan Liang, Andi Kleen, Jacob Pan
Subject: [patch V3 25/28] x86/perf/intel/rapl: Utilize event->pmu_private
References: <20160222220733.632098221@linutronix.de>

Store the pmu in event->pmu_private and use it instead of the per cpu
data. This is a preparatory step for getting rid of the per cpu
allocations.

The usage sites are in the perf fast path, so this is kept even after
the later conversion to per package storage: a cpu to package lookup
takes 3 loads versus 1 load through the pmu_private pointer.

Signed-off-by: Thomas Gleixner

---
 arch/x86/kernel/cpu/perf_event_intel_rapl.c |   16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -122,6 +122,7 @@ static struct perf_pmu_events_attr event
 struct rapl_pmu {
 	raw_spinlock_t	 lock;
 	int		 n_active;
+	int		 cpu;
 	struct list_head active_list;
 	struct pmu	 *pmu;
 	ktime_t		 timer_interval;
@@ -203,7 +204,7 @@ static void rapl_start_hrtimer(struct ra
 
 static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
 {
-	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
 	struct perf_event *event;
 	unsigned long flags;
 
@@ -249,7 +250,7 @@ static void __rapl_pmu_event_start(struc
 
 static void rapl_pmu_event_start(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+	struct rapl_pmu *pmu = event->pmu_private;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&pmu->lock, flags);
@@ -259,7 +260,7 @@ static void rapl_pmu_event_start(struct
 
 static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+	struct rapl_pmu *pmu = event->pmu_private;
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
@@ -293,7 +294,7 @@ static void rapl_pmu_event_stop(struct p
 
 static int rapl_pmu_event_add(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+	struct rapl_pmu *pmu = event->pmu_private;
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
@@ -316,6 +317,7 @@ static void rapl_pmu_event_del(struct pe
 
 static int rapl_pmu_event_init(struct perf_event *event)
 {
+	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
 	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
 	int bit, msr, ret = 0;
 
@@ -327,6 +329,9 @@ static int rapl_pmu_event_init(struct pe
 	if (event->attr.config & ~RAPL_EVENT_MASK)
 		return -EINVAL;
 
+	if (event->cpu < 0)
+		return -EINVAL;
+
 	/*
 	 * check event is known (determines counter)
 	 */
@@ -365,6 +370,8 @@ static int rapl_pmu_event_init(struct pe
 		return -EINVAL;
 
 	/* must be done before validate_group */
+	event->cpu = pmu->cpu;
+	event->pmu_private = pmu;
 	event->hw.event_base = msr;
 	event->hw.config = cfg;
 	event->hw.idx = bit;
@@ -572,6 +579,7 @@ static int rapl_cpu_prepare(int cpu)
 
 	INIT_LIST_HEAD(&pmu->active_list);
 	pmu->pmu = &rapl_pmu_class;
+	pmu->cpu = cpu;
 	pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
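
For reviewers, the changelog's "3 loads versus 1" argument can be made
concrete with a sketch. This is illustrative only, not part of the patch:
lookup_via_topology() stands in for the cpu to package lookup that the
later per package conversion would need, and rapl_pmus->pmus[] is a
hypothetical name for that per package storage. Only
topology_physical_package_id() is the real x86 helper.

	/*
	 * Sketch only: resolving the pmu from the event's cpu means
	 * chasing the per_cpu cpu_info area, reading phys_proc_id and
	 * then indexing the package array -- three dependent loads.
	 */
	static struct rapl_pmu *lookup_via_topology(struct perf_event *event)
	{
		int pkg = topology_physical_package_id(event->cpu);

		return rapl_pmus->pmus[pkg];
	}

	/*
	 * Sketch only: the pointer cached at event init time is a
	 * single load.
	 */
	static struct rapl_pmu *lookup_via_pmu_private(struct perf_event *event)
	{
		return event->pmu_private;
	}

That is why rapl_pmu_event_init() stores the pointer once and the
start/stop/add fast path callbacks dereference event->pmu_private
unconditionally.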