linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Christoph Lameter <cl@linux.com>
To: Tejun Heo <tj@kernel.org>
Cc: akpm@linuxfoundation.org, rostedt@goodmis.org,
	linux-kernel@vger.kernel.org, Ingo Molnar <mingo@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>
Subject: [PATCH 20/41] scheduler: Replace __get_cpu_var with this_cpu_ptr
Date: Tue, 03 Dec 2013 17:32:52 -0600	[thread overview]
Message-ID: <20131203233255.493692717@linux.com> (raw)
In-Reply-To: 20131203233232.928771708@linux.com

[-- Attachment #1: this_scheduler --]
[-- Type: text/plain, Size: 8616 bytes --]

Convert all uses of __get_cpu_var for address calculation to use
this_cpu_ptr instead.

CC: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Christoph Lameter <cl@linux.com>

Index: linux/include/linux/kernel_stat.h
===================================================================
--- linux.orig/include/linux/kernel_stat.h	2013-12-02 16:07:53.004544351 -0600
+++ linux/include/linux/kernel_stat.h	2013-12-02 16:07:52.994544630 -0600
@@ -44,8 +44,8 @@ DECLARE_PER_CPU(struct kernel_stat, ksta
 DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 
 /* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu (&__get_cpu_var(kstat))
-#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_this_cpu this_cpu_ptr(&kstat)
+#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
 #define kstat_cpu(cpu) per_cpu(kstat, cpu)
 #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
 
Index: linux/kernel/events/callchain.c
===================================================================
--- linux.orig/kernel/events/callchain.c	2013-12-02 16:07:53.004544351 -0600
+++ linux/kernel/events/callchain.c	2013-12-02 16:07:52.994544630 -0600
@@ -137,7 +137,7 @@ static struct perf_callchain_entry *get_
 	int cpu;
 	struct callchain_cpus_entries *entries;
 
-	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
 	if (*rctx == -1)
 		return NULL;
 
@@ -153,7 +153,7 @@ static struct perf_callchain_entry *get_
 static void
 put_callchain_entry(int rctx)
 {
-	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
 }
 
 struct perf_callchain_entry *
Index: linux/kernel/events/core.c
===================================================================
--- linux.orig/kernel/events/core.c	2013-12-02 16:07:53.004544351 -0600
+++ linux/kernel/events/core.c	2013-12-02 16:07:53.004544351 -0600
@@ -240,10 +240,10 @@ void perf_sample_event_took(u64 sample_l
 		return;
 
 	/* decay the counter by 1 average sample */
-	local_samples_len = __get_cpu_var(running_sample_length);
+	local_samples_len = __this_cpu_read(running_sample_length);
 	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
 	local_samples_len += sample_len_ns;
-	__get_cpu_var(running_sample_length) = local_samples_len;
+	__this_cpu_write(running_sample_length, local_samples_len);
 
 	/*
 	 * note: this will be biased artifically low until we have
@@ -869,7 +869,7 @@ static DEFINE_PER_CPU(struct list_head,
 static void perf_pmu_rotate_start(struct pmu *pmu)
 {
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-	struct list_head *head = &__get_cpu_var(rotation_list);
+	struct list_head *head = this_cpu_ptr(&rotation_list);
 
 	WARN_ON(!irqs_disabled());
 
@@ -2354,7 +2354,7 @@ void __perf_event_task_sched_out(struct
 	 * to check if we have to switch out PMU state.
 	 * cgroup event are system-wide mode only
 	 */
-	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
 		perf_cgroup_sched_out(task, next);
 }
 
@@ -2599,11 +2599,11 @@ void __perf_event_task_sched_in(struct t
 	 * to check if we have to switch in PMU state.
 	 * cgroup event are system-wide mode only
 	 */
-	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
 		perf_cgroup_sched_in(prev, task);
 
 	/* check for system-wide branch_stack events */
-	if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
+	if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
 		perf_branch_stack_sched_in(prev, task);
 }
 
@@ -2854,7 +2854,7 @@ bool perf_event_can_stop_tick(void)
 
 void perf_event_task_tick(void)
 {
-	struct list_head *head = &__get_cpu_var(rotation_list);
+	struct list_head *head = this_cpu_ptr(&rotation_list);
 	struct perf_cpu_context *cpuctx, *tmp;
 	struct perf_event_context *ctx;
 	int throttled;
@@ -5554,7 +5554,7 @@ static void do_perf_sw_event(enum perf_t
 				    struct perf_sample_data *data,
 				    struct pt_regs *regs)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 	struct perf_event *event;
 	struct hlist_head *head;
 
@@ -5573,7 +5573,7 @@ end:
 
 int perf_swevent_get_recursion_context(void)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 
 	return get_recursion_context(swhash->recursion);
 }
@@ -5581,7 +5581,7 @@ EXPORT_SYMBOL_GPL(perf_swevent_get_recur
 
 inline void perf_swevent_put_recursion_context(int rctx)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 
 	put_recursion_context(swhash->recursion, rctx);
 }
@@ -5610,7 +5610,7 @@ static void perf_swevent_read(struct per
 
 static int perf_swevent_add(struct perf_event *event, int flags)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 	struct hw_perf_event *hwc = &event->hw;
 	struct hlist_head *head;
 
Index: linux/kernel/sched/fair.c
===================================================================
--- linux.orig/kernel/sched/fair.c	2013-12-02 16:07:53.004544351 -0600
+++ linux/kernel/sched/fair.c	2013-12-02 16:07:53.004544351 -0600
@@ -6146,7 +6146,7 @@ static int load_balance(int this_cpu, st
 	struct sched_group *group;
 	struct rq *busiest;
 	unsigned long flags;
-	struct cpumask *cpus = __get_cpu_var(load_balance_mask);
+	struct cpumask *cpus = this_cpu_ptr(load_balance_mask);
 
 	struct lb_env env = {
 		.sd		= sd,
Index: linux/kernel/sched/rt.c
===================================================================
--- linux.orig/kernel/sched/rt.c	2013-12-02 16:07:53.004544351 -0600
+++ linux/kernel/sched/rt.c	2013-12-02 16:07:53.004544351 -0600
@@ -1387,7 +1387,7 @@ static DEFINE_PER_CPU(cpumask_var_t, loc
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
-	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
+	struct cpumask *lowest_mask = this_cpu_ptr(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
 
Index: linux/kernel/sched/sched.h
===================================================================
--- linux.orig/kernel/sched/sched.h	2013-12-02 16:07:53.004544351 -0600
+++ linux/kernel/sched/sched.h	2013-12-02 16:07:53.004544351 -0600
@@ -545,10 +545,10 @@ static inline int cpu_of(struct rq *rq)
 DECLARE_PER_CPU(struct rq, runqueues);
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-#define this_rq()		(&__get_cpu_var(runqueues))
+#define this_rq()		this_cpu_ptr(&runqueues)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-#define raw_rq()		(&__raw_get_cpu_var(runqueues))
+#define raw_rq()		raw_cpu_ptr(&runqueues)
 
 static inline u64 rq_clock(struct rq *rq)
 {
Index: linux/kernel/user-return-notifier.c
===================================================================
--- linux.orig/kernel/user-return-notifier.c	2013-12-02 16:07:53.004544351 -0600
+++ linux/kernel/user-return-notifier.c	2013-12-02 16:07:53.004544351 -0600
@@ -14,7 +14,7 @@ static DEFINE_PER_CPU(struct hlist_head,
 void user_return_notifier_register(struct user_return_notifier *urn)
 {
 	set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
-	hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list));
+	hlist_add_head(&urn->link, this_cpu_ptr(&return_notifier_list));
 }
 EXPORT_SYMBOL_GPL(user_return_notifier_register);
 
@@ -25,7 +25,7 @@ EXPORT_SYMBOL_GPL(user_return_notifier_r
 void user_return_notifier_unregister(struct user_return_notifier *urn)
 {
 	hlist_del(&urn->link);
-	if (hlist_empty(&__get_cpu_var(return_notifier_list)))
+	if (hlist_empty(this_cpu_ptr(&return_notifier_list)))
 		clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
 }
 EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
Index: linux/kernel/taskstats.c
===================================================================
--- linux.orig/kernel/taskstats.c	2013-12-02 16:07:53.004544351 -0600
+++ linux/kernel/taskstats.c	2013-12-02 16:07:53.004544351 -0600
@@ -638,7 +638,7 @@ void taskstats_exit(struct task_struct *
 		fill_tgid_exit(tsk);
 	}
 
-	listeners = __this_cpu_ptr(&listener_array);
+	listeners = raw_cpu_ptr(&listener_array);
 	if (list_empty(&listeners->list))
 		return;
 


  parent reply	other threads:[~2013-12-03 23:46 UTC|newest]

Thread overview: 84+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-12-03 23:32 [PATCH 00/41] percpu: Consistent per cpu operations V1 Christoph Lameter
2013-12-03 23:32 ` [PATCH 01/41] mm: Replace __get_cpu_var uses with this_cpu_ptr Christoph Lameter
2014-08-26 17:59   ` Tejun Heo
2013-12-03 23:32 ` [PATCH 02/41] tracing: " Christoph Lameter
2014-08-26 17:59   ` Tejun Heo
2014-09-17 16:12     ` Steven Rostedt
2013-12-03 23:32 ` [PATCH 03/41] percpu: Replace __get_cpu_var " Christoph Lameter
2014-08-26 18:00   ` Tejun Heo
2013-12-03 23:32 ` [PATCH 04/41] kernel misc: Replace __get_cpu_var uses Christoph Lameter
2014-08-26 18:00   ` Tejun Heo
2013-12-03 23:32 ` [PATCH 05/41] drivers/char/random: " Christoph Lameter
2014-08-26 18:00   ` Tejun Heo
2013-12-03 23:32 ` [PATCH 06/41] drivers/cpuidle: Replace __get_cpu_var uses for address calculation Christoph Lameter
2014-08-26 18:01   ` Tejun Heo
2013-12-03 23:32 ` [PATCH 07/41] drivers/oprofile: " Christoph Lameter
2013-12-03 23:32 ` [PATCH 08/41] drivers/leds: Replace __get_cpu_var use through this_cpu_ptr Christoph Lameter
2014-08-26 18:01   ` Tejun Heo
2013-12-03 23:32 ` [PATCH 09/41] drivers/clocksource: Replace __get_cpu_var used for address calculation Christoph Lameter
2014-08-26 18:02   ` Tejun Heo
2013-12-03 23:32 ` [PATCH 10/41] staging/zsmalloc: Replace instances of using __get_cpu_var " Christoph Lameter
2014-08-26 18:02   ` Tejun Heo
2013-12-03 23:32 ` [PATCH 11/41] parisc: Replace __get_cpu_var uses " Christoph Lameter
2013-12-04  0:10   ` James Bottomley
2013-12-05 18:46     ` Christoph Lameter
     [not found]     ` <alpine.DEB.2.02.1312051244540.26098@gentwo.org>
2013-12-05 20:58       ` quilt 0.60 does not include CCed email without name Christoph Lameter
2013-12-06  8:18         ` Martin Quinson
2013-12-08  9:19           ` Andreas Gruenbacher
2013-12-08  9:52             ` Martin Quinson
2013-12-03 23:32 ` [PATCH 12/41] metag: Replace __get_cpu_var uses for address calculation Christoph Lameter
2013-12-03 23:32 ` [PATCH 13/41] percpu: Add raw_cpu_ops Christoph Lameter
2013-12-10 15:34   ` Tejun Heo
2013-12-10 15:45     ` Ingo Molnar
2013-12-10 15:49       ` Tejun Heo
2013-12-10 15:55         ` Ingo Molnar
2013-12-10 18:13           ` Christoph Lameter
2013-12-10 18:31             ` Ingo Molnar
2013-12-10 19:32               ` Tejun Heo
2013-12-10 20:02               ` Christoph Lameter
2013-12-11 15:12                 ` Ingo Molnar
2013-12-11 17:48                   ` Christoph Lameter
2013-12-11 17:52                     ` Ingo Molnar
2013-12-11 18:30                       ` Christoph Lameter
2013-12-10 18:11         ` Christoph Lameter
2013-12-03 23:32 ` [PATCH 14/41] x86: Rename __this_cpu_xxx_# operations to raw_cpu_xxx_# Christoph Lameter
2013-12-03 23:32 ` [PATCH 15/41] mm: Use raw_cpu ops for determining current NUMA node Christoph Lameter
2013-12-03 23:32 ` [PATCH 16/41] modules: Use raw_cpu_write for initialization of per cpu refcount Christoph Lameter
2013-12-03 23:32 ` [PATCH 17/41] net: Replace __this_cpu_inc in route.c with raw_cpu_inc Christoph Lameter
2013-12-03 23:32 ` [PATCH 18/41] percpu: Add preemption checks to __this_cpu ops Christoph Lameter
2013-12-03 23:32 ` [PATCH 19/41] time: Replace __get_cpu_var uses Christoph Lameter
2013-12-03 23:32 ` Christoph Lameter [this message]
2013-12-03 23:32 ` [PATCH 21/41] block: Replace __this_cpu_ptr with raw_cpu_ptr Christoph Lameter
2013-12-04  2:20   ` Jens Axboe
2013-12-04 16:38     ` Christoph Lameter
2013-12-04 16:51       ` Jens Axboe
2013-12-03 23:32 ` [PATCH 22/41] rcu: Replace __this_cpu_ptr uses " Christoph Lameter
2013-12-04  1:12   ` Paul E. McKenney
2013-12-04 16:37     ` Christoph Lameter
2013-12-03 23:32 ` [PATCH 23/41] watchdog: Replace __raw_get_cpu_var uses Christoph Lameter
2013-12-03 23:32 ` [PATCH 24/41] net: Replace get_cpu_var through this_cpu_ptr Christoph Lameter
2013-12-03 23:32 ` [PATCH 25/41] drivers/net/ethernet/tile: Replace __get_cpu_var uses for address calculation Christoph Lameter
2013-12-09 20:22   ` Chris Metcalf
2013-12-03 23:32 ` [PATCH 26/41] md: Replace __this_cpu_ptr with raw_cpu_ptr Christoph Lameter
2013-12-03 23:32 ` [PATCH 27/41] irqchips: Replace __this_cpu_ptr uses Christoph Lameter
2013-12-03 23:33 ` [PATCH 28/41] x86: Replace __get_cpu_var uses Christoph Lameter
2013-12-03 23:33 ` [PATCH 29/41] arm: Replace __this_cpu_ptr with raw_cpu_ptr Christoph Lameter
2013-12-03 23:33 ` [PATCH 30/41] mips: Replace __get_cpu_var uses Christoph Lameter
2013-12-03 23:33 ` [PATCH 31/41] s390: " Christoph Lameter
2013-12-03 23:33 ` [PATCH 32/41] ia64: " Christoph Lameter
2013-12-03 23:33 ` [PATCH 33/41] powerpc: " Christoph Lameter
2013-12-03 23:33 ` [PATCH 34/41] sparc: " Christoph Lameter
2013-12-03 23:33 ` [PATCH 35/41] tile: " Christoph Lameter
2013-12-09 20:52   ` Chris Metcalf
2013-12-10 15:23     ` Christoph Lameter
2013-12-03 23:33 ` [PATCH 36/41] blackfin: " Christoph Lameter
2013-12-03 23:33 ` [PATCH 37/41] avr32: Replace __get_cpu_var with __this_cpu_write Christoph Lameter
2013-12-03 23:33 ` [PATCH 38/41] alpha: Replace __get_cpu_var Christoph Lameter
2013-12-03 23:33 ` [PATCH 39/41] sh: Replace __get_cpu_var uses Christoph Lameter
2013-12-03 23:33 ` [PATCH 40/41] Remove __get_cpu_var and __raw_get_cpu_var macros [only in 3.15] Christoph Lameter
2013-12-03 23:33 ` [PATCH 41/41] percpu: Remove __this_cpu_ptr Christoph Lameter
2013-12-04  8:40 ` [PATCH 00/41] percpu: Consistent per cpu operations V1 Ingo Molnar
2013-12-04 16:43   ` Christoph Lameter
2013-12-04 17:02     ` Ingo Molnar
2013-12-05 19:09       ` Christoph Lameter
2014-01-17 15:18 [PATCH 00/41] percpu: Consistent per cpu operations V3 Christoph Lameter
2014-01-17 15:18 ` [PATCH 20/41] scheduler: Replace __get_cpu_var with this_cpu_ptr Christoph Lameter

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20131203233255.493692717@linux.com \
    --to=cl@linux.com \
    --cc=akpm@linuxfoundation.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@kernel.org \
    --cc=mingo@redhat.com \
    --cc=peterz@infradead.org \
    --cc=rostedt@goodmis.org \
    --cc=tglx@linutronix.de \
    --cc=tj@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).