All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/3] cpusets: introduce cpuset->cpumask_lock
@ 2009-09-10 19:13 Oleg Nesterov
  0 siblings, 0 replies; 2+ messages in thread
From: Oleg Nesterov @ 2009-09-10 19:13 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Rusty Russell, linux-kernel

Preparation for the next patch.

Introduce cpuset->cpumask_lock. From now on, ->cpus_allowed of the "active"
cpuset is always changed under this spinlock_t.

This is kept as a separate patch to simplify review and fixing, in case I
missed some places where ->cpus_allowed is updated.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
---

 kernel/cpuset.c |    9 +++++++++
 1 file changed, 9 insertions(+)

--- CPUHP/kernel/cpuset.c~1_ADD_CPUMASK_LOCK	2009-09-10 19:35:16.000000000 +0200
+++ CPUHP/kernel/cpuset.c	2009-09-10 20:06:39.000000000 +0200
@@ -92,6 +92,7 @@ struct cpuset {
 	struct cgroup_subsys_state css;
 
 	unsigned long flags;		/* "unsigned long" so bitops work */
+	spinlock_t cpumask_lock;	/* protects ->cpus_allowed */
 	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
 	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
 
@@ -891,7 +892,9 @@ static int update_cpumask(struct cpuset 
 	is_load_balanced = is_sched_load_balance(trialcs);
 
 	mutex_lock(&callback_mutex);
+	spin_lock(&cs->cpumask_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+	spin_unlock(&cs->cpumask_lock);
 	mutex_unlock(&callback_mutex);
 
 	/*
@@ -1781,6 +1784,8 @@ static struct cgroup_subsys_state *cpuse
 	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&cs->cpumask_lock);
 	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
 		kfree(cs);
 		return ERR_PTR(-ENOMEM);
@@ -1981,8 +1986,10 @@ static void scan_for_empty_cpusets(struc
 
 		/* Remove offline cpus and mems from this cpuset. */
 		mutex_lock(&callback_mutex);
+		spin_lock(&cp->cpumask_lock);
 		cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
 			    cpu_online_mask);
+		spin_unlock(&cp->cpumask_lock);
 		nodes_and(cp->mems_allowed, cp->mems_allowed,
 						node_states[N_HIGH_MEMORY]);
 		mutex_unlock(&callback_mutex);
@@ -2030,7 +2037,9 @@ static int cpuset_track_online_cpus(stru
 
 	cgroup_lock();
 	mutex_lock(&callback_mutex);
+	spin_lock(&top_cpuset.cpumask_lock);
 	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
+	spin_unlock(&top_cpuset.cpumask_lock);
 	mutex_unlock(&callback_mutex);
 	scan_for_empty_cpusets(&top_cpuset);
 	ndoms = generate_sched_domains(&doms, &attr);


^ permalink raw reply	[flat|nested] 2+ messages in thread

* [PATCH 1/3] cpusets: introduce cpuset->cpumask_lock
@ 2009-09-10 19:22 Oleg Nesterov
  0 siblings, 0 replies; 2+ messages in thread
From: Oleg Nesterov @ 2009-09-10 19:22 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Gautham Shenoy, Ingo Molnar, Jiri Slaby, Lai Jiangshan, Li Zefan,
	Miao Xie, Paul Menage, Peter Zijlstra, Rafael J. Wysocki,
	Rusty Russell, linux-kernel

Preparation for the next patch.

Introduce cpuset->cpumask_lock. From now on, ->cpus_allowed of the "active"
cpuset is always changed under this spinlock_t.

This is kept as a separate patch to simplify review and fixing, in case I
missed some places where ->cpus_allowed is updated.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
---

 kernel/cpuset.c |    9 +++++++++
 1 file changed, 9 insertions(+)

--- CPUHP/kernel/cpuset.c~1_ADD_CPUMASK_LOCK	2009-09-10 19:35:16.000000000 +0200
+++ CPUHP/kernel/cpuset.c	2009-09-10 20:06:39.000000000 +0200
@@ -92,6 +92,7 @@ struct cpuset {
 	struct cgroup_subsys_state css;
 
 	unsigned long flags;		/* "unsigned long" so bitops work */
+	spinlock_t cpumask_lock;	/* protects ->cpus_allowed */
 	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
 	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
 
@@ -891,7 +892,9 @@ static int update_cpumask(struct cpuset 
 	is_load_balanced = is_sched_load_balance(trialcs);
 
 	mutex_lock(&callback_mutex);
+	spin_lock(&cs->cpumask_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+	spin_unlock(&cs->cpumask_lock);
 	mutex_unlock(&callback_mutex);
 
 	/*
@@ -1781,6 +1784,8 @@ static struct cgroup_subsys_state *cpuse
 	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&cs->cpumask_lock);
 	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
 		kfree(cs);
 		return ERR_PTR(-ENOMEM);
@@ -1981,8 +1986,10 @@ static void scan_for_empty_cpusets(struc
 
 		/* Remove offline cpus and mems from this cpuset. */
 		mutex_lock(&callback_mutex);
+		spin_lock(&cp->cpumask_lock);
 		cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
 			    cpu_online_mask);
+		spin_unlock(&cp->cpumask_lock);
 		nodes_and(cp->mems_allowed, cp->mems_allowed,
 						node_states[N_HIGH_MEMORY]);
 		mutex_unlock(&callback_mutex);
@@ -2030,7 +2037,9 @@ static int cpuset_track_online_cpus(stru
 
 	cgroup_lock();
 	mutex_lock(&callback_mutex);
+	spin_lock(&top_cpuset.cpumask_lock);
 	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
+	spin_unlock(&top_cpuset.cpumask_lock);
 	mutex_unlock(&callback_mutex);
 	scan_for_empty_cpusets(&top_cpuset);
 	ndoms = generate_sched_domains(&doms, &attr);


^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2009-09-10 19:27 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-09-10 19:13 [PATCH 1/3] cpusets: introduce cpuset->cpumask_lock Oleg Nesterov
2009-09-10 19:22 Oleg Nesterov

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.