From: Stephen Rothwell <sfr@canb.auug.org.au>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-next@vger.kernel.org, linux-kernel@vger.kernel.org,
	Shaohua Li <shli@kernel.org>, Wang YanQing <udknight@gmail.com>,
	Thomas Gleixner <tglx@linutronix.de>, Ingo Molnar <mingo@elte.hu>,
	"H. Peter Anvin" <hpa@zytor.com>,
	Peter Zijlstra <peterz@infradead.org>
Subject: linux-next: manual merge of the akpm tree with the tip tree
Date: Mon, 28 Jan 2013 23:29:17 +1100	[thread overview]
Message-ID: <20130128232917.f8e35b592e7f8282227980f0@canb.auug.org.au> (raw)

Hi Andrew,

Today's linux-next merge of the akpm tree got a conflict in kernel/smp.c between commit c7b798525b50 ("smp: Fix SMP function call empty cpu mask race") from the tip tree and commit "smp: make smp_call_function_many() use logic similar to smp_call_function_single()" from the akpm tree.
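
For anyone not following the csd rework: the akpm-tree commit drops the single shared, refcounted call block on the global queue and instead gives every target cpu its own call_single_data slot, queued on that cpu's call_single_queue, while the tip-tree commit adds the cpumask_ipi copy so the IPI mask cannot be cleared under us. A rough user-space sketch of the per-target slot/queue pattern the resolution below keeps (pthreads standing in for cpus and IPIs; every name here is invented for illustration, it is not the kernel's code):

/*
 * Minimal user-space analogue (pthreads, not kernel code) of the
 * per-target slot/queue pattern.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define N_TARGETS 2

struct csd {				/* private request slot per target */
	void (*func)(void *info);
	void *info;
	atomic_int pending;		/* stands in for CSD_FLAG_LOCK */
};

struct target {				/* per-target queue, like call_single_queue */
	pthread_mutex_t lock;
	struct csd *req;
};

static struct csd slots[N_TARGETS];
static struct target targets[N_TARGETS] = {
	{ PTHREAD_MUTEX_INITIALIZER, NULL },
	{ PTHREAD_MUTEX_INITIALIZER, NULL },
};

static void *target_thread(void *arg)	/* plays the IPI handler */
{
	struct target *t = arg;
	struct csd *csd;

	do {
		pthread_mutex_lock(&t->lock);
		csd = t->req;
		t->req = NULL;
		pthread_mutex_unlock(&t->lock);
	} while (!csd);

	csd->func(csd->info);
	atomic_store(&csd->pending, 0);	/* csd_unlock() analogue */
	return NULL;
}

static void call_many(void (*func)(void *), void *info)
{
	int i;

	/* one private slot per target, queued on that target's own list */
	for (i = 0; i < N_TARGETS; i++) {
		struct csd *csd = &slots[i];

		atomic_store(&csd->pending, 1);	/* csd_lock() analogue */
		csd->func = func;
		csd->info = info;

		pthread_mutex_lock(&targets[i].lock);
		targets[i].req = csd;		/* list_add_tail() analogue */
		pthread_mutex_unlock(&targets[i].lock);
	}
	/* the kernel would now send the IPIs from a stable copy of the mask */

	/* wait == true: spin on each private slot, like csd_lock_wait() */
	for (i = 0; i < N_TARGETS; i++)
		while (atomic_load(&slots[i].pending))
			;
}

static void say(void *info)
{
	printf("hello from %s\n", (char *)info);
}

int main(void)
{
	pthread_t tid[N_TARGETS];
	int i;

	for (i = 0; i < N_TARGETS; i++)
		pthread_create(&tid[i], NULL, target_thread, &targets[i]);
	call_many(say, "call_many");
	for (i = 0; i < N_TARGETS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}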

I fixed it up (maybe - see below) and can carry the fix as necessary (no
action is required).

-- 
Cheers,
Stephen Rothwell                    sfr@canb.auug.org.au

diff --cc kernel/smp.c
index 93e576e,51a81b0..0000000
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@@ -30,10 -21,8 +21,9 @@@ enum 
  };
  
  struct call_function_data {
- 	struct call_single_data	csd;
- 	atomic_t		refs;
+ 	struct call_single_data	__percpu *csd;
  	cpumask_var_t		cpumask;
 +	cpumask_var_t		cpumask_ipi;
  };
  
  static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@@ -482,85 -397,39 +398,45 @@@ void smp_call_function_many(const struc
  	}
  
  	data = &__get_cpu_var(cfd_data);
- 	csd_lock(&data->csd);
- 
- 	/* This BUG_ON verifies our reuse assertions and can be removed */
- 	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
- 
- 	/*
- 	 * The global call function queue list add and delete are protected
- 	 * by a lock, but the list is traversed without any lock, relying
- 	 * on the rcu list add and delete to allow safe concurrent traversal.
- 	 * We reuse the call function data without waiting for any grace
- 	 * period after some other cpu removes it from the global queue.
- 	 * This means a cpu might find our data block as it is being
- 	 * filled out.
- 	 *
- 	 * We hold off the interrupt handler on the other cpu by
- 	 * ordering our writes to the cpu mask vs our setting of the
- 	 * refs counter.  We assert only the cpu owning the data block
- 	 * will set a bit in cpumask, and each bit will only be cleared
- 	 * by the subject cpu.  Each cpu must first find its bit is
- 	 * set and then check that refs is set indicating the element is
- 	 * ready to be processed, otherwise it must skip the entry.
- 	 *
- 	 * On the previous iteration refs was set to 0 by another cpu.
- 	 * To avoid the use of transitivity, set the counter to 0 here
- 	 * so the wmb will pair with the rmb in the interrupt handler.
- 	 */
- 	atomic_set(&data->refs, 0);	/* convert 3rd to 1st party write */
  
- 	data->csd.func = func;
- 	data->csd.info = info;
- 
- 	/* Ensure 0 refs is visible before mask.  Also orders func and info */
- 	smp_wmb();
- 
- 	/* We rely on the "and" being processed before the store */
  	cpumask_and(data->cpumask, mask, cpu_online_mask);
  	cpumask_clear_cpu(this_cpu, data->cpumask);
- 	refs = cpumask_weight(data->cpumask);
  
  	/* Some callers race with other cpus changing the passed mask */
- 	if (unlikely(!refs)) {
- 		csd_unlock(&data->csd);
+ 	if (unlikely(!cpumask_weight(data->cpumask)))
  		return;
- 	}
  
 +	/*
 +	 * After we put an entry into the list, data->cpumask
 +	 * may be cleared again when another CPU sends another IPI for
 +	 * a SMP function call, so data->cpumask will be zero.
 +	 */
 +	cpumask_copy(data->cpumask_ipi, data->cpumask);
- 	raw_spin_lock_irqsave(&call_function.lock, flags);
- 	/*
- 	 * Place entry at the _HEAD_ of the list, so that any cpu still
- 	 * observing the entry in generic_smp_call_function_interrupt()
- 	 * will not miss any other list entries:
- 	 */
- 	list_add_rcu(&data->csd.list, &call_function.queue);
- 	/*
- 	 * We rely on the wmb() in list_add_rcu to complete our writes
- 	 * to the cpumask before this write to refs, which indicates
- 	 * data is on the list and is ready to be processed.
- 	 */
- 	atomic_set(&data->refs, refs);
- 	raw_spin_unlock_irqrestore(&call_function.lock, flags);
- 
- 	/*
- 	 * Make the list addition visible before sending the ipi.
- 	 * (IPIs must obey or appear to obey normal Linux cache
- 	 * coherency rules -- see comment in generic_exec_single).
- 	 */
- 	smp_mb();
+ 	for_each_cpu(cpu, data->cpumask) {
+ 		struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
+ 		struct call_single_queue *dst =
+ 					&per_cpu(call_single_queue, cpu);
+ 		unsigned long flags;
+ 
+ 		csd_lock(csd);
+ 		csd->func = func;
+ 		csd->info = info;
+ 
+ 		raw_spin_lock_irqsave(&dst->lock, flags);
+ 		list_add_tail(&csd->list, &dst->list);
+ 		raw_spin_unlock_irqrestore(&dst->lock, flags);
+ 	}
  
  	/* Send a message to all CPUs in the map */
 -	arch_send_call_function_ipi_mask(data->cpumask);
 +	arch_send_call_function_ipi_mask(data->cpumask_ipi);
  
- 	/* Optionally wait for the CPUs to complete */
- 	if (wait)
- 		csd_lock_wait(&data->csd);
+ 	if (wait) {
+ 		for_each_cpu(cpu, data->cpumask) {
+ 			struct call_single_data *csd =
+ 					per_cpu_ptr(data->csd, cpu);
+ 			csd_lock_wait(csd);
+ 		}
+ 	}
  }
  EXPORT_SYMBOL(smp_call_function_many);
  
