From: kernel test robot <lkp@intel.com>
To: kbuild-all@lists.01.org
Subject: [peterz-queue:sched/cleanup 4/4] kernel/rcu/tree.c:1385:5: error: implicit declaration of function 'irq_work_queue_remote'
Date: Tue, 20 Apr 2021 09:30:53 +0800
Message-ID: <202104200948.dlmNjUMv-lkp@intel.com>


tree:   https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git sched/cleanup
head:   05dd40f3f6f4b06c16b9ad246e5a5523f10b4dff
commit: 05dd40f3f6f4b06c16b9ad246e5a5523f10b4dff [4/4] rcu/tree: Use irq_work_queue_remote()
config: arm-randconfig-r034-20210419 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project 2b50f5a4343f8fb06acaa5c36355bcf58092c9cd)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install arm cross compiling tool for clang build
        # apt-get install binutils-arm-linux-gnueabi
        # https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git/commit/?id=05dd40f3f6f4b06c16b9ad246e5a5523f10b4dff
        git remote add peterz-queue https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git
        git fetch --no-tags peterz-queue sched/cleanup
        git checkout 05dd40f3f6f4b06c16b9ad246e5a5523f10b4dff
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 ARCH=arm 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

>> kernel/rcu/tree.c:1385:5: error: implicit declaration of function 'irq_work_queue_remote' [-Werror,-Wimplicit-function-declaration]
                                   irq_work_queue_remote(rdp->cpu, &rdp->rcu_iw);
                                   ^
   kernel/rcu/tree.c:1385:5: note: did you mean 'irq_work_queue_on'?
   include/linux/irq_work.h:52:6: note: 'irq_work_queue_on' declared here
   bool irq_work_queue_on(struct irq_work *work, int cpu);
        ^
   1 error generated.

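The likely root cause (inferred from the randconfig and clang's suggestion above, not verified against the branch itself): irq_work_queue_on() is declared unconditionally in include/linux/irq_work.h, whereas the new irq_work_queue_remote() helper appears to be declared only under CONFIG_SMP, so an ARM randconfig with CONFIG_SMP=n never sees a prototype for it and clang rejects the call as an implicit declaration. A minimal sketch of the presumed header layout — the CONFIG_SMP guard is the assumption here:

        /* include/linux/irq_work.h -- presumed layout, guard is an assumption */
        bool irq_work_queue(struct irq_work *work);
        bool irq_work_queue_on(struct irq_work *work, int cpu);	/* always visible */

        #ifdef CONFIG_SMP
        extern bool irq_work_queue_remote(int cpu, struct irq_work *work);	/* SMP-only */
        #endif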

vim +/irq_work_queue_remote +1385 kernel/rcu/tree.c

  1260	
  1261	/*
  1262	 * Return true if the specified CPU has passed through a quiescent
  1263	 * state by virtue of being in or having passed through a dynticks
  1264	 * idle state since the last call to dyntick_save_progress_counter()
  1265	 * for this same CPU, or by virtue of having been offline.
  1266	 */
  1267	static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
  1268	{
  1269		unsigned long jtsq;
  1270		bool *rnhqp;
  1271		bool *ruqp;
  1272		struct rcu_node *rnp = rdp->mynode;
  1273	
  1274		raw_lockdep_assert_held_rcu_node(rnp);
  1275	
  1276		/*
  1277		 * If the CPU passed through or entered a dynticks idle phase with
  1278		 * no active irq/NMI handlers, then we can safely pretend that the CPU
  1279		 * already acknowledged the request to pass through a quiescent
  1280		 * state.  Either way, that CPU cannot possibly be in an RCU
  1281		 * read-side critical section that started before the beginning
  1282		 * of the current RCU grace period.
  1283		 */
  1284		if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
  1285			trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
  1286			rcu_gpnum_ovf(rnp, rdp);
  1287			return 1;
  1288		}
  1289	
  1290		/*
  1291		 * Complain if a CPU that is considered to be offline from RCU's
  1292		 * perspective has not yet reported a quiescent state.  After all,
  1293		 * the offline CPU should have reported a quiescent state during
  1294		 * the CPU-offline process, or, failing that, by rcu_gp_init()
  1295		 * if it ran concurrently with either the CPU going offline or the
  1296		 * last task on a leaf rcu_node structure exiting its RCU read-side
  1297		 * critical section while all CPUs corresponding to that structure
  1298		 * are offline.  This added warning detects bugs in any of these
  1299		 * code paths.
  1300		 *
  1301		 * The rcu_node structure's ->lock is held here, which excludes
  1302	 * the relevant portions of the CPU-hotplug code, the grace-period
  1303		 * initialization code, and the rcu_read_unlock() code paths.
  1304		 *
  1305		 * For more detail, please refer to the "Hotplug CPU" section
  1306		 * of RCU's Requirements documentation.
  1307		 */
  1308		if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
  1309			bool onl;
  1310			struct rcu_node *rnp1;
  1311	
  1312			pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
  1313				__func__, rnp->grplo, rnp->grphi, rnp->level,
  1314				(long)rnp->gp_seq, (long)rnp->completedqs);
  1315			for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
  1316				pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
  1317					__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
  1318			onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
  1319			pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
  1320				__func__, rdp->cpu, ".o"[onl],
  1321				(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
  1322				(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
  1323			return 1; /* Break things loose after complaining. */
  1324		}
  1325	
  1326		/*
  1327		 * A CPU running for an extended time within the kernel can
  1328		 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
  1329		 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
  1330		 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
  1331		 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
  1332		 * variable are safe because the assignments are repeated if this
  1333		 * CPU failed to pass through a quiescent state.  This code
  1334		 * also checks .jiffies_resched in case jiffies_to_sched_qs
  1335		 * is set way high.
  1336		 */
  1337		jtsq = READ_ONCE(jiffies_to_sched_qs);
  1338		ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
  1339		rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
  1340		if (!READ_ONCE(*rnhqp) &&
  1341		    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
  1342		     time_after(jiffies, rcu_state.jiffies_resched) ||
  1343		     rcu_state.cbovld)) {
  1344			WRITE_ONCE(*rnhqp, true);
  1345			/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
  1346			smp_store_release(ruqp, true);
  1347		} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
  1348			WRITE_ONCE(*ruqp, true);
  1349		}
  1350	
  1351		/*
  1352		 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
  1353		 * The above code handles this, but only for straight cond_resched().
  1354		 * And some in-kernel loops check need_resched() before calling
  1355		 * cond_resched(), which defeats the above code for CPUs that are
  1356		 * running in-kernel with scheduling-clock interrupts disabled.
  1357		 * So hit them over the head with the resched_cpu() hammer!
  1358		 */
  1359		if (tick_nohz_full_cpu(rdp->cpu) &&
  1360		    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
  1361		     rcu_state.cbovld)) {
  1362			WRITE_ONCE(*ruqp, true);
  1363			resched_cpu(rdp->cpu);
  1364			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
  1365		}
  1366	
  1367		/*
  1368		 * If more than halfway to RCU CPU stall-warning time, invoke
  1369		 * resched_cpu() more frequently to try to loosen things up a bit.
  1370		 * Also check to see if the CPU is getting hammered with interrupts,
  1371		 * but only once per grace period, just to keep the IPIs down to
  1372		 * a dull roar.
  1373		 */
  1374		if (time_after(jiffies, rcu_state.jiffies_resched)) {
  1375			if (time_after(jiffies,
  1376				       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
  1377				resched_cpu(rdp->cpu);
  1378				WRITE_ONCE(rdp->last_fqs_resched, jiffies);
  1379			}
  1380			if (!rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
  1381			    (rnp->ffmask & rdp->grpmask)) {
  1382				rdp->rcu_iw_gp_seq = rnp->gp_seq;
  1383				if (likely(rdp->cpu != smp_processor_id())) {
  1384					rdp->rcu_iw_pending = true;
> 1385					irq_work_queue_remote(rdp->cpu, &rdp->rcu_iw);
  1386				}
  1387			}
  1388		}
  1389	
  1390		return 0;
  1391	}
  1392	
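Since the call at line 1385 sits behind "if (likely(rdp->cpu != smp_processor_id()))", a UP kernel should never reach it at runtime; the failure is purely a missing prototype at compile time. One way to unbreak !SMP builds — a sketch under the assumption above, not the author's actual fix; the maintainer may instead prefer to compile the caller out — is a static-inline stub next to the SMP declaration:

        /* include/linux/irq_work.h -- hypothetical !SMP fallback */
        #ifdef CONFIG_SMP
        extern bool irq_work_queue_remote(int cpu, struct irq_work *work);
        #else
        static inline bool irq_work_queue_remote(int cpu, struct irq_work *work)
        {
        	/*
        	 * On UP the only possible target is the local CPU, and callers
        	 * such as rcu_implicit_dynticks_qs() filter that case out
        	 * before calling, so this stub should be unreachable.
        	 */
        	WARN_ON_ONCE(1);
        	return false;
        }
        #endif

Note that guarding only the call site with IS_ENABLED(CONFIG_SMP) would not be enough on its own: -Wimplicit-function-declaration fires as soon as the identifier is used without a visible prototype, so a declaration (or stub) has to exist either way.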

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

