tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   47466dcf84ee66a973ea7d2fca7e582fe9328932
commit: d68fee0d6334fbfb581f54e7620705553c59b47a [4647/6726] rcu: Mark rcu_state.gp_seq to detect concurrent writes
config: i386-randconfig-g001-20200304 (attached as .config)
compiler: gcc-7 (Debian 7.5.0-5) 7.5.0
reproduce:
        git checkout d68fee0d6334fbfb581f54e7620705553c59b47a
        # save the attached .config to linux build tree
        make ARCH=i386

If you fix the issue, kindly add the following tag
Reported-by: kbuild test robot

Note: the linux-next/master HEAD 47466dcf84ee66a973ea7d2fca7e582fe9328932 builds fine.
      It may have been fixed somewhere.

All errors (new ones prefixed by >>):

   kernel/rcu/tree.c: In function 'rcu_start_this_gp':
>> kernel/rcu/tree.c:1212:41: error: implicit declaration of function 'data_race'; did you mean 'notrace'? [-Werror=implicit-function-declaration]
     trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
                                            ^~~~~~~~~
                                            notrace
   kernel/rcu/tree.c: In function 'rcu_gp_init':
   kernel/rcu/tree.c:1498:2: error: implicit declaration of function 'ASSERT_EXCLUSIVE_WRITER' [-Werror=implicit-function-declaration]
     ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
     ^~~~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors
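Both undeclared helpers are annotations provided by the Kernel Concurrency Sanitizer (KCSAN) infrastructure, which this commit starts applying to rcu_state.gp_seq; with this randconfig their declarations are apparently not yet reachable from kernel/rcu/tree.c, so gcc falls back to implicit declarations and -Werror turns that into a hard error. As a rough illustration only (these are not the upstream definitions, which are more careful about type qualifiers and instrumentation), no-op fallbacks along the following lines would be enough to let the file compile when the race detector is unavailable:

/*
 * Illustrative stand-ins, NOT the kernel's real macros.
 *
 * data_race(expr): evaluate expr exactly once and hand back its value;
 * with no detector present there is nothing else to do.
 *
 * ASSERT_EXCLUSIVE_WRITER(var): reduce to a no-op that still type-checks
 * its argument by taking its address without reading it.
 */
#ifndef data_race
#define data_race(expr)						\
({								\
	typeof(expr) __val = (expr);	/* single evaluation */	\
	__val;							\
})
#endif

#ifndef ASSERT_EXCLUSIVE_WRITER
#define ASSERT_EXCLUSIVE_WRITER(var)				\
	do { (void)&(var); } while (0)
#endif

Stubs like these would only paper over the merge ordering between the RCU and KCSAN changes; the note above that the current linux-next HEAD builds fine suggests the real declarations have since landed there. The reported source context follows.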
vim +1212 kernel/rcu/tree.c

  1137	
  1138	/*
  1139	 * rcu_start_this_gp - Request the start of a particular grace period
  1140	 * @rnp_start: The leaf node of the CPU from which to start.
  1141	 * @rdp: The rcu_data corresponding to the CPU from which to start.
  1142	 * @gp_seq_req: The gp_seq of the grace period to start.
  1143	 *
  1144	 * Start the specified grace period, as needed to handle newly arrived
  1145	 * callbacks. The required future grace periods are recorded in each
  1146	 * rcu_node structure's ->gp_seq_needed field. Returns true if there
  1147	 * is reason to awaken the grace-period kthread.
  1148	 *
  1149	 * The caller must hold the specified rcu_node structure's ->lock, which
  1150	 * is why the caller is responsible for waking the grace-period kthread.
  1151	 *
  1152	 * Returns true if the GP thread needs to be awakened else false.
  1153	 */
  1154	static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
  1155				      unsigned long gp_seq_req)
  1156	{
  1157		bool ret = false;
  1158		struct rcu_node *rnp;
  1159	
  1160		/*
  1161		 * Use funnel locking to either acquire the root rcu_node
  1162		 * structure's lock or bail out if the need for this grace period
  1163		 * has already been recorded -- or if that grace period has in
  1164		 * fact already started. If there is already a grace period in
  1165		 * progress in a non-leaf node, no recording is needed because the
  1166		 * end of the grace period will scan the leaf rcu_node structures.
  1167		 * Note that rnp_start->lock must not be released.
  1168		 */
  1169		raw_lockdep_assert_held_rcu_node(rnp_start);
  1170		trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
  1171		for (rnp = rnp_start; 1; rnp = rnp->parent) {
  1172			if (rnp != rnp_start)
  1173				raw_spin_lock_rcu_node(rnp);
  1174			if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
  1175			    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
  1176			    (rnp != rnp_start &&
  1177			     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
  1178				trace_rcu_this_gp(rnp, rdp, gp_seq_req,
  1179						  TPS("Prestarted"));
  1180				goto unlock_out;
  1181			}
  1182			WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
  1183			if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
  1184				/*
  1185				 * We just marked the leaf or internal node, and a
  1186				 * grace period is in progress, which means that
  1187				 * rcu_gp_cleanup() will see the marking. Bail to
  1188				 * reduce contention.
  1189				 */
  1190				trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
  1191						  TPS("Startedleaf"));
  1192				goto unlock_out;
  1193			}
  1194			if (rnp != rnp_start && rnp->parent != NULL)
  1195				raw_spin_unlock_rcu_node(rnp);
  1196			if (!rnp->parent)
  1197				break;	/* At root, and perhaps also leaf. */
  1198		}
  1199	
  1200		/* If GP already in progress, just leave, otherwise start one. */
  1201		if (rcu_gp_in_progress()) {
  1202			trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
  1203			goto unlock_out;
  1204		}
  1205		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
  1206		WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
  1207		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
  1208		if (!READ_ONCE(rcu_state.gp_kthread)) {
  1209			trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
  1210			goto unlock_out;
  1211		}
> 1212		trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
  1213		ret = true;	/* Caller must wake GP kthread. */
  1214	unlock_out:
  1215		/* Push furthest requested GP to leaf node and rcu_data structure. */
  1216		if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
  1217			WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
  1218			WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
  1219		}
  1220		if (rnp != rnp_start)
  1221			raw_spin_unlock_rcu_node(rnp);
  1222		return ret;
  1223	}
  1224	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org