tree:   https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git netfs-folio
head:   3ce8c87a69a9d67778ec2b31543ded42147da837
commit: efccabcb40444cc288a5ce6817fbe2407d6ed961 [5/6] netfs, 9p, afs, ceph: Use folios
config: sparc-randconfig-r032-20211101 (attached as .config)
compiler: sparc64-linux-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git/commit/?id=efccabcb40444cc288a5ce6817fbe2407d6ed961
        git remote add dhowells-fs https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git
        git fetch --no-tags dhowells-fs netfs-folio
        git checkout efccabcb40444cc288a5ce6817fbe2407d6ed961
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross ARCH=sparc

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   fs/netfs/read_helper.c: In function 'netfs_rreq_unlock':
>> fs/netfs/read_helper.c:435:25: error: implicit declaration of function 'flush_dcache_folio'; did you mean 'flush_dcache_page'? [-Werror=implicit-function-declaration]
     435 |                         flush_dcache_folio(folio);
         |                         ^~~~~~~~~~~~~~~~~~
         |                         flush_dcache_page
   cc1: some warnings being treated as errors

vim +435 fs/netfs/read_helper.c

   368	
   369	/*
   370	 * Unlock the folios in a read operation.  We need to set PG_fscache on any
   371	 * folios we're going to write back before we unlock them.
   372	 */
   373	static void netfs_rreq_unlock(struct netfs_read_request *rreq)
   374	{
   375		struct netfs_read_subrequest *subreq;
   376		struct folio *folio;
   377		unsigned int iopos, account = 0;
   378		pgoff_t start_page = rreq->start / PAGE_SIZE;
   379		pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
   380		bool subreq_failed = false;
   381	
   382		XA_STATE(xas, &rreq->mapping->i_pages, start_page);
   383	
   384		if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
   385			__clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
   386			list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
   387				__clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
   388			}
   389		}
   390	
   391		/* Walk through the pagecache and the I/O request lists simultaneously.
   392		 * We may have a mixture of cached and uncached sections and we only
   393		 * really want to write out the uncached sections.  This is slightly
   394		 * complicated by the possibility that we might have huge pages with a
   395		 * mixture inside.
   396		 */
   397		subreq = list_first_entry(&rreq->subrequests,
   398					  struct netfs_read_subrequest, rreq_link);
   399		iopos = 0;
   400		subreq_failed = (subreq->error < 0);
   401	
   402		trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
   403	
   404		rcu_read_lock();
   405		xas_for_each(&xas, folio, last_page) {
   406			unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
   407			unsigned int pgend = pgpos + folio_size(folio);
   408			bool pg_failed = false;
   409	
   410			for (;;) {
   411				if (!subreq) {
   412					pg_failed = true;
   413					break;
   414				}
   415				if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
   416					folio_start_fscache(folio);
   417				pg_failed |= subreq_failed;
   418				if (pgend < iopos + subreq->len)
   419					break;
   420	
   421				account += subreq->transferred;
   422				iopos += subreq->len;
   423				if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
   424					subreq = list_next_entry(subreq, rreq_link);
   425					subreq_failed = (subreq->error < 0);
   426				} else {
   427					subreq = NULL;
   428					subreq_failed = false;
   429				}
   430				if (pgend == iopos)
   431					break;
   432			}
   433	
   434			if
 (!pg_failed) {
 > 435				flush_dcache_folio(folio);
   436				folio_mark_uptodate(folio);
   437			}
   438	
   439			if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
   440				if (folio_index(folio) == rreq->no_unlock_folio &&
   441				    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
   442					_debug("no unlock");
   443				else
   444					folio_unlock(folio);
   445			}
   446		}
   447		rcu_read_unlock();
   448	
   449		task_io_account_read(account);
   450		if (rreq->netfs_ops->done)
   451			rreq->netfs_ops->done(rreq);
   452	}
   453	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org