Hi,

I love your patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v5.6-rc3 next-20200224]
[cannot apply to xfs-linux/for-next linux/master djwong-xfs/djwong-devel]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest using the '--base' option to specify
the base tree in git format-patch, please see
https://stackoverflow.com/a/37406982]

url:      https://github.com/0day-ci/linux/commits/ira-weiny-intel-com/Enable-per-file-per-directory-DAX-operations-V4/20200222-083336
base:     https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 0c0ddd6ae47c9238c18f475bcca675ca74c9dc31
config:   nds32-randconfig-a001-20200224 (attached as .config)
compiler: nds32le-linux-gcc (GCC) 9.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        GCC_VERSION=9.2.0 make.cross ARCH=nds32

If you fix the issue, kindly add the following tag
Reported-by: kbuild test robot

All errors (new ones prefixed by >>):

   In file included from include/linux/string.h:6,
                    from include/linux/uuid.h:12,
                    from fs/xfs/xfs_linux.h:10,
                    from fs/xfs/xfs.h:22,
                    from fs/xfs/xfs_icache.c:6:
   fs/xfs/xfs_icache.c: In function 'xfs_iget_cache_hit':
>> fs/xfs/xfs_icache.c:423:33: error: 'struct inode' has no member named 'i_aops_sem'
     423 |  ASSERT(!rwsem_is_locked(&inode->i_aops_sem));
         |                                ^~
   include/linux/compiler.h:77:40: note: in definition of macro 'likely'
      77 | # define likely(x) __builtin_expect(!!(x), 1)
         |                                        ^
   fs/xfs/xfs_icache.c:423:3: note: in expansion of macro 'ASSERT'
     423 |  ASSERT(!rwsem_is_locked(&inode->i_aops_sem));
         |  ^~~~~~

vim +423 fs/xfs/xfs_icache.c

   342	
   343	/*
   344	 * Check the validity of the inode we just found in the cache
   345	 */
   346	static int
   347	xfs_iget_cache_hit(
   348		struct xfs_perag	*pag,
   349		struct xfs_inode	*ip,
   350		xfs_ino_t		ino,
   351		int			flags,
   352		int			lock_flags) __releases(RCU)
   353	{
   354		struct inode		*inode = VFS_I(ip);
   355		struct xfs_mount	*mp = ip->i_mount;
   356		int			error;
   357	
   358		/*
   359		 * check for re-use of an inode within an RCU grace period due to the
   360		 * radix tree nodes not being updated yet. We monitor for this by
   361		 * setting the inode number to zero before freeing the inode structure.
   362		 * If the inode has been reallocated and set up, then the inode number
   363		 * will not match, so check for that, too.
   364		 */
   365		spin_lock(&ip->i_flags_lock);
   366		if (ip->i_ino != ino) {
   367			trace_xfs_iget_skip(ip);
   368			XFS_STATS_INC(mp, xs_ig_frecycle);
   369			error = -EAGAIN;
   370			goto out_error;
   371		}
   372	
   373	
   374		/*
   375		 * If we are racing with another cache hit that is currently
   376		 * instantiating this inode or currently recycling it out of
   377		 * reclaimable state, wait for the initialisation to complete
   378		 * before continuing.
   379		 *
   380		 * XXX(hch): eventually we should do something equivalent to
   381		 * wait_on_inode to wait for these flags to be cleared
   382		 * instead of polling for it.
   383		 */
   384		if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
   385			trace_xfs_iget_skip(ip);
   386			XFS_STATS_INC(mp, xs_ig_frecycle);
   387			error = -EAGAIN;
   388			goto out_error;
   389		}
   390	
   391		/*
   392		 * Check the inode free state is valid. This also detects lookup
   393		 * racing with unlinks.
   394		 */
   395		error = xfs_iget_check_free_state(ip, flags);
   396		if (error)
   397			goto out_error;
   398	
   399		/*
   400		 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
   401		 * Need to carefully get it back into usable state.
   402		 */
   403		if (ip->i_flags & XFS_IRECLAIMABLE) {
   404			trace_xfs_iget_reclaim(ip);
   405	
   406			if (flags & XFS_IGET_INCORE) {
   407				error = -EAGAIN;
   408				goto out_error;
   409			}
   410	
   411			/*
   412			 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
   413			 * from stomping over us while we recycle the inode. We can't
   414			 * clear the radix tree reclaimable tag yet as it requires
   415			 * pag_ici_lock to be held exclusive.
   416			 */
   417			ip->i_flags |= XFS_IRECLAIM;
   418	
   419			spin_unlock(&ip->i_flags_lock);
   420			rcu_read_unlock();
   421	
   422			ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 > 423			ASSERT(!rwsem_is_locked(&inode->i_aops_sem));
   424			error = xfs_reinit_inode(mp, inode);
   425			if (error) {
   426				bool wake;
   427				/*
   428				 * Re-initializing the inode failed, and we are in deep
   429				 * trouble. Try to re-add it to the reclaim list.
   430				 */
   431				rcu_read_lock();
   432				spin_lock(&ip->i_flags_lock);
   433				wake = !!__xfs_iflags_test(ip, XFS_INEW);
   434				ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
   435				if (wake)
   436					wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
   437				ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
   438				trace_xfs_iget_reclaim_fail(ip);
   439				goto out_error;
   440			}
   441	
   442			spin_lock(&pag->pag_ici_lock);
   443			spin_lock(&ip->i_flags_lock);
   444	
   445			/*
   446			 * Clear the per-lifetime state in the inode as we are now
   447			 * effectively a new inode and need to return to the initial
   448			 * state before reuse occurs.
   449			 */
   450			ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
   451			ip->i_flags |= XFS_INEW;
   452			xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
   453			inode->i_state = I_NEW;
   454			ip->i_sick = 0;
   455			ip->i_checked = 0;
   456	
   457			spin_unlock(&ip->i_flags_lock);
   458			spin_unlock(&pag->pag_ici_lock);
   459		} else {
   460			/* If the VFS inode is being torn down, pause and try again. */
   461			if (!igrab(inode)) {
   462				trace_xfs_iget_skip(ip);
   463				error = -EAGAIN;
   464				goto out_error;
   465			}
   466	
   467			/* We've got a live one. */
   468			spin_unlock(&ip->i_flags_lock);
   469			rcu_read_unlock();
   470			trace_xfs_iget_hit(ip);
   471		}
   472	
   473		if (lock_flags != 0)
   474			xfs_ilock(ip, lock_flags);
   475	
   476		if (!(flags & XFS_IGET_INCORE))
   477			xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
   478		XFS_STATS_INC(mp, xs_ig_found);
   479	
   480		return 0;
   481	
   482	out_error:
   483		spin_unlock(&ip->i_flags_lock);
   484		rcu_read_unlock();
   485		return error;
   486	}
   487	
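Note on the failure mode: this nds32 randconfig apparently does not enable
whatever Kconfig option the patch uses to add the i_aops_sem member to
struct inode (CONFIG_FS_DAX is only a guess here), so the new assertion in
xfs_iget_cache_hit() dereferences a member that does not exist in this
build. One way to keep the assertion without breaking such configs is to
hide it behind a helper that compiles away when the member is absent. The
sketch below is illustrative only, not the patch author's fix; the helper
name and the CONFIG_FS_DAX guard are assumptions:

	/*
	 * Hypothetical helper: assert that the aops semaphore is not held,
	 * but only on configs where struct inode actually has the
	 * i_aops_sem member.  The CONFIG_FS_DAX guard is an assumption
	 * about how the patch conditionally defines the field.
	 */
	#ifdef CONFIG_FS_DAX
	static inline void xfs_assert_aops_sem_unlocked(struct inode *inode)
	{
		ASSERT(!rwsem_is_locked(&inode->i_aops_sem));
	}
	#else
	static inline void xfs_assert_aops_sem_unlocked(struct inode *inode)
	{
		/* no i_aops_sem in struct inode on this config */
	}
	#endif

The ASSERT at line 423 would then become
xfs_assert_aops_sem_unlocked(inode), keeping the check on DAX-enabled
builds and turning it into a no-op everywhere else.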