* possible deadlock in __do_page_fault
@ 2018-09-20 21:04 syzbot
  2018-09-20 21:10 ` Andrew Morton
  2018-10-01  5:23 ` syzbot
  0 siblings, 2 replies; 25+ messages in thread
From: syzbot @ 2018-09-20 21:04 UTC (permalink / raw)
  To: ak, akpm, hannes, jack, jrdr.linux, linux-kernel, linux-mm,
	mawilcox, mgorman, syzkaller-bugs

Hello,

syzbot found the following crash on:

HEAD commit:    a0cb0cabe4bb Add linux-next specific files for 20180920
git tree:       linux-next
console output: https://syzkaller.appspot.com/x/log.txt?x=15139721400000
kernel config:  https://syzkaller.appspot.com/x/.config?x=786006c5dafbadf6
dashboard link: https://syzkaller.appspot.com/bug?extid=a76129f18c89f3e2ddd4
compiler:       gcc (GCC) 8.0.1 20180413 (experimental)

Unfortunately, I don't have any reproducer for this crash yet.

IMPORTANT: if you fix the bug, please add the following tag to the commit:
Reported-by: syzbot+a76129f18c89f3e2ddd4@syzkaller.appspotmail.com


======================================================
WARNING: possible circular locking dependency detected
4.19.0-rc4-next-20180920+ #76 Not tainted
------------------------------------------------------
syz-executor3/21327 is trying to acquire lock:
000000009bc5286f (&mm->mmap_sem){++++}, at: __do_page_fault+0xb61/0xec0  
arch/x86/mm/fault.c:1333

but task is already holding lock:
00000000a2c51c08 (&sb->s_type->i_mutex_key#10){+.+.}, at: inode_lock  
include/linux/fs.h:745 [inline]
00000000a2c51c08 (&sb->s_type->i_mutex_key#10){+.+.}, at:  
generic_file_write_iter+0xed/0x870 mm/filemap.c:3304

which lock already depends on the new lock.


the existing dependency chain (in reverse order) is:

-> #2 (&sb->s_type->i_mutex_key#10){+.+.}:
        down_write+0x8a/0x130 kernel/locking/rwsem.c:70
        inode_lock include/linux/fs.h:745 [inline]
        shmem_fallocate+0x18b/0x12c0 mm/shmem.c:2651
        ashmem_shrink_scan+0x238/0x660 drivers/staging/android/ashmem.c:455
        ashmem_ioctl+0x3ae/0x13a0 drivers/staging/android/ashmem.c:797
        vfs_ioctl fs/ioctl.c:46 [inline]
        file_ioctl fs/ioctl.c:501 [inline]
        do_vfs_ioctl+0x1de/0x1720 fs/ioctl.c:685
        ksys_ioctl+0xa9/0xd0 fs/ioctl.c:702
        __do_sys_ioctl fs/ioctl.c:709 [inline]
        __se_sys_ioctl fs/ioctl.c:707 [inline]
        __x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:707
        do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

-> #1 (ashmem_mutex){+.+.}:
        __mutex_lock_common kernel/locking/mutex.c:925 [inline]
        __mutex_lock+0x166/0x1700 kernel/locking/mutex.c:1072
        mutex_lock_nested+0x16/0x20 kernel/locking/mutex.c:1087
        ashmem_mmap+0x55/0x520 drivers/staging/android/ashmem.c:361
        call_mmap include/linux/fs.h:1830 [inline]
        mmap_region+0xe82/0x1cd0 mm/mmap.c:1762
        do_mmap+0xa10/0x1220 mm/mmap.c:1535
        do_mmap_pgoff include/linux/mm.h:2298 [inline]
        vm_mmap_pgoff+0x213/0x2c0 mm/util.c:357
        ksys_mmap_pgoff+0x4da/0x660 mm/mmap.c:1585
        __do_sys_mmap arch/x86/kernel/sys_x86_64.c:100 [inline]
        __se_sys_mmap arch/x86/kernel/sys_x86_64.c:91 [inline]
        __x64_sys_mmap+0xe9/0x1b0 arch/x86/kernel/sys_x86_64.c:91
        do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

-> #0 (&mm->mmap_sem){++++}:
        lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3900
        down_read+0x8d/0x120 kernel/locking/rwsem.c:24
        __do_page_fault+0xb61/0xec0 arch/x86/mm/fault.c:1333
        do_page_fault+0xed/0x7d1 arch/x86/mm/fault.c:1472
        page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1139
        fault_in_pages_readable include/linux/pagemap.h:601 [inline]
        iov_iter_fault_in_readable+0x1b4/0x450 lib/iov_iter.c:421
        generic_perform_write+0x216/0x6a0 mm/filemap.c:3144
        __generic_file_write_iter+0x26e/0x630 mm/filemap.c:3279
        generic_file_write_iter+0x436/0x870 mm/filemap.c:3307
        call_write_iter include/linux/fs.h:1825 [inline]
        do_iter_readv_writev+0x8b0/0xa80 fs/read_write.c:680
        do_iter_write+0x185/0x5f0 fs/read_write.c:959
        vfs_writev+0x1f1/0x360 fs/read_write.c:1004
        do_pwritev+0x1cc/0x280 fs/read_write.c:1093
        __do_sys_pwritev fs/read_write.c:1140 [inline]
        __se_sys_pwritev fs/read_write.c:1135 [inline]
        __x64_sys_pwritev+0x9a/0xf0 fs/read_write.c:1135
        do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

other info that might help us debug this:

Chain exists of:
   &mm->mmap_sem --> ashmem_mutex --> &sb->s_type->i_mutex_key#10

  Possible unsafe locking scenario:

        CPU0                    CPU1
        ----                    ----
   lock(&sb->s_type->i_mutex_key#10);
                                lock(ashmem_mutex);
                                lock(&sb->s_type->i_mutex_key#10);
   lock(&mm->mmap_sem);

  *** DEADLOCK ***
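
The scenario above boils down to two tasks taking the same three locks in conflicting orders. The following minimal pthread sketch reproduces the shape of the cycle in userspace; the mutex names mirror the report, but the code is purely illustrative and is not kernel code:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t mmap_sem     = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ashmem_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inode_mutex  = PTHREAD_MUTEX_INITIALIZER;

/* pwritev() path (edge #0): inode lock held, then a fault takes mmap_sem. */
static void *writer(void *arg)
{
        pthread_mutex_lock(&inode_mutex);
        sleep(1);                          /* widen the race window */
        pthread_mutex_lock(&mmap_sem);     /* page fault under inode lock */
        pthread_mutex_unlock(&mmap_sem);
        pthread_mutex_unlock(&inode_mutex);
        return NULL;
}

/* mmap()/ioctl paths (edges #1 and #2): mmap_sem -> ashmem_mutex -> inode lock. */
static void *mapper(void *arg)
{
        pthread_mutex_lock(&mmap_sem);
        pthread_mutex_lock(&ashmem_mutex);
        sleep(1);
        pthread_mutex_lock(&inode_mutex);
        pthread_mutex_unlock(&inode_mutex);
        pthread_mutex_unlock(&ashmem_mutex);
        pthread_mutex_unlock(&mmap_sem);
        return NULL;
}

int main(void)
{
        pthread_t a, b;
        pthread_create(&a, NULL, writer, NULL);
        pthread_create(&b, NULL, mapper, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        puts("no deadlock this run");      /* usually never reached */
        return 0;
}

With the sleep() calls widening the window, writer blocks on mmap_sem while holding inode_mutex and mapper blocks on inode_mutex while holding mmap_sem, which is exactly the cycle lockdep flags here before it can happen for real.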

2 locks held by syz-executor3/21327:
  #0: 000000003de4eab1 (sb_writers#3){.+.+}, at: file_start_write  
include/linux/fs.h:2784 [inline]
  #0: 000000003de4eab1 (sb_writers#3){.+.+}, at: vfs_writev+0x2bd/0x360  
fs/read_write.c:1003
  #1: 00000000a2c51c08 (&sb->s_type->i_mutex_key#10){+.+.}, at: inode_lock  
include/linux/fs.h:745 [inline]
  #1: 00000000a2c51c08 (&sb->s_type->i_mutex_key#10){+.+.}, at:  
generic_file_write_iter+0xed/0x870 mm/filemap.c:3304

stack backtrace:
CPU: 1 PID: 21327 Comm: syz-executor3 Not tainted 4.19.0-rc4-next-20180920+  
#76
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS  
Google 01/01/2011
Call Trace:
  __dump_stack lib/dump_stack.c:77 [inline]
  dump_stack+0x1d3/0x2c4 lib/dump_stack.c:113
  print_circular_bug.isra.33.cold.54+0x1bd/0x27d  
kernel/locking/lockdep.c:1221
  check_prev_add kernel/locking/lockdep.c:1861 [inline]
  check_prevs_add kernel/locking/lockdep.c:1974 [inline]
  validate_chain kernel/locking/lockdep.c:2415 [inline]
  __lock_acquire+0x33e4/0x4ec0 kernel/locking/lockdep.c:3411
  lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3900
  down_read+0x8d/0x120 kernel/locking/rwsem.c:24
  __do_page_fault+0xb61/0xec0 arch/x86/mm/fault.c:1333
  do_page_fault+0xed/0x7d1 arch/x86/mm/fault.c:1472
  page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1139
RIP: 0010:fault_in_pages_readable include/linux/pagemap.h:601 [inline]
RIP: 0010:iov_iter_fault_in_readable+0x1b4/0x450 lib/iov_iter.c:421
Code: fd 49 39 dc 76 17 eb 3c e8 e9 a0 ef fd 49 81 c4 00 10 00 00 4c 39 a5  
28 ff ff ff 72 2e e8 d4 a0 ef fd 0f 1f 00 0f ae e8 31 db <41> 8a 04 24 0f  
1f 00 31 ff 89 de 88 85 58 ff ff ff e8 c6 a1 ef fd
RSP: 0018:ffff88018dfe7650 EFLAGS: 00010246
RAX: 0000000000040000 RBX: 0000000000000000 RCX: ffffc90005662000
RDX: 00000000000001c2 RSI: ffffffff838daf1c RDI: 0000000000000005
RBP: ffff88018dfe7728 R08: ffff880198ad2240 R09: ffffed00319dc039
R10: ffffed00319dc039 R11: ffff88018cee01cb R12: 0000000020012000
R13: 0000000000000001 R14: 0000000000000001 R15: ffff88018dfe7c50
  generic_perform_write+0x216/0x6a0 mm/filemap.c:3144
  __generic_file_write_iter+0x26e/0x630 mm/filemap.c:3279
  generic_file_write_iter+0x436/0x870 mm/filemap.c:3307
  call_write_iter include/linux/fs.h:1825 [inline]
  do_iter_readv_writev+0x8b0/0xa80 fs/read_write.c:680
  do_iter_write+0x185/0x5f0 fs/read_write.c:959
  vfs_writev+0x1f1/0x360 fs/read_write.c:1004
  do_pwritev+0x1cc/0x280 fs/read_write.c:1093
  __do_sys_pwritev fs/read_write.c:1140 [inline]
  __se_sys_pwritev fs/read_write.c:1135 [inline]
  __x64_sys_pwritev+0x9a/0xf0 fs/read_write.c:1135
  do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
  entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x457679
Code: 1d b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7  
48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff  
ff 0f 83 eb b3 fb ff c3 66 2e 0f 1f 84 00 00 00 00
RSP: 002b:00007f96ef162c78 EFLAGS: 00000246 ORIG_RAX: 0000000000000128
RAX: ffffffffffffffda RBX: 00007f96ef1636d4 RCX: 0000000000457679
RDX: 0000000000000001 RSI: 0000000020000000 RDI: 0000000000000004
RBP: 000000000072bf00 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff
R13: 00000000004d4a88 R14: 00000000004c31d4 R15: 0000000000000000
[repeated kobject uevent messages for loop0-loop5 trimmed]
FAULT_FLAG_ALLOW_RETRY missing 30
CPU: 1 PID: 21327 Comm: syz-executor3 Not tainted 4.19.0-rc4-next-20180920+  
#76
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS  
Google 01/01/2011
Call Trace:
  __dump_stack lib/dump_stack.c:77 [inline]
  dump_stack+0x1d3/0x2c4 lib/dump_stack.c:113
  handle_userfault.cold.33+0x47/0x62 fs/userfaultfd.c:432
  do_anonymous_page mm/memory.c:2915 [inline]
  handle_pte_fault mm/memory.c:3732 [inline]
  __handle_mm_fault+0x45ed/0x53e0 mm/memory.c:3858
  handle_mm_fault+0x54f/0xc70 mm/memory.c:3895
  __do_page_fault+0x673/0xec0 arch/x86/mm/fault.c:1397
  do_page_fault+0xed/0x7d1 arch/x86/mm/fault.c:1472
  page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1139
RIP: 0010:fault_in_pages_readable include/linux/pagemap.h:601 [inline]
RIP: 0010:iov_iter_fault_in_readable+0x1b4/0x450 lib/iov_iter.c:421
Code: fd 49 39 dc 76 17 eb 3c e8 e9 a0 ef fd 49 81 c4 00 10 00 00 4c 39 a5  
28 ff ff ff 72 2e e8 d4 a0 ef fd 0f 1f 00 0f ae e8 31 db <41> 8a 04 24 0f  
1f 00 31 ff 89 de 88 85 58 ff ff ff e8 c6 a1 ef fd
RSP: 0018:ffff88018dfe7650 EFLAGS: 00010246
RAX: 0000000000040000 RBX: 0000000000000000 RCX: ffffc90005662000
RDX: 00000000000001c2 RSI: ffffffff838daf1c RDI: 0000000000000005
RBP: ffff88018dfe7728 R08: ffff880198ad2240 R09: ffffed00319dc039
R10: ffffed00319dc039 R11: ffff88018cee01cb R12: 0000000020012000
R13: 0000000000000001 R14: 0000000000000001 R15: ffff88018dfe7c50
  generic_perform_write+0x216/0x6a0 mm/filemap.c:3144
  __generic_file_write_iter+0x26e/0x630 mm/filemap.c:3279
  generic_file_write_iter+0x436/0x870 mm/filemap.c:3307
  call_write_iter include/linux/fs.h:1825 [inline]
  do_iter_readv_writev+0x8b0/0xa80 fs/read_write.c:680
  do_iter_write+0x185/0x5f0 fs/read_write.c:959
  vfs_writev+0x1f1/0x360 fs/read_write.c:1004
  do_pwritev+0x1cc/0x280 fs/read_write.c:1093
  __do_sys_pwritev fs/read_write.c:1140 [inline]
  __se_sys_pwritev fs/read_write.c:1135 [inline]
  __x64_sys_pwritev+0x9a/0xf0 fs/read_write.c:1135
  do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
  entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x457679
Code: 1d b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7  
48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff  
ff 0f 83 eb b3 fb ff c3 66 2e 0f 1f 84 00 00 00 00
RSP: 002b:00007f96ef162c78 EFLAGS: 00000246 ORIG_RAX: 0000000000000128
RAX: ffffffffffffffda RBX: 00007f96ef1636d4 RCX: 0000000000457679
RDX: 0000000000000001 RSI: 0000000020000000 RDI: 0000000000000004
RBP: 000000000072bf00 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff
R13: 00000000004d4a88 R14: 00000000004c31d4 R15: 0000000000000000
[repeated kobject uevent messages and audit seccomp log lines trimmed]


---
This bug is generated by a bot. It may contain errors.
See https://goo.gl/tpsmEJ for more information about syzbot.
syzbot engineers can be reached at syzkaller@googlegroups.com.

syzbot will keep track of this bug report. See:
https://goo.gl/tpsmEJ#bug-status-tracking for how to communicate with  
syzbot.
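
Since the i_mutex -> mmap_sem edge (#0) comes from the generic write path and the mmap_sem -> ashmem_mutex edge (#1) is taken with mmap_sem already held by core mm code, the driver-side edge is typically the one that has to give. One generic way to break such a cycle, sketched below with pthreads, is to replace the blocking acquisition with a trylock and make the caller back off and retry; a trylock can never block, so it cannot complete a deadlock cycle. This is only an illustration of the pattern, not the patch that eventually addressed this report:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmap_sem     = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ashmem_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical mmap path: rather than blocking on ashmem_mutex while
 * mmap_sem is held, try the lock and back off so the caller can drop
 * mmap_sem and retry from the top. */
static int mmap_path(void)
{
        pthread_mutex_lock(&mmap_sem);
        if (pthread_mutex_trylock(&ashmem_mutex) != 0) {
                pthread_mutex_unlock(&mmap_sem);
                return -EAGAIN;            /* caller retries */
        }
        /* ... set up the mapping ... */
        pthread_mutex_unlock(&ashmem_mutex);
        pthread_mutex_unlock(&mmap_sem);
        return 0;
}

int main(void)
{
        while (mmap_path() == -EAGAIN)
                ;                          /* retry until uncontended */
        puts("mapped without blocking on ashmem_mutex under mmap_sem");
        return 0;
}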


* Re: possible deadlock in __do_page_fault
  2018-09-20 21:04 possible deadlock in __do_page_fault syzbot
@ 2018-09-20 21:10 ` Andrew Morton
  2018-09-20 21:12   ` Todd Kjos
  2018-10-01  5:23 ` syzbot
  1 sibling, 1 reply; 25+ messages in thread
From: Andrew Morton @ 2018-09-20 21:10 UTC (permalink / raw)
  To: syzbot
  Cc: ak, hannes, jack, jrdr.linux, linux-kernel, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen


Thanks.  Let's cc the ashmem folks.

On Thu, 20 Sep 2018 14:04:05 -0700 syzbot <syzbot+a76129f18c89f3e2ddd4@syzkaller.appspotmail.com> wrote:

> [full syzbot report quoted here in the original message; trimmed, see above]


* Re: possible deadlock in __do_page_fault
  2018-09-20 21:10 ` Andrew Morton
@ 2018-09-20 21:12   ` Todd Kjos
  2018-09-20 23:33     ` Joel Fernandes
  0 siblings, 1 reply; 25+ messages in thread
From: Todd Kjos @ 2018-09-20 21:12 UTC (permalink / raw)
  To: akpm, Joel Fernandes
  Cc: syzbot+a76129f18c89f3e2ddd4, ak, hannes, jack, jrdr.linux, LKML,
	Linux-MM, mawilcox, mgorman, syzkaller-bugs,
	Arve Hjønnevåg, Todd Kjos, Martijn Coenen

+Joel Fernandes

On Thu, Sep 20, 2018 at 2:11 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>
>
> Thanks.  Let's cc the ashmem folks.
>
> On Thu, 20 Sep 2018 14:04:05 -0700 syzbot <syzbot+a76129f18c89f3e2ddd4@syzkaller.appspotmail.com> wrote:
>
> > Hello,
> >
> > syzbot found the following crash on:
> >
> > HEAD commit:    a0cb0cabe4bb Add linux-next specific files for 20180920
> > git tree:       linux-next
> > console output: https://syzkaller.appspot.com/x/log.txt?x=15139721400000
> > kernel config:  https://syzkaller.appspot.com/x/.config?x=786006c5dafbadf6
> > dashboard link: https://syzkaller.appspot.com/bug?extid=a76129f18c89f3e2ddd4
> > compiler:       gcc (GCC) 8.0.1 20180413 (experimental)
> >
> > Unfortunately, I don't have any reproducer for this crash yet.
> >
> > IMPORTANT: if you fix the bug, please add the following tag to the commit:
> > Reported-by: syzbot+a76129f18c89f3e2ddd4@syzkaller.appspotmail.com
> >
> >
> > ======================================================
> > WARNING: possible circular locking dependency detected
> > 4.19.0-rc4-next-20180920+ #76 Not tainted
> > ------------------------------------------------------
> > syz-executor3/21327 is trying to acquire lock:
> > 000000009bc5286f (&mm->mmap_sem){++++}, at: __do_page_fault+0xb61/0xec0
> > arch/x86/mm/fault.c:1333
> >
> > but task is already holding lock:
> > 00000000a2c51c08 (&sb->s_type->i_mutex_key#10){+.+.}, at: inode_lock
> > include/linux/fs.h:745 [inline]
> > 00000000a2c51c08 (&sb->s_type->i_mutex_key#10){+.+.}, at:
> > generic_file_write_iter+0xed/0x870 mm/filemap.c:3304
> >
> > which lock already depends on the new lock.
> >
> >
> > the existing dependency chain (in reverse order) is:
> >
> > -> #2 (&sb->s_type->i_mutex_key#10){+.+.}:
> >         down_write+0x8a/0x130 kernel/locking/rwsem.c:70
> >         inode_lock include/linux/fs.h:745 [inline]
> >         shmem_fallocate+0x18b/0x12c0 mm/shmem.c:2651
> >         ashmem_shrink_scan+0x238/0x660 drivers/staging/android/ashmem.c:455
> >         ashmem_ioctl+0x3ae/0x13a0 drivers/staging/android/ashmem.c:797
> >         vfs_ioctl fs/ioctl.c:46 [inline]
> >         file_ioctl fs/ioctl.c:501 [inline]
> >         do_vfs_ioctl+0x1de/0x1720 fs/ioctl.c:685
> >         ksys_ioctl+0xa9/0xd0 fs/ioctl.c:702
> >         __do_sys_ioctl fs/ioctl.c:709 [inline]
> >         __se_sys_ioctl fs/ioctl.c:707 [inline]
> >         __x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:707
> >         do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
> >         entry_SYSCALL_64_after_hwframe+0x49/0xbe
> >
> > -> #1 (ashmem_mutex){+.+.}:
> >         __mutex_lock_common kernel/locking/mutex.c:925 [inline]
> >         __mutex_lock+0x166/0x1700 kernel/locking/mutex.c:1072
> >         mutex_lock_nested+0x16/0x20 kernel/locking/mutex.c:1087
> >         ashmem_mmap+0x55/0x520 drivers/staging/android/ashmem.c:361
> >         call_mmap include/linux/fs.h:1830 [inline]
> >         mmap_region+0xe82/0x1cd0 mm/mmap.c:1762
> >         do_mmap+0xa10/0x1220 mm/mmap.c:1535
> >         do_mmap_pgoff include/linux/mm.h:2298 [inline]
> >         vm_mmap_pgoff+0x213/0x2c0 mm/util.c:357
> >         ksys_mmap_pgoff+0x4da/0x660 mm/mmap.c:1585
> >         __do_sys_mmap arch/x86/kernel/sys_x86_64.c:100 [inline]
> >         __se_sys_mmap arch/x86/kernel/sys_x86_64.c:91 [inline]
> >         __x64_sys_mmap+0xe9/0x1b0 arch/x86/kernel/sys_x86_64.c:91
> >         do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
> >         entry_SYSCALL_64_after_hwframe+0x49/0xbe
> >
> > -> #0 (&mm->mmap_sem){++++}:
> >         lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3900
> >         down_read+0x8d/0x120 kernel/locking/rwsem.c:24
> >         __do_page_fault+0xb61/0xec0 arch/x86/mm/fault.c:1333
> >         do_page_fault+0xed/0x7d1 arch/x86/mm/fault.c:1472
> >         page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1139
> >         fault_in_pages_readable include/linux/pagemap.h:601 [inline]
> >         iov_iter_fault_in_readable+0x1b4/0x450 lib/iov_iter.c:421
> >         generic_perform_write+0x216/0x6a0 mm/filemap.c:3144
> >         __generic_file_write_iter+0x26e/0x630 mm/filemap.c:3279
> >         generic_file_write_iter+0x436/0x870 mm/filemap.c:3307
> >         call_write_iter include/linux/fs.h:1825 [inline]
> >         do_iter_readv_writev+0x8b0/0xa80 fs/read_write.c:680
> >         do_iter_write+0x185/0x5f0 fs/read_write.c:959
> >         vfs_writev+0x1f1/0x360 fs/read_write.c:1004
> >         do_pwritev+0x1cc/0x280 fs/read_write.c:1093
> >         __do_sys_pwritev fs/read_write.c:1140 [inline]
> >         __se_sys_pwritev fs/read_write.c:1135 [inline]
> >         __x64_sys_pwritev+0x9a/0xf0 fs/read_write.c:1135
> >         do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
> >         entry_SYSCALL_64_after_hwframe+0x49/0xbe
> >
> > other info that might help us debug this:
> >
> > Chain exists of:
> >    &mm->mmap_sem --> ashmem_mutex --> &sb->s_type->i_mutex_key#10
> >
> >   Possible unsafe locking scenario:
> >
> >         CPU0                    CPU1
> >         ----                    ----
> >    lock(&sb->s_type->i_mutex_key#10);
> >                                 lock(ashmem_mutex);
> >                                 lock(&sb->s_type->i_mutex_key#10);
> >    lock(&mm->mmap_sem);
> >
> >   *** DEADLOCK ***
> >
> > 2 locks held by syz-executor3/21327:
> >   #0: 000000003de4eab1 (sb_writers#3){.+.+}, at: file_start_write
> > include/linux/fs.h:2784 [inline]
> >   #0: 000000003de4eab1 (sb_writers#3){.+.+}, at: vfs_writev+0x2bd/0x360
> > fs/read_write.c:1003
> >   #1: 00000000a2c51c08 (&sb->s_type->i_mutex_key#10){+.+.}, at: inode_lock
> > include/linux/fs.h:745 [inline]
> >   #1: 00000000a2c51c08 (&sb->s_type->i_mutex_key#10){+.+.}, at:
> > generic_file_write_iter+0xed/0x870 mm/filemap.c:3304
> >
> > stack backtrace:
> > CPU: 1 PID: 21327 Comm: syz-executor3 Not tainted 4.19.0-rc4-next-20180920+
> > #76
> > Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
> > Google 01/01/2011
> > Call Trace:
> >   __dump_stack lib/dump_stack.c:77 [inline]
> >   dump_stack+0x1d3/0x2c4 lib/dump_stack.c:113
> >   print_circular_bug.isra.33.cold.54+0x1bd/0x27d
> > kernel/locking/lockdep.c:1221
> >   check_prev_add kernel/locking/lockdep.c:1861 [inline]
> >   check_prevs_add kernel/locking/lockdep.c:1974 [inline]
> >   validate_chain kernel/locking/lockdep.c:2415 [inline]
> >   __lock_acquire+0x33e4/0x4ec0 kernel/locking/lockdep.c:3411
> >   lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3900
> >   down_read+0x8d/0x120 kernel/locking/rwsem.c:24
> >   __do_page_fault+0xb61/0xec0 arch/x86/mm/fault.c:1333
> >   do_page_fault+0xed/0x7d1 arch/x86/mm/fault.c:1472
> >   page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1139
> > RIP: 0010:fault_in_pages_readable include/linux/pagemap.h:601 [inline]
> > RIP: 0010:iov_iter_fault_in_readable+0x1b4/0x450 lib/iov_iter.c:421
> > Code: fd 49 39 dc 76 17 eb 3c e8 e9 a0 ef fd 49 81 c4 00 10 00 00 4c 39 a5
> > 28 ff ff ff 72 2e e8 d4 a0 ef fd 0f 1f 00 0f ae e8 31 db <41> 8a 04 24 0f
> > 1f 00 31 ff 89 de 88 85 58 ff ff ff e8 c6 a1 ef fd
> > RSP: 0018:ffff88018dfe7650 EFLAGS: 00010246
> > RAX: 0000000000040000 RBX: 0000000000000000 RCX: ffffc90005662000
> > RDX: 00000000000001c2 RSI: ffffffff838daf1c RDI: 0000000000000005
> > RBP: ffff88018dfe7728 R08: ffff880198ad2240 R09: ffffed00319dc039
> > R10: ffffed00319dc039 R11: ffff88018cee01cb R12: 0000000020012000
> > R13: 0000000000000001 R14: 0000000000000001 R15: ffff88018dfe7c50
> >   generic_perform_write+0x216/0x6a0 mm/filemap.c:3144
> >   __generic_file_write_iter+0x26e/0x630 mm/filemap.c:3279
> >   generic_file_write_iter+0x436/0x870 mm/filemap.c:3307
> >   call_write_iter include/linux/fs.h:1825 [inline]
> >   do_iter_readv_writev+0x8b0/0xa80 fs/read_write.c:680
> >   do_iter_write+0x185/0x5f0 fs/read_write.c:959
> >   vfs_writev+0x1f1/0x360 fs/read_write.c:1004
> >   do_pwritev+0x1cc/0x280 fs/read_write.c:1093
> >   __do_sys_pwritev fs/read_write.c:1140 [inline]
> >   __se_sys_pwritev fs/read_write.c:1135 [inline]
> >   __x64_sys_pwritev+0x9a/0xf0 fs/read_write.c:1135
> >   do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
> >   entry_SYSCALL_64_after_hwframe+0x49/0xbe
> > RIP: 0033:0x457679
> > Code: 1d b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7
> > 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff
> > ff 0f 83 eb b3 fb ff c3 66 2e 0f 1f 84 00 00 00 00
> > RSP: 002b:00007f96ef162c78 EFLAGS: 00000246 ORIG_RAX: 0000000000000128
> > RAX: ffffffffffffffda RBX: 00007f96ef1636d4 RCX: 0000000000457679
> > RDX: 0000000000000001 RSI: 0000000020000000 RDI: 0000000000000004
> > RBP: 000000000072bf00 R08: 0000000000000000 R09: 0000000000000000
> > R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff
> > R13: 00000000004d4a88 R14: 00000000004c31d4 R15: 0000000000000000
> > kobject: 'loop2' (00000000dc629c38): kobject_uevent_env
> > kobject: 'loop2' (00000000dc629c38): fill_kobj_path: path
> > = '/devices/virtual/block/loop2'
> > FAULT_FLAG_ALLOW_RETRY missing 30
> > kobject: 'loop1' (00000000980d23a1): kobject_uevent_env
> > kobject: 'loop1' (00000000980d23a1): fill_kobj_path: path
> > = '/devices/virtual/block/loop1'
> > CPU: 1 PID: 21327 Comm: syz-executor3 Not tainted 4.19.0-rc4-next-20180920+
> > #76
> > Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
> > Google 01/01/2011
> > Call Trace:
> >   __dump_stack lib/dump_stack.c:77 [inline]
> >   dump_stack+0x1d3/0x2c4 lib/dump_stack.c:113
> >   handle_userfault.cold.33+0x47/0x62 fs/userfaultfd.c:432
> >   do_anonymous_page mm/memory.c:2915 [inline]
> >   handle_pte_fault mm/memory.c:3732 [inline]
> >   __handle_mm_fault+0x45ed/0x53e0 mm/memory.c:3858
> >   handle_mm_fault+0x54f/0xc70 mm/memory.c:3895
> >   __do_page_fault+0x673/0xec0 arch/x86/mm/fault.c:1397
> >   do_page_fault+0xed/0x7d1 arch/x86/mm/fault.c:1472
> >   page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1139
> > RIP: 0010:fault_in_pages_readable include/linux/pagemap.h:601 [inline]
> > RIP: 0010:iov_iter_fault_in_readable+0x1b4/0x450 lib/iov_iter.c:421
> > Code: fd 49 39 dc 76 17 eb 3c e8 e9 a0 ef fd 49 81 c4 00 10 00 00 4c 39 a5
> > 28 ff ff ff 72 2e e8 d4 a0 ef fd 0f 1f 00 0f ae e8 31 db <41> 8a 04 24 0f
> > 1f 00 31 ff 89 de 88 85 58 ff ff ff e8 c6 a1 ef fd
> > RSP: 0018:ffff88018dfe7650 EFLAGS: 00010246
> > RAX: 0000000000040000 RBX: 0000000000000000 RCX: ffffc90005662000
> > RDX: 00000000000001c2 RSI: ffffffff838daf1c RDI: 0000000000000005
> > RBP: ffff88018dfe7728 R08: ffff880198ad2240 R09: ffffed00319dc039
> > R10: ffffed00319dc039 R11: ffff88018cee01cb R12: 0000000020012000
> > R13: 0000000000000001 R14: 0000000000000001 R15: ffff88018dfe7c50
> >   generic_perform_write+0x216/0x6a0 mm/filemap.c:3144
> >   __generic_file_write_iter+0x26e/0x630 mm/filemap.c:3279
> >   generic_file_write_iter+0x436/0x870 mm/filemap.c:3307
> >   call_write_iter include/linux/fs.h:1825 [inline]
> >   do_iter_readv_writev+0x8b0/0xa80 fs/read_write.c:680
> >   do_iter_write+0x185/0x5f0 fs/read_write.c:959
> >   vfs_writev+0x1f1/0x360 fs/read_write.c:1004
> >   do_pwritev+0x1cc/0x280 fs/read_write.c:1093
> >   __do_sys_pwritev fs/read_write.c:1140 [inline]
> >   __se_sys_pwritev fs/read_write.c:1135 [inline]
> >   __x64_sys_pwritev+0x9a/0xf0 fs/read_write.c:1135
> >   do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
> >   entry_SYSCALL_64_after_hwframe+0x49/0xbe
> > RIP: 0033:0x457679
> > Code: 1d b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7
> > 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff
> > ff 0f 83 eb b3 fb ff c3 66 2e 0f 1f 84 00 00 00 00
> > RSP: 002b:00007f96ef162c78 EFLAGS: 00000246 ORIG_RAX: 0000000000000128
> > RAX: ffffffffffffffda RBX: 00007f96ef1636d4 RCX: 0000000000457679
> > RDX: 0000000000000001 RSI: 0000000020000000 RDI: 0000000000000004
> > RBP: 000000000072bf00 R08: 0000000000000000 R09: 0000000000000000
> > R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff
> > R13: 00000000004d4a88 R14: 00000000004c31d4 R15: 0000000000000000
> > [ hundreds of repeated loop-device kobject_uevent lines and seccomp audit records trimmed ]
> >
> >
> > ---
> > This bug is generated by a bot. It may contain errors.
> > See https://goo.gl/tpsmEJ for more information about syzbot.
> > syzbot engineers can be reached at syzkaller@googlegroups.com.
> >
> > syzbot will keep track of this bug report. See:
> > https://goo.gl/tpsmEJ#bug-status-tracking for how to communicate with
> > syzbot.

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2018-09-20 21:12   ` Todd Kjos
@ 2018-09-20 23:33     ` Joel Fernandes
  2018-09-21  6:37       ` Dmitry Vyukov
  2018-09-21 23:21       ` Andrew Morton
  0 siblings, 2 replies; 25+ messages in thread
From: Joel Fernandes @ 2018-09-20 23:33 UTC (permalink / raw)
  To: Todd Kjos
  Cc: Andrew Morton, Joel Fernandes, syzbot+a76129f18c89f3e2ddd4, ak,
	Johannes Weiner, jack, jrdr.linux, LKML, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On Thu, Sep 20, 2018 at 5:12 PM Todd Kjos <tkjos@google.com> wrote:
>
> +Joel Fernandes
>
> On Thu, Sep 20, 2018 at 2:11 PM Andrew Morton <akpm@linux-foundation.org> wrote:
> >
> >
> > Thanks.  Let's cc the ashmem folks.
> >

This should be fixed by https://patchwork.kernel.org/patch/10572477/

It has Neil Brown's Reviewed-by but it looks like it hasn't appeared in
anyone's tree yet; could Greg take this patch?
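
For context, the technique that patch uses is lockdep re-keying: shmem
inodes get their own lock class, so lockdep stops conflating their
i_rwsem with every other user of inode_lock(). A minimal sketch of the
idea (illustrative only -- the key and function names below are made
up; see the patch itself for the real diff):

#include <linux/fs.h>
#include <linux/lockdep.h>

/* Hypothetical class key; the real patch defines its own keys. */
static struct lock_class_key example_shmem_inode_key;

static void example_annotate_shmem_inode(struct inode *inode)
{
	/*
	 * Re-key i_rwsem (the lock behind inode_lock()) so lockdep
	 * tracks shmem inodes as a class of their own instead of
	 * lumping them together with unrelated filesystems' inodes.
	 */
	lockdep_set_class(&inode->i_rwsem, &example_shmem_inode_key);
}

If I read the patch right, the report is a class-conflation false
positive rather than an ABBA on a single inode, which is why
re-annotating the class is enough to address it.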

thanks,

 - Joel



> > On Thu, 20 Sep 2018 14:04:05 -0700 syzbot <syzbot+a76129f18c89f3e2ddd4@syzkaller.appspotmail.com> wrote:
> >
> > > [ full syzbot report trimmed; it is quoted in full earlier in the thread ]

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2018-09-20 23:33     ` Joel Fernandes
@ 2018-09-21  6:37       ` Dmitry Vyukov
  2018-09-21 23:21       ` Andrew Morton
  1 sibling, 0 replies; 25+ messages in thread
From: Dmitry Vyukov @ 2018-09-21  6:37 UTC (permalink / raw)
  To: Joel Fernandes
  Cc: Todd Kjos, Andrew Morton, Joel Fernandes,
	syzbot+a76129f18c89f3e2ddd4, Andi Kleen, Johannes Weiner,
	Jan Kara, Souptick Joarder, LKML, Linux-MM, Matthew Wilcox,
	Mel Gorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On Fri, Sep 21, 2018 at 1:33 AM, Joel Fernandes <joel@joelfernandes.org> wrote:
> On Thu, Sep 20, 2018 at 5:12 PM Todd Kjos <tkjos@google.com> wrote:
>>
>> +Joel Fernandes
>>
>> On Thu, Sep 20, 2018 at 2:11 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>> >
>> >
>> > Thanks.  Let's cc the ashmem folks.
>> >
>
> This should be fixed by https://patchwork.kernel.org/patch/10572477/
>
> It has Neil Brown's Reviewed-by but it looks like it hasn't appeared in
> anyone's tree yet; could Greg take this patch?


Let's tell syzbot about the fix:

#syz fix: mm: shmem: Correctly annotate new inodes for lockdep
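
For the archives, the inversion the report boils down to, condensed
from the report's three lockdep traces (pseudocode, not a reproducer;
naming the purge-caches ioctl is my reading of the ashmem_ioctl frame):

/* Task A: mmap() of an ashmem fd */
down_write(&mm->mmap_sem);      /* ksys_mmap_pgoff() */
mutex_lock(&ashmem_mutex);      /* ashmem_mmap() */

/* Task B: ashmem ioctl that shrinks caches */
mutex_lock(&ashmem_mutex);      /* ashmem_ioctl() -> ashmem_shrink_scan() */
inode_lock(shmem_inode);        /* shmem_fallocate() */

/* Task C: pwritev() to a shmem file */
inode_lock(inode);              /* generic_file_write_iter() */
down_read(&mm->mmap_sem);       /* __do_page_fault() on the user buffer */

/* Acquisition orders form a cycle:
 * mmap_sem -> ashmem_mutex -> i_rwsem -> mmap_sem. */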

>> > On Thu, 20 Sep 2018 14:04:05 -0700 syzbot <syzbot+a76129f18c89f3e2ddd4@syzkaller.appspotmail.com> wrote:
>> >
>> > > Hello,
>> > >
>> > > syzbot found the following crash on:
>> > >
>> > > HEAD commit:    a0cb0cabe4bb Add linux-next specific files for 20180920
>> > > git tree:       linux-next
>> > > console output: https://syzkaller.appspot.com/x/log.txt?x=15139721400000
>> > > kernel config:  https://syzkaller.appspot.com/x/.config?x=786006c5dafbadf6
>> > > dashboard link: https://syzkaller.appspot.com/bug?extid=a76129f18c89f3e2ddd4
>> > > compiler:       gcc (GCC) 8.0.1 20180413 (experimental)
>> > >
>> > > Unfortunately, I don't have any reproducer for this crash yet.
>> > >
>> > > IMPORTANT: if you fix the bug, please add the following tag to the commit:
>> > > Reported-by: syzbot+a76129f18c89f3e2ddd4@syzkaller.appspotmail.com
>> > >
>> > >
>> > > ======================================================
>> > > WARNING: possible circular locking dependency detected
>> > > 4.19.0-rc4-next-20180920+ #76 Not tainted
>> > > ------------------------------------------------------
>> > > syz-executor3/21327 is trying to acquire lock:
>> > > 000000009bc5286f (&mm->mmap_sem){++++}, at: __do_page_fault+0xb61/0xec0
>> > > arch/x86/mm/fault.c:1333
>> > >
>> > > but task is already holding lock:
>> > > 00000000a2c51c08 (&sb->s_type->i_mutex_key#10){+.+.}, at: inode_lock
>> > > include/linux/fs.h:745 [inline]
>> > > 00000000a2c51c08 (&sb->s_type->i_mutex_key#10){+.+.}, at:
>> > > generic_file_write_iter+0xed/0x870 mm/filemap.c:3304
>> > >
>> > > which lock already depends on the new lock.
>> > >
>> > >
>> > > the existing dependency chain (in reverse order) is:
>> > >
>> > > -> #2 (&sb->s_type->i_mutex_key#10){+.+.}:
>> > >         down_write+0x8a/0x130 kernel/locking/rwsem.c:70
>> > >         inode_lock include/linux/fs.h:745 [inline]
>> > >         shmem_fallocate+0x18b/0x12c0 mm/shmem.c:2651
>> > >         ashmem_shrink_scan+0x238/0x660 drivers/staging/android/ashmem.c:455
>> > >         ashmem_ioctl+0x3ae/0x13a0 drivers/staging/android/ashmem.c:797
>> > >         vfs_ioctl fs/ioctl.c:46 [inline]
>> > >         file_ioctl fs/ioctl.c:501 [inline]
>> > >         do_vfs_ioctl+0x1de/0x1720 fs/ioctl.c:685
>> > >         ksys_ioctl+0xa9/0xd0 fs/ioctl.c:702
>> > >         __do_sys_ioctl fs/ioctl.c:709 [inline]
>> > >         __se_sys_ioctl fs/ioctl.c:707 [inline]
>> > >         __x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:707
>> > >         do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
>> > >         entry_SYSCALL_64_after_hwframe+0x49/0xbe
>> > >
>> > > -> #1 (ashmem_mutex){+.+.}:
>> > >         __mutex_lock_common kernel/locking/mutex.c:925 [inline]
>> > >         __mutex_lock+0x166/0x1700 kernel/locking/mutex.c:1072
>> > >         mutex_lock_nested+0x16/0x20 kernel/locking/mutex.c:1087
>> > >         ashmem_mmap+0x55/0x520 drivers/staging/android/ashmem.c:361
>> > >         call_mmap include/linux/fs.h:1830 [inline]
>> > >         mmap_region+0xe82/0x1cd0 mm/mmap.c:1762
>> > >         do_mmap+0xa10/0x1220 mm/mmap.c:1535
>> > >         do_mmap_pgoff include/linux/mm.h:2298 [inline]
>> > >         vm_mmap_pgoff+0x213/0x2c0 mm/util.c:357
>> > >         ksys_mmap_pgoff+0x4da/0x660 mm/mmap.c:1585
>> > >         __do_sys_mmap arch/x86/kernel/sys_x86_64.c:100 [inline]
>> > >         __se_sys_mmap arch/x86/kernel/sys_x86_64.c:91 [inline]
>> > >         __x64_sys_mmap+0xe9/0x1b0 arch/x86/kernel/sys_x86_64.c:91
>> > >         do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
>> > >         entry_SYSCALL_64_after_hwframe+0x49/0xbe
>> > >
>> > > -> #0 (&mm->mmap_sem){++++}:
>> > >         lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3900
>> > >         down_read+0x8d/0x120 kernel/locking/rwsem.c:24
>> > >         __do_page_fault+0xb61/0xec0 arch/x86/mm/fault.c:1333
>> > >         do_page_fault+0xed/0x7d1 arch/x86/mm/fault.c:1472
>> > >         page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1139
>> > >         fault_in_pages_readable include/linux/pagemap.h:601 [inline]
>> > >         iov_iter_fault_in_readable+0x1b4/0x450 lib/iov_iter.c:421
>> > >         generic_perform_write+0x216/0x6a0 mm/filemap.c:3144
>> > >         __generic_file_write_iter+0x26e/0x630 mm/filemap.c:3279
>> > >         generic_file_write_iter+0x436/0x870 mm/filemap.c:3307
>> > >         call_write_iter include/linux/fs.h:1825 [inline]
>> > >         do_iter_readv_writev+0x8b0/0xa80 fs/read_write.c:680
>> > >         do_iter_write+0x185/0x5f0 fs/read_write.c:959
>> > >         vfs_writev+0x1f1/0x360 fs/read_write.c:1004
>> > >         do_pwritev+0x1cc/0x280 fs/read_write.c:1093
>> > >         __do_sys_pwritev fs/read_write.c:1140 [inline]
>> > >         __se_sys_pwritev fs/read_write.c:1135 [inline]
>> > >         __x64_sys_pwritev+0x9a/0xf0 fs/read_write.c:1135
>> > >         do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
>> > >         entry_SYSCALL_64_after_hwframe+0x49/0xbe
>> > >
>> > > other info that might help us debug this:
>> > >
>> > > Chain exists of:
>> > >    &mm->mmap_sem --> ashmem_mutex --> &sb->s_type->i_mutex_key#10
>> > >
>> > >   Possible unsafe locking scenario:
>> > >
>> > >         CPU0                    CPU1
>> > >         ----                    ----
>> > >    lock(&sb->s_type->i_mutex_key#10);
>> > >                                 lock(ashmem_mutex);
>> > >                                 lock(&sb->s_type->i_mutex_key#10);
>> > >    lock(&mm->mmap_sem);
>> > >
>> > >   *** DEADLOCK ***
>> > >
>> > > 2 locks held by syz-executor3/21327:
>> > >   #0: 000000003de4eab1 (sb_writers#3){.+.+}, at: file_start_write
>> > > include/linux/fs.h:2784 [inline]
>> > >   #0: 000000003de4eab1 (sb_writers#3){.+.+}, at: vfs_writev+0x2bd/0x360
>> > > fs/read_write.c:1003
>> > >   #1: 00000000a2c51c08 (&sb->s_type->i_mutex_key#10){+.+.}, at: inode_lock
>> > > include/linux/fs.h:745 [inline]
>> > >   #1: 00000000a2c51c08 (&sb->s_type->i_mutex_key#10){+.+.}, at:
>> > > generic_file_write_iter+0xed/0x870 mm/filemap.c:3304
>> > >
>> > > stack backtrace:
>> > > CPU: 1 PID: 21327 Comm: syz-executor3 Not tainted 4.19.0-rc4-next-20180920+
>> > > #76
>> > > Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
>> > > Google 01/01/2011
>> > > Call Trace:
>> > >   __dump_stack lib/dump_stack.c:77 [inline]
>> > >   dump_stack+0x1d3/0x2c4 lib/dump_stack.c:113
>> > >   print_circular_bug.isra.33.cold.54+0x1bd/0x27d
>> > > kernel/locking/lockdep.c:1221
>> > >   check_prev_add kernel/locking/lockdep.c:1861 [inline]
>> > >   check_prevs_add kernel/locking/lockdep.c:1974 [inline]
>> > >   validate_chain kernel/locking/lockdep.c:2415 [inline]
>> > >   __lock_acquire+0x33e4/0x4ec0 kernel/locking/lockdep.c:3411
>> > >   lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3900
>> > >   down_read+0x8d/0x120 kernel/locking/rwsem.c:24
>> > >   __do_page_fault+0xb61/0xec0 arch/x86/mm/fault.c:1333
>> > >   do_page_fault+0xed/0x7d1 arch/x86/mm/fault.c:1472
>> > >   page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1139
>> > > RIP: 0010:fault_in_pages_readable include/linux/pagemap.h:601 [inline]
>> > > RIP: 0010:iov_iter_fault_in_readable+0x1b4/0x450 lib/iov_iter.c:421
>> > > Code: fd 49 39 dc 76 17 eb 3c e8 e9 a0 ef fd 49 81 c4 00 10 00 00 4c 39 a5
>> > > 28 ff ff ff 72 2e e8 d4 a0 ef fd 0f 1f 00 0f ae e8 31 db <41> 8a 04 24 0f
>> > > 1f 00 31 ff 89 de 88 85 58 ff ff ff e8 c6 a1 ef fd
>> > > RSP: 0018:ffff88018dfe7650 EFLAGS: 00010246
>> > > RAX: 0000000000040000 RBX: 0000000000000000 RCX: ffffc90005662000
>> > > RDX: 00000000000001c2 RSI: ffffffff838daf1c RDI: 0000000000000005
>> > > RBP: ffff88018dfe7728 R08: ffff880198ad2240 R09: ffffed00319dc039
>> > > R10: ffffed00319dc039 R11: ffff88018cee01cb R12: 0000000020012000
>> > > R13: 0000000000000001 R14: 0000000000000001 R15: ffff88018dfe7c50
>> > >   generic_perform_write+0x216/0x6a0 mm/filemap.c:3144
>> > >   __generic_file_write_iter+0x26e/0x630 mm/filemap.c:3279
>> > >   generic_file_write_iter+0x436/0x870 mm/filemap.c:3307
>> > >   call_write_iter include/linux/fs.h:1825 [inline]
>> > >   do_iter_readv_writev+0x8b0/0xa80 fs/read_write.c:680
>> > >   do_iter_write+0x185/0x5f0 fs/read_write.c:959
>> > >   vfs_writev+0x1f1/0x360 fs/read_write.c:1004
>> > >   do_pwritev+0x1cc/0x280 fs/read_write.c:1093
>> > >   __do_sys_pwritev fs/read_write.c:1140 [inline]
>> > >   __se_sys_pwritev fs/read_write.c:1135 [inline]
>> > >   __x64_sys_pwritev+0x9a/0xf0 fs/read_write.c:1135
>> > >   do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
>> > >   entry_SYSCALL_64_after_hwframe+0x49/0xbe
>> > > RIP: 0033:0x457679
>> > > Code: 1d b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7
>> > > 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff
>> > > ff 0f 83 eb b3 fb ff c3 66 2e 0f 1f 84 00 00 00 00
>> > > RSP: 002b:00007f96ef162c78 EFLAGS: 00000246 ORIG_RAX: 0000000000000128
>> > > RAX: ffffffffffffffda RBX: 00007f96ef1636d4 RCX: 0000000000457679
>> > > RDX: 0000000000000001 RSI: 0000000020000000 RDI: 0000000000000004
>> > > RBP: 000000000072bf00 R08: 0000000000000000 R09: 0000000000000000
>> > > R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff
>> > > R13: 00000000004d4a88 R14: 00000000004c31d4 R15: 0000000000000000
>> > > kobject: 'loop2' (00000000dc629c38): kobject_uevent_env
>> > > kobject: 'loop2' (00000000dc629c38): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop2'
>> > > FAULT_FLAG_ALLOW_RETRY missing 30
>> > > kobject: 'loop1' (00000000980d23a1): kobject_uevent_env
>> > > kobject: 'loop1' (00000000980d23a1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop1'
>> > > CPU: 1 PID: 21327 Comm: syz-executor3 Not tainted 4.19.0-rc4-next-20180920+
>> > > #76
>> > > Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
>> > > Google 01/01/2011
>> > > Call Trace:
>> > >   __dump_stack lib/dump_stack.c:77 [inline]
>> > >   dump_stack+0x1d3/0x2c4 lib/dump_stack.c:113
>> > >   handle_userfault.cold.33+0x47/0x62 fs/userfaultfd.c:432
>> > >   do_anonymous_page mm/memory.c:2915 [inline]
>> > >   handle_pte_fault mm/memory.c:3732 [inline]
>> > >   __handle_mm_fault+0x45ed/0x53e0 mm/memory.c:3858
>> > >   handle_mm_fault+0x54f/0xc70 mm/memory.c:3895
>> > >   __do_page_fault+0x673/0xec0 arch/x86/mm/fault.c:1397
>> > >   do_page_fault+0xed/0x7d1 arch/x86/mm/fault.c:1472
>> > >   page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1139
>> > > RIP: 0010:fault_in_pages_readable include/linux/pagemap.h:601 [inline]
>> > > RIP: 0010:iov_iter_fault_in_readable+0x1b4/0x450 lib/iov_iter.c:421
>> > > Code: fd 49 39 dc 76 17 eb 3c e8 e9 a0 ef fd 49 81 c4 00 10 00 00 4c 39 a5
>> > > 28 ff ff ff 72 2e e8 d4 a0 ef fd 0f 1f 00 0f ae e8 31 db <41> 8a 04 24 0f
>> > > 1f 00 31 ff 89 de 88 85 58 ff ff ff e8 c6 a1 ef fd
>> > > RSP: 0018:ffff88018dfe7650 EFLAGS: 00010246
>> > > RAX: 0000000000040000 RBX: 0000000000000000 RCX: ffffc90005662000
>> > > RDX: 00000000000001c2 RSI: ffffffff838daf1c RDI: 0000000000000005
>> > > RBP: ffff88018dfe7728 R08: ffff880198ad2240 R09: ffffed00319dc039
>> > > R10: ffffed00319dc039 R11: ffff88018cee01cb R12: 0000000020012000
>> > > R13: 0000000000000001 R14: 0000000000000001 R15: ffff88018dfe7c50
>> > >   generic_perform_write+0x216/0x6a0 mm/filemap.c:3144
>> > >   __generic_file_write_iter+0x26e/0x630 mm/filemap.c:3279
>> > >   generic_file_write_iter+0x436/0x870 mm/filemap.c:3307
>> > >   call_write_iter include/linux/fs.h:1825 [inline]
>> > >   do_iter_readv_writev+0x8b0/0xa80 fs/read_write.c:680
>> > >   do_iter_write+0x185/0x5f0 fs/read_write.c:959
>> > >   vfs_writev+0x1f1/0x360 fs/read_write.c:1004
>> > >   do_pwritev+0x1cc/0x280 fs/read_write.c:1093
>> > >   __do_sys_pwritev fs/read_write.c:1140 [inline]
>> > >   __se_sys_pwritev fs/read_write.c:1135 [inline]
>> > >   __x64_sys_pwritev+0x9a/0xf0 fs/read_write.c:1135
>> > >   do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
>> > >   entry_SYSCALL_64_after_hwframe+0x49/0xbe
>> > > RIP: 0033:0x457679
>> > > Code: 1d b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7
>> > > 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff
>> > > ff 0f 83 eb b3 fb ff c3 66 2e 0f 1f 84 00 00 00 00
>> > > RSP: 002b:00007f96ef162c78 EFLAGS: 00000246 ORIG_RAX: 0000000000000128
>> > > RAX: ffffffffffffffda RBX: 00007f96ef1636d4 RCX: 0000000000457679
>> > > RDX: 0000000000000001 RSI: 0000000020000000 RDI: 0000000000000004
>> > > RBP: 000000000072bf00 R08: 0000000000000000 R09: 0000000000000000
>> > > R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff
>> > > R13: 00000000004d4a88 R14: 00000000004c31d4 R15: 0000000000000000
>> > > kobject: 'loop4' (000000004119f3b1): kobject_uevent_env
>> > > kobject: 'loop4' (000000004119f3b1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop4'
>> > > kobject: 'loop0' (00000000ee0adfaf): kobject_uevent_env
>> > > kobject: 'loop0' (00000000ee0adfaf): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop0'
>> > > kobject: 'loop5' (00000000699d1086): kobject_uevent_env
>> > > kobject: 'loop5' (00000000699d1086): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop5'
>> > > kobject: 'loop1' (00000000980d23a1): kobject_uevent_env
>> > > kobject: 'loop1' (00000000980d23a1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop1'
>> > > kobject: 'loop3' (00000000aff594d4): kobject_uevent_env
>> > > kobject: 'loop3' (00000000aff594d4): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop3'
>> > > kobject: 'loop5' (00000000699d1086): kobject_uevent_env
>> > > kobject: 'loop5' (00000000699d1086): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop5'
>> > > kobject: 'loop0' (00000000ee0adfaf): kobject_uevent_env
>> > > kobject: 'loop0' (00000000ee0adfaf): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop0'
>> > > kobject: 'loop1' (00000000980d23a1): kobject_uevent_env
>> > > kobject: 'loop1' (00000000980d23a1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop1'
>> > > kobject: 'loop2' (00000000dc629c38): kobject_uevent_env
>> > > kobject: 'loop2' (00000000dc629c38): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop2'
>> > > kobject: 'loop1' (00000000980d23a1): kobject_uevent_env
>> > > kobject: 'loop1' (00000000980d23a1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop1'
>> > > kobject: 'loop5' (00000000699d1086): kobject_uevent_env
>> > > kobject: 'loop5' (00000000699d1086): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop5'
>> > > kobject: 'loop0' (00000000ee0adfaf): kobject_uevent_env
>> > > kobject: 'loop0' (00000000ee0adfaf): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop0'
>> > > kobject: 'loop4' (000000004119f3b1): kobject_uevent_env
>> > > kobject: 'loop4' (000000004119f3b1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop4'
>> > > kobject: 'loop0' (00000000ee0adfaf): kobject_uevent_env
>> > > kobject: 'loop0' (00000000ee0adfaf): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop0'
>> > > kobject: 'loop2' (00000000dc629c38): kobject_uevent_env
>> > > kobject: 'loop2' (00000000dc629c38): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop2'
>> > > kobject: 'loop3' (00000000aff594d4): kobject_uevent_env
>> > > kobject: 'loop3' (00000000aff594d4): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop3'
>> > > kobject: 'loop5' (00000000699d1086): kobject_uevent_env
>> > > kobject: 'loop5' (00000000699d1086): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop5'
>> > > kobject: 'loop4' (000000004119f3b1): kobject_uevent_env
>> > > kobject: 'loop4' (000000004119f3b1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop4'
>> > > kobject: 'loop2' (00000000dc629c38): kobject_uevent_env
>> > > kobject: 'loop2' (00000000dc629c38): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop2'
>> > > kobject: 'loop0' (00000000ee0adfaf): kobject_uevent_env
>> > > kobject: 'loop0' (00000000ee0adfaf): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop0'
>> > > kobject: 'loop5' (00000000699d1086): kobject_uevent_env
>> > > kobject: 'loop5' (00000000699d1086): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop5'
>> > > kobject: 'loop3' (00000000aff594d4): kobject_uevent_env
>> > > kobject: 'loop3' (00000000aff594d4): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop3'
>> > > kobject: 'loop1' (00000000980d23a1): kobject_uevent_env
>> > > kobject: 'loop1' (00000000980d23a1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop1'
>> > > kobject: 'loop4' (000000004119f3b1): kobject_uevent_env
>> > > kobject: 'loop4' (000000004119f3b1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop4'
>> > > kobject: 'loop2' (00000000dc629c38): kobject_uevent_env
>> > > kobject: 'loop2' (00000000dc629c38): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop2'
>> > > kobject: 'loop0' (00000000ee0adfaf): kobject_uevent_env
>> > > kobject: 'loop0' (00000000ee0adfaf): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop0'
>> > > kobject: 'loop3' (00000000aff594d4): kobject_uevent_env
>> > > kobject: 'loop3' (00000000aff594d4): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop3'
>> > > kobject: 'loop5' (00000000699d1086): kobject_uevent_env
>> > > kobject: 'loop5' (00000000699d1086): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop5'
>> > > kobject: 'loop1' (00000000980d23a1): kobject_uevent_env
>> > > kobject: 'loop1' (00000000980d23a1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop1'
>> > > kobject: 'loop0' (00000000ee0adfaf): kobject_uevent_env
>> > > kobject: 'loop0' (00000000ee0adfaf): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop0'
>> > > kobject: 'loop2' (00000000dc629c38): kobject_uevent_env
>> > > kobject: 'loop2' (00000000dc629c38): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop2'
>> > > kobject: 'loop3' (00000000aff594d4): kobject_uevent_env
>> > > kobject: 'loop3' (00000000aff594d4): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop3'
>> > > kobject: 'loop5' (00000000699d1086): kobject_uevent_env
>> > > kobject: 'loop5' (00000000699d1086): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop5'
>> > > kobject: 'loop4' (000000004119f3b1): kobject_uevent_env
>> > > kobject: 'loop4' (000000004119f3b1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop4'
>> > > kobject: 'loop1' (00000000980d23a1): kobject_uevent_env
>> > > kobject: 'loop1' (00000000980d23a1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop1'
>> > > kobject: 'loop0' (00000000ee0adfaf): kobject_uevent_env
>> > > kobject: 'loop0' (00000000ee0adfaf): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop0'
>> > > kobject: 'loop5' (00000000699d1086): kobject_uevent_env
>> > > kobject: 'loop5' (00000000699d1086): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop5'
>> > > kobject: 'loop2' (00000000dc629c38): kobject_uevent_env
>> > > kobject: 'loop2' (00000000dc629c38): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop2'
>> > > kobject: 'loop3' (00000000aff594d4): kobject_uevent_env
>> > > kobject: 'loop3' (00000000aff594d4): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop3'
>> > > kobject: 'loop4' (000000004119f3b1): kobject_uevent_env
>> > > kobject: 'loop4' (000000004119f3b1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop4'
>> > > kobject: 'loop0' (00000000ee0adfaf): kobject_uevent_env
>> > > kobject: 'loop0' (00000000ee0adfaf): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop0'
>> > > kobject: 'loop1' (00000000980d23a1): kobject_uevent_env
>> > > kobject: 'loop1' (00000000980d23a1): fill_kobj_path: path
>> > > = '/devices/virtual/block/loop1'
>> > > audit: type=1326 audit(2000000012.020:46): auid=4294967295 uid=0 gid=0
>> > > ses=4294967295 subj==unconfined pid=21534 comm="syz-executor0"
>> > > exe="/root/syz-executor0" sig=31 arch=c000003e syscall=202 compat=0
>> > > ip=0x457679 code=0x0
>> > > [ more repeated kobject uevent messages elided ]
>> > > audit: type=1326 audit(2000000012.950:47): auid=4294967295 uid=0 gid=0
>> > > ses=4294967295 subj==unconfined pid=21566 comm="syz-executor0"
>> > > exe="/root/syz-executor0" sig=31 arch=c000003e syscall=202 compat=0
>> > > ip=0x457679 code=0x0
>> > > [ more repeated kobject uevent messages elided ]
>> > > audit: type=1326 audit(2000000013.460:48): auid=4294967295 uid=0 gid=0
>> > > ses=4294967295 subj==unconfined pid=21585 comm="syz-executor4"
>> > > exe="/root/syz-executor4" sig=31 arch=c000003e syscall=202 compat=0
>> > > ip=0x457679 code=0x0
>> > > audit: type=1326 audit(2000000013.510:49): auid=4294967295 uid=0 gid=0
>> > > ses=4294967295 subj==unconfined pid=21588 comm="syz-executor2"
>> > > exe="/root/syz-executor2" sig=31 arch=c000003e syscall=202 compat=0
>> > > ip=0x457679 code=0x0
>> > > [ more repeated kobject uevent messages elided ]
>> > > audit: type=1326 audit(2000000013.800:50): auid=4294967295 uid=0 gid=0
>> > > ses=4294967295 subj==unconfined pid=21601 comm="syz-executor0"
>> > > exe="/root/syz-executor0" sig=31 arch=c000003e syscall=202 compat=0
>> > > ip=0x457679 code=0x0
>> > > [ more repeated kobject uevent messages elided ]
>> > >
>> > >
>> > > ---
>> > > This bug is generated by a bot. It may contain errors.
>> > > See https://goo.gl/tpsmEJ for more information about syzbot.
>> > > syzbot engineers can be reached at syzkaller@googlegroups.com.
>> > >
>> > > syzbot will keep track of this bug report. See:
>> > > https://goo.gl/tpsmEJ#bug-status-tracking for how to communicate with
>> > > syzbot.
>
> --
> You received this message because you are subscribed to the Google Groups "syzkaller-bugs" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to syzkaller-bugs+unsubscribe@googlegroups.com.
> To view this discussion on the web visit https://groups.google.com/d/msgid/syzkaller-bugs/CAEXW_YSot%2B3AMQ%3DjmDRowmqoOmQmujp9r8Dh18KJJN1EDmyHOw%40mail.gmail.com.
> For more options, visit https://groups.google.com/d/optout.

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2018-09-20 23:33     ` Joel Fernandes
  2018-09-21  6:37       ` Dmitry Vyukov
@ 2018-09-21 23:21       ` Andrew Morton
  2019-01-22 10:02         ` Tetsuo Handa
  1 sibling, 1 reply; 25+ messages in thread
From: Andrew Morton @ 2018-09-21 23:21 UTC (permalink / raw)
  To: Joel Fernandes
  Cc: Todd Kjos, Joel Fernandes, syzbot+a76129f18c89f3e2ddd4, ak,
	Johannes Weiner, jack, jrdr.linux, LKML, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On Thu, 20 Sep 2018 19:33:15 -0400 Joel Fernandes <joel@joelfernandes.org> wrote:

> On Thu, Sep 20, 2018 at 5:12 PM Todd Kjos <tkjos@google.com> wrote:
> >
> > +Joel Fernandes
> >
> > On Thu, Sep 20, 2018 at 2:11 PM Andrew Morton <akpm@linux-foundation.org> wrote:
> > >
> > >
> > > Thanks.  Let's cc the ashmem folks.
> > >
> 
> This should be fixed by https://patchwork.kernel.org/patch/10572477/
> 
> It has Neil Brown's Reviewed-by but it looks like it didn't yet appear
> in anyone's tree; could Greg take this patch?

All is well.  That went into mainline yesterday, with a cc:stable.

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2018-09-20 21:04 possible deadlock in __do_page_fault syzbot
  2018-09-20 21:10 ` Andrew Morton
@ 2018-10-01  5:23 ` syzbot
  1 sibling, 0 replies; 25+ messages in thread
From: syzbot @ 2018-10-01  5:23 UTC (permalink / raw)
  To: ak, akpm, arve, dhowells, dvyukov, gregkh, hannes, jack, jlayton,
	joel, joelaf, jrdr.linux, linux-kernel, linux-mm, maco, mawilcox,
	mgorman, syzkaller-bugs, tkjos, tkjos

syzbot has found a reproducer for the following crash on:

HEAD commit:    17b57b1883c1 Linux 4.19-rc6
git tree:       upstream
console output: https://syzkaller.appspot.com/x/log.txt?x=17920a7e400000
kernel config:  https://syzkaller.appspot.com/x/.config?x=c0af03fe452b65fb
dashboard link: https://syzkaller.appspot.com/bug?extid=a76129f18c89f3e2ddd4
compiler:       gcc (GCC) 8.0.1 20180413 (experimental)
syz repro:      https://syzkaller.appspot.com/x/repro.syz?x=160c0f11400000
C reproducer:   https://syzkaller.appspot.com/x/repro.c?x=1788de81400000

IMPORTANT: if you fix the bug, please add the following tag to the commit:
Reported-by: syzbot+a76129f18c89f3e2ddd4@syzkaller.appspotmail.com

audit: type=1800 audit(1538371187.479:30): pid=5202 uid=0 auid=4294967295  
ses=4294967295 subj=_ op=collect_data cause=failed(directio)  
comm="startpar" name="rmnologin" dev="sda1" ino=2423 res=0

======================================================
WARNING: possible circular locking dependency detected
4.19.0-rc6+ #39 Not tainted
------------------------------------------------------
syz-executor559/5371 is trying to acquire lock:
00000000e34677d1 (&mm->mmap_sem){++++}, at: __do_page_fault+0xb70/0xed0  
arch/x86/mm/fault.c:1331

but task is already holding lock:
00000000b0c242ca (&sb->s_type->i_mutex_key#11){+.+.}, at: inode_lock  
include/linux/fs.h:738 [inline]
00000000b0c242ca (&sb->s_type->i_mutex_key#11){+.+.}, at:  
generic_file_write_iter+0xed/0x870 mm/filemap.c:3289

which lock already depends on the new lock.


the existing dependency chain (in reverse order) is:

-> #2 (&sb->s_type->i_mutex_key#11){+.+.}:
        down_write+0x8a/0x130 kernel/locking/rwsem.c:70
        inode_lock include/linux/fs.h:738 [inline]
        shmem_fallocate+0x18b/0x12c0 mm/shmem.c:2651
        ashmem_shrink_scan+0x238/0x660 drivers/staging/android/ashmem.c:455
        ashmem_ioctl+0x3ae/0x13a0 drivers/staging/android/ashmem.c:797
        vfs_ioctl fs/ioctl.c:46 [inline]
        file_ioctl fs/ioctl.c:501 [inline]
        do_vfs_ioctl+0x1de/0x1720 fs/ioctl.c:685
        ksys_ioctl+0xa9/0xd0 fs/ioctl.c:702
        __do_sys_ioctl fs/ioctl.c:709 [inline]
        __se_sys_ioctl fs/ioctl.c:707 [inline]
        __x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:707
        do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

-> #1 (ashmem_mutex){+.+.}:
        __mutex_lock_common kernel/locking/mutex.c:925 [inline]
        __mutex_lock+0x166/0x1700 kernel/locking/mutex.c:1072
        mutex_lock_nested+0x16/0x20 kernel/locking/mutex.c:1087
        ashmem_mmap+0x55/0x520 drivers/staging/android/ashmem.c:361
        call_mmap include/linux/fs.h:1813 [inline]
        mmap_region+0xe82/0x1cd0 mm/mmap.c:1762
        do_mmap+0xa10/0x1220 mm/mmap.c:1535
        do_mmap_pgoff include/linux/mm.h:2298 [inline]
        vm_mmap_pgoff+0x213/0x2c0 mm/util.c:357
        ksys_mmap_pgoff+0x4da/0x660 mm/mmap.c:1585
        __do_sys_mmap arch/x86/kernel/sys_x86_64.c:100 [inline]
        __se_sys_mmap arch/x86/kernel/sys_x86_64.c:91 [inline]
        __x64_sys_mmap+0xe9/0x1b0 arch/x86/kernel/sys_x86_64.c:91
        do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

-> #0 (&mm->mmap_sem){++++}:
        lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3900
        down_read+0xb0/0x1d0 kernel/locking/rwsem.c:24
        __do_page_fault+0xb70/0xed0 arch/x86/mm/fault.c:1331
        do_page_fault+0xf2/0x7e0 arch/x86/mm/fault.c:1470
        page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1161
        fault_in_pages_readable include/linux/pagemap.h:609 [inline]
        iov_iter_fault_in_readable+0x363/0x450 lib/iov_iter.c:421
        generic_perform_write+0x216/0x6a0 mm/filemap.c:3129
        __generic_file_write_iter+0x26e/0x630 mm/filemap.c:3264
        generic_file_write_iter+0x436/0x870 mm/filemap.c:3292
        call_write_iter include/linux/fs.h:1808 [inline]
        new_sync_write fs/read_write.c:474 [inline]
        __vfs_write+0x6b8/0x9f0 fs/read_write.c:487
        vfs_write+0x1fc/0x560 fs/read_write.c:549
        ksys_write+0x101/0x260 fs/read_write.c:598
        __do_sys_write fs/read_write.c:610 [inline]
        __se_sys_write fs/read_write.c:607 [inline]
        __x64_sys_write+0x73/0xb0 fs/read_write.c:607
        do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

other info that might help us debug this:

Chain exists of:
   &mm->mmap_sem --> ashmem_mutex --> &sb->s_type->i_mutex_key#11

  Possible unsafe locking scenario:

        CPU0                    CPU1
        ----                    ----
   lock(&sb->s_type->i_mutex_key#11);
                                lock(ashmem_mutex);
                                lock(&sb->s_type->i_mutex_key#11);
   lock(&mm->mmap_sem);

  *** DEADLOCK ***

2 locks held by syz-executor559/5371:
  #0: 0000000012b388bb (sb_writers#5){.+.+}, at: file_start_write  
include/linux/fs.h:2759 [inline]
  #0: 0000000012b388bb (sb_writers#5){.+.+}, at: vfs_write+0x42a/0x560  
fs/read_write.c:548
  #1: 00000000b0c242ca (&sb->s_type->i_mutex_key#11){+.+.}, at: inode_lock  
include/linux/fs.h:738 [inline]
  #1: 00000000b0c242ca (&sb->s_type->i_mutex_key#11){+.+.}, at:  
generic_file_write_iter+0xed/0x870 mm/filemap.c:3289

stack backtrace:
CPU: 1 PID: 5371 Comm: syz-executor559 Not tainted 4.19.0-rc6+ #39
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS  
Google 01/01/2011
Call Trace:
  __dump_stack lib/dump_stack.c:77 [inline]
  dump_stack+0x1c4/0x2b4 lib/dump_stack.c:113
  print_circular_bug.isra.33.cold.54+0x1bd/0x27d  
kernel/locking/lockdep.c:1221
  check_prev_add kernel/locking/lockdep.c:1861 [inline]
  check_prevs_add kernel/locking/lockdep.c:1974 [inline]
  validate_chain kernel/locking/lockdep.c:2415 [inline]
  __lock_acquire+0x33e4/0x4ec0 kernel/locking/lockdep.c:3411
  lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3900
  down_read+0xb0/0x1d0 kernel/locking/rwsem.c:24
  __do_page_fault+0xb70/0xed0 arch/x86/mm/fault.c:1331
  do_page_fault+0xf2/0x7e0 arch/x86/mm/fault.c:1470
  page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1161
RIP: 0010:fault_in_pages_readable include/linux/pagemap.h:609 [inline]
RIP: 0010:iov_iter_fault_in_readable+0x363/0x450 lib/iov_iter.c:421
Code: 00 31 ff 44 89 ee 88 55 98 e8 59 27 f4 fd 45 85 ed 74 c2 e9 7d fe ff  
ff e8 3a 26 f4 fd 0f 1f 00 0f ae e8 48 8b 85 28 ff ff ff <8a> 00 0f 1f 00  
31 ff 89 de 88 85 58 ff ff ff e8 29 27 f4 fd 85 db
RSP: 0018:ffff8801bf4e77d0 EFLAGS: 00010293
RAX: 000000002100053f RBX: 0000000000000000 RCX: ffffffff838a8de2
RDX: 0000000000000000 RSI: ffffffff838a8f46 RDI: 0000000000000007
RBP: ffff8801bf4e78a8 R08: ffff8801d81b24c0 R09: fffff94000da818e
R10: fffff94000da818e R11: ffffea0006d40c77 R12: 0000000000000000
R13: 0000000000001000 R14: 0000000000001000 R15: ffff8801bf4e7bc8
  generic_perform_write+0x216/0x6a0 mm/filemap.c:3129
  __generic_file_write_iter+0x26e/0x630 mm/filemap.c:3264
  generic_file_write_iter+0x436/0x870 mm/filemap.c:3292
  call_write_iter include/linux/fs.h:1808 [inline]
  new_sync_write fs/read_write.c:474 [inline]
  __vfs_write+0x6b8/0x9f0 fs/read_write.c:487
  vfs_write+0x1fc/0x560 fs/read_write.c:549
  ksys_write+0x101/0x260 fs/read_write.c:598
  __do_sys_write fs/read_write.c:610 [inline]
  __se_sys_write fs/read_write.c:607 [inline]
  __x64_sys_write+0x73/0xb0 fs/read_write.c:607
  do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290
  entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x446339
Code: e8 2c b3 02 00 48 83 c4 18 c3 0f 1f 80 00 00 00 00 48 89 f8 48 89 f7  
48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff  
ff 0f 83 2b 09 fc ff c3 66 2e 0f 1f 84 00 00 00 00
RSP: 002b:00007ff3053d5da8 EFLAGS: 00000293 ORIG_RAX: 0000000000000001
RAX: ffffffffffffffda RBX: 00000000006dac28 RCX: 0000000000446339
RDX: 00000000fffffda2 RSI: 0000000020000540 RDI: 0000000000000003
RBP: 00000000006dac20 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000293 R12: 00000000006dac2c
R13: dfdd4f11168a8b2b R14: 6873612f7665642f R15: 00000000006dad2c
kobject: 'regulatory.0' (000000004f5af2e3): kobject_uevent_env
kobject: 'regulatory.0' (000000004f5af2e3): fill_kobj_path: path  
= '/devices/platform/regulatory.0'


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2018-09-21 23:21       ` Andrew Morton
@ 2019-01-22 10:02         ` Tetsuo Handa
  2019-01-22 10:12           ` Dmitry Vyukov
  2019-01-22 15:32           ` Joel Fernandes
  0 siblings, 2 replies; 25+ messages in thread
From: Tetsuo Handa @ 2019-01-22 10:02 UTC (permalink / raw)
  To: Andrew Morton, Joel Fernandes
  Cc: Todd Kjos, Joel Fernandes, syzbot+a76129f18c89f3e2ddd4, ak,
	Johannes Weiner, jack, jrdr.linux, LKML, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On 2018/09/22 8:21, Andrew Morton wrote:
> On Thu, 20 Sep 2018 19:33:15 -0400 Joel Fernandes <joel@joelfernandes.org> wrote:
> 
>> On Thu, Sep 20, 2018 at 5:12 PM Todd Kjos <tkjos@google.com> wrote:
>>>
>>> +Joel Fernandes
>>>
>>> On Thu, Sep 20, 2018 at 2:11 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>>>>
>>>>
>>>> Thanks.  Let's cc the ashmem folks.
>>>>
>>
>> This should be fixed by https://patchwork.kernel.org/patch/10572477/
>>
>> It has Neil Brown's Reviewed-by but it looks like it didn't yet appear
>> in anyone's tree; could Greg take this patch?
> 
> All is well.  That went into mainline yesterday, with a cc:stable.
> 

This problem was not fixed at all.

Why do we need to call fallocate() synchronously with ashmem_mutex held?
Why can't we call fallocate() asynchronously from a WQ_MEM_RECLAIM workqueue
context, so that fallocate() runs without ashmem_mutex held?

I don't know how ashmem works, but as far as I can guess, offloading is
possible as long as the other operations that depend on completion of the
fallocate() operation (e.g. read()/mmap(), querying/changing pinned status)
wait for the asynchronous fallocate() to complete (as the draft patch shown
below does).
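
In essence, the draft moves the hole punch to a worker and makes the other
paths wait for in-flight punches. A minimal sketch of just that
synchronization, distilled from the patch below (ashmem bookkeeping and
error handling omitted):

static struct workqueue_struct *ashmem_wq;	/* allocated with WQ_MEM_RECLAIM */
static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);

static void ashmem_shrink_worker(struct work_struct *work)
{
	struct ashmem_shrink_work *w = container_of(work, typeof(*w), work);

	/* Runs without ashmem_mutex held, so the inode_lock() taken
	 * inside fallocate() no longer nests under ashmem_mutex. */
	w->file->f_op->fallocate(w->file,
				 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				 w->start, w->end - w->start);
	fput(w->file);
	kfree(w);
	if (atomic_dec_and_test(&ashmem_shrink_inflight))
		wake_up_all(&ashmem_shrink_wait);
}

	/* Shrinker side: account for the punch and queue it, never
	 * calling fallocate() (and thus inode_lock()) inline. */
	atomic_inc(&ashmem_shrink_inflight);
	queue_work(ashmem_wq, &w->work);

	/* read()/mmap()/pin ioctl side: serialize against pending punches. */
	mutex_lock(&ashmem_mutex);
	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));

The shrinker thus only queues work and returns, which removes the
ashmem_mutex --> inode_lock edge from the dependency chain reported above.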

---
 drivers/staging/android/ashmem.c | 50 ++++++++++++++++++++++++++++----
 1 file changed, 45 insertions(+), 5 deletions(-)

diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 90a8a9f1ac7d..1a890c43a10a 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -75,6 +75,17 @@ struct ashmem_range {
 /* LRU list of unpinned pages, protected by ashmem_mutex */
 static LIST_HEAD(ashmem_lru_list);
 
+static struct workqueue_struct *ashmem_wq;
+static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
+
+struct ashmem_shrink_work {
+	struct work_struct work;
+	struct file *file;
+	loff_t start;
+	loff_t end;
+};
+
 /*
  * long lru_count - The count of pages on our LRU list.
  *
@@ -292,6 +303,7 @@ static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 	int ret = 0;
 
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	/* If size is not set, or set to 0, always return EOF. */
 	if (asma->size == 0)
@@ -359,6 +371,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 	int ret = 0;
 
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	/* user needs to SET_SIZE before mapping */
 	if (!asma->size) {
@@ -421,6 +434,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 	return ret;
 }
 
+static void ashmem_shrink_worker(struct work_struct *work)
+{
+	struct ashmem_shrink_work *w = container_of(work, typeof(*w), work);
+
+	w->file->f_op->fallocate(w->file,
+				 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				 w->start, w->end - w->start);
+	fput(w->file);
+	kfree(w);
+	if (atomic_dec_and_test(&ashmem_shrink_inflight))
+		wake_up_all(&ashmem_shrink_wait);
+}
+
 /*
  * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
  *
@@ -449,12 +475,18 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		return -1;
 
 	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
-		loff_t start = range->pgstart * PAGE_SIZE;
-		loff_t end = (range->pgend + 1) * PAGE_SIZE;
+		struct ashmem_shrink_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);
+
+		if (!w)
+			break;
+		INIT_WORK(&w->work, ashmem_shrink_worker);
+		w->file = range->asma->file;
+		get_file(w->file);
+		w->start = range->pgstart * PAGE_SIZE;
+		w->end = (range->pgend + 1) * PAGE_SIZE;
+		atomic_inc(&ashmem_shrink_inflight);
+		queue_work(ashmem_wq, &w->work);
 
-		range->asma->file->f_op->fallocate(range->asma->file,
-				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-				start, end - start);
 		range->purged = ASHMEM_WAS_PURGED;
 		lru_del(range);
 
@@ -713,6 +745,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 		return -EFAULT;
 
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	if (!asma->file)
 		goto out_unlock;
@@ -883,8 +916,15 @@ static int __init ashmem_init(void)
 		goto out_free2;
 	}
 
+	ashmem_wq = alloc_workqueue("ashmem_wq", WQ_MEM_RECLAIM, 0);
+	if (!ashmem_wq) {
+		pr_err("failed to create workqueue\n");
+		goto out_demisc;
+	}
+
 	ret = register_shrinker(&ashmem_shrinker);
 	if (ret) {
+		destroy_workqueue(ashmem_wq);
 		pr_err("failed to register shrinker!\n");
 		goto out_demisc;
 	}
-- 
2.17.1

^ permalink raw reply related	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-22 10:02         ` Tetsuo Handa
@ 2019-01-22 10:12           ` Dmitry Vyukov
  2019-01-22 10:32             ` Tetsuo Handa
  2019-01-22 15:32           ` Joel Fernandes
  1 sibling, 1 reply; 25+ messages in thread
From: Dmitry Vyukov @ 2019-01-22 10:12 UTC (permalink / raw)
  To: Tetsuo Handa
  Cc: Andrew Morton, Joel Fernandes, Todd Kjos, Joel Fernandes,
	syzbot+a76129f18c89f3e2ddd4, Andi Kleen, Johannes Weiner,
	Jan Kara, Souptick Joarder, LKML, Linux-MM, Matthew Wilcox,
	Mel Gorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On Tue, Jan 22, 2019 at 11:02 AM Tetsuo Handa
<penguin-kernel@i-love.sakura.ne.jp> wrote:
>
> On 2018/09/22 8:21, Andrew Morton wrote:
> > On Thu, 20 Sep 2018 19:33:15 -0400 Joel Fernandes <joel@joelfernandes.org> wrote:
> >
> >> On Thu, Sep 20, 2018 at 5:12 PM Todd Kjos <tkjos@google.com> wrote:
> >>>
> >>> +Joel Fernandes
> >>>
> >>> On Thu, Sep 20, 2018 at 2:11 PM Andrew Morton <akpm@linux-foundation.org> wrote:
> >>>>
> >>>>
> >>>> Thanks.  Let's cc the ashmem folks.
> >>>>
> >>
> >> This should be fixed by https://patchwork.kernel.org/patch/10572477/
> >>
> >> It has Neil Brown's Reviewed-by but it looks like it didn't yet appear
> >> in anyone's tree; could Greg take this patch?
> >
> > All is well.  That went into mainline yesterday, with a cc:stable.
> >
>
> This problem was not fixed at all.

There are at least 2 other open deadlocks involving ashmem:

https://syzkaller.appspot.com/bug?extid=148c2885d71194f18d28
https://syzkaller.appspot.com/bug?extid=4b8b031b89e6b96c4b2e

Does this fix any of these too?


> Why do we need to call fallocate() synchronously with ashmem_mutex held?
> Why can't we call fallocate() asynchronously from a WQ_MEM_RECLAIM workqueue
> context, so that fallocate() runs without ashmem_mutex held?
>
> I don't know how ashmem works, but as far as I can guess, offloading is
> possible as long as the other operations that depend on completion of the
> fallocate() operation (e.g. read()/mmap(), querying/changing pinned status)
> wait for the asynchronous fallocate() to complete (as the draft patch shown
> below does).
>
> [... draft patch quoted in full, trimmed ...]

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-22 10:12           ` Dmitry Vyukov
@ 2019-01-22 10:32             ` Tetsuo Handa
  2019-01-22 13:52               ` Dmitry Vyukov
  0 siblings, 1 reply; 25+ messages in thread
From: Tetsuo Handa @ 2019-01-22 10:32 UTC (permalink / raw)
  To: Dmitry Vyukov
  Cc: Andrew Morton, Joel Fernandes, Todd Kjos, Joel Fernandes,
	syzbot+a76129f18c89f3e2ddd4, Andi Kleen, Johannes Weiner,
	Jan Kara, Souptick Joarder, LKML, Linux-MM, Matthew Wilcox,
	Mel Gorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On 2019/01/22 19:12, Dmitry Vyukov wrote:
> On Tue, Jan 22, 2019 at 11:02 AM Tetsuo Handa
> <penguin-kernel@i-love.sakura.ne.jp> wrote:
>>
>> On 2018/09/22 8:21, Andrew Morton wrote:
>>> On Thu, 20 Sep 2018 19:33:15 -0400 Joel Fernandes <joel@joelfernandes.org> wrote:
>>>
>>>> On Thu, Sep 20, 2018 at 5:12 PM Todd Kjos <tkjos@google.com> wrote:
>>>>>
>>>>> +Joel Fernandes
>>>>>
>>>>> On Thu, Sep 20, 2018 at 2:11 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>>>>>>
>>>>>>
>>>>>> Thanks.  Let's cc the ashmem folks.
>>>>>>
>>>>
>>>> This should be fixed by https://patchwork.kernel.org/patch/10572477/
>>>>
>>>> It has Neil Brown's Reviewed-by but it looks like it didn't yet appear
>>>> in anyone's tree; could Greg take this patch?
>>>
>>> All is well.  That went into mainline yesterday, with a cc:stable.
>>>
>>
>> This problem was not fixed at all.
> 
> There are at least 2 other open deadlocks involving ashmem:

Yes, they involve the ashmem_shrink_scan() => {shmem|vfs}_fallocate() sequence.
This approach tries to eliminate that sequence.

> 
> https://syzkaller.appspot.com/bug?extid=148c2885d71194f18d28
> https://syzkaller.appspot.com/bug?extid=4b8b031b89e6b96c4b2e
> 
> Does this fix any of these too?

I need the ashmem folks to check whether this approach is possible/correct.
But you can ask syzbot to test this patch before they respond.

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-22 10:32             ` Tetsuo Handa
@ 2019-01-22 13:52               ` Dmitry Vyukov
  2019-01-22 13:54                 ` Dmitry Vyukov
  0 siblings, 1 reply; 25+ messages in thread
From: Dmitry Vyukov @ 2019-01-22 13:52 UTC (permalink / raw)
  To: Tetsuo Handa
  Cc: Andrew Morton, Joel Fernandes, Todd Kjos, Joel Fernandes,
	syzbot+a76129f18c89f3e2ddd4, Andi Kleen, Johannes Weiner,
	Jan Kara, Souptick Joarder, LKML, Linux-MM, Matthew Wilcox,
	Mel Gorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

[-- Attachment #1: Type: text/plain, Size: 1787 bytes --]

On Tue, Jan 22, 2019 at 11:32 AM Tetsuo Handa
<penguin-kernel@i-love.sakura.ne.jp> wrote:
>
> On 2019/01/22 19:12, Dmitry Vyukov wrote:
> > On Tue, Jan 22, 2019 at 11:02 AM Tetsuo Handa
> > <penguin-kernel@i-love.sakura.ne.jp> wrote:
> >>
> >> On 2018/09/22 8:21, Andrew Morton wrote:
> >>> On Thu, 20 Sep 2018 19:33:15 -0400 Joel Fernandes <joel@joelfernandes.org> wrote:
> >>>
> >>>> On Thu, Sep 20, 2018 at 5:12 PM Todd Kjos <tkjos@google.com> wrote:
> >>>>>
> >>>>> +Joel Fernandes
> >>>>>
> >>>>> On Thu, Sep 20, 2018 at 2:11 PM Andrew Morton <akpm@linux-foundation.org> wrote:
> >>>>>>
> >>>>>>
> >>>>>> Thanks.  Let's cc the ashmem folks.
> >>>>>>
> >>>>
> >>>> This should be fixed by https://patchwork.kernel.org/patch/10572477/
> >>>>
> > >>>> It has Neil Brown's Reviewed-by but it looks like it didn't yet appear
> > >>>> in anyone's tree; could Greg take this patch?
> >>>
> >>> All is well.  That went into mainline yesterday, with a cc:stable.
> >>>
> >>
> >> This problem was not fixed at all.
> >
> > There are at least 2 other open deadlocks involving ashmem:
>
> Yes, they involve the ashmem_shrink_scan() => {shmem|vfs}_fallocate() sequence.
> This approach tries to eliminate that sequence.
>
> >
> > https://syzkaller.appspot.com/bug?extid=148c2885d71194f18d28
> > https://syzkaller.appspot.com/bug?extid=4b8b031b89e6b96c4b2e
> >
> > Does this fix any of these too?
>
> I need the ashmem folks to check whether this approach is possible/correct.
> But you can ask syzbot to test this patch before they respond.

Right. Let's do this.

As with any kernel change, only you really know how to apply it; the git
tree/base commit info is missing, so let's do the usual guessing and
finger-crossing:

#syz fix: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
master

[-- Attachment #2: ashmem.patch --]
[-- Type: text/x-patch, Size: 3279 bytes --]

diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 90a8a9f1ac7d..1a890c43a10a 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -75,6 +75,17 @@ struct ashmem_range {
 /* LRU list of unpinned pages, protected by ashmem_mutex */
 static LIST_HEAD(ashmem_lru_list);
 
+static struct workqueue_struct *ashmem_wq;
+static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
+
+struct ashmem_shrink_work {
+	struct work_struct work;
+	struct file *file;
+	loff_t start;
+	loff_t end;
+};
+
 /*
  * long lru_count - The count of pages on our LRU list.
  *
@@ -292,6 +303,7 @@ static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 	int ret = 0;
 
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	/* If size is not set, or set to 0, always return EOF. */
 	if (asma->size == 0)
@@ -359,6 +371,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 	int ret = 0;
 
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	/* user needs to SET_SIZE before mapping */
 	if (!asma->size) {
@@ -421,6 +434,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 	return ret;
 }
 
+static void ashmem_shrink_worker(struct work_struct *work)
+{
+	struct ashmem_shrink_work *w = container_of(work, typeof(*w), work);
+
+	w->file->f_op->fallocate(w->file,
+				 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				 w->start, w->end - w->start);
+	fput(w->file);
+	kfree(w);
+	if (atomic_dec_and_test(&ashmem_shrink_inflight))
+		wake_up_all(&ashmem_shrink_wait);
+}
+
 /*
  * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
  *
@@ -449,12 +475,18 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		return -1;
 
 	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
-		loff_t start = range->pgstart * PAGE_SIZE;
-		loff_t end = (range->pgend + 1) * PAGE_SIZE;
+		struct ashmem_shrink_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);
+
+		if (!w)
+			break;
+		INIT_WORK(&w->work, ashmem_shrink_worker);
+		w->file = range->asma->file;
+		get_file(w->file);
+		w->start = range->pgstart * PAGE_SIZE;
+		w->end = (range->pgend + 1) * PAGE_SIZE;
+		atomic_inc(&ashmem_shrink_inflight);
+		queue_work(ashmem_wq, &w->work);
 
-		range->asma->file->f_op->fallocate(range->asma->file,
-				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-				start, end - start);
 		range->purged = ASHMEM_WAS_PURGED;
 		lru_del(range);
 
@@ -713,6 +745,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 		return -EFAULT;
 
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	if (!asma->file)
 		goto out_unlock;
@@ -883,8 +916,15 @@ static int __init ashmem_init(void)
 		goto out_free2;
 	}
 
+	ashmem_wq = alloc_workqueue("ashmem_wq", WQ_MEM_RECLAIM, 0);
+	if (!ashmem_wq) {
+		pr_err("failed to create workqueue\n");
+		goto out_demisc;
+	}
+
 	ret = register_shrinker(&ashmem_shrinker);
 	if (ret) {
+		destroy_workqueue(ashmem_wq);
 		pr_err("failed to register shrinker!\n");
 		goto out_demisc;
 	}

^ permalink raw reply related	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-22 13:52               ` Dmitry Vyukov
@ 2019-01-22 13:54                 ` Dmitry Vyukov
  2019-01-22 14:08                     ` syzbot
  0 siblings, 1 reply; 25+ messages in thread
From: Dmitry Vyukov @ 2019-01-22 13:54 UTC (permalink / raw)
  To: Tetsuo Handa
  Cc: Andrew Morton, Joel Fernandes, Todd Kjos, Joel Fernandes,
	syzbot+a76129f18c89f3e2ddd4, Andi Kleen, Johannes Weiner,
	Jan Kara, Souptick Joarder, LKML, Linux-MM, Mel Gorman,
	syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On Tue, Jan 22, 2019 at 2:52 PM Dmitry Vyukov <dvyukov@google.com> wrote:
>
> On Tue, Jan 22, 2019 at 11:32 AM Tetsuo Handa
> <penguin-kernel@i-love.sakura.ne.jp> wrote:
> >
> > On 2019/01/22 19:12, Dmitry Vyukov wrote:
> > > On Tue, Jan 22, 2019 at 11:02 AM Tetsuo Handa
> > > <penguin-kernel@i-love.sakura.ne.jp> wrote:
> > >>
> > >> On 2018/09/22 8:21, Andrew Morton wrote:
> > >>> On Thu, 20 Sep 2018 19:33:15 -0400 Joel Fernandes <joel@joelfernandes.org> wrote:
> > >>>
> > >>>> On Thu, Sep 20, 2018 at 5:12 PM Todd Kjos <tkjos@google.com> wrote:
> > >>>>>
> > >>>>> +Joel Fernandes
> > >>>>>
> > >>>>> On Thu, Sep 20, 2018 at 2:11 PM Andrew Morton <akpm@linux-foundation.org> wrote:
> > >>>>>>
> > >>>>>>
> > >>>>>> Thanks.  Let's cc the ashmem folks.
> > >>>>>>
> > >>>>
> > >>>> This should be fixed by https://patchwork.kernel.org/patch/10572477/
> > >>>>
> > >>>> It has Neil Brown's Reviewed-by but it looks like it didn't yet appear
> > >>>> in anyone's tree; could Greg take this patch?
> > >>>
> > >>> All is well.  That went into mainline yesterday, with a cc:stable.
> > >>>
> > >>
> > >> This problem was not fixed at all.
> > >
> > > There are at least 2 other open deadlocks involving ashmem:
> >
> > Yes, they involve the ashmem_shrink_scan() => {shmem|vfs}_fallocate() sequence.
> > This approach tries to eliminate that sequence.
> >
> > >
> > > https://syzkaller.appspot.com/bug?extid=148c2885d71194f18d28
> > > https://syzkaller.appspot.com/bug?extid=4b8b031b89e6b96c4b2e
> > >
> > > Does this fix any of these too?
> >
> > I need the ashmem folks to check whether this approach is possible/correct.
> > But you can ask syzbot to test this patch before they respond.
>
> Right. Let's do this.
>
> As with any kernel change, only you really know how to apply it; the git
> tree/base commit info is missing, so let's do the usual guessing and
> finger-crossing:
>
> #syz fix: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
> master

This of course should be:

#syz test: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
master

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
@ 2019-01-22 14:08                     ` syzbot
  0 siblings, 0 replies; 25+ messages in thread
From: syzbot @ 2019-01-22 14:08 UTC (permalink / raw)
  To: ak, akpm, arve, dvyukov, gregkh, hannes, jack, joel, joelaf,
	jrdr.linux, linux-kernel, linux-mm, maco, mgorman,
	penguin-kernel, syzkaller-bugs, tkjos, tkjos

Hello,

syzbot has tested the proposed patch, but the reproducer still triggered a
crash:
possible deadlock in __do_page_fault

8021q: adding VLAN 0 to HW filter on device team0
8021q: adding VLAN 0 to HW filter on device team0
8021q: adding VLAN 0 to HW filter on device team0
8021q: adding VLAN 0 to HW filter on device team0
======================================================
WARNING: possible circular locking dependency detected
5.0.0-rc3+ #1 Not tainted
------------------------------------------------------
syz-executor2/7371 is trying to acquire lock:
00000000435ca279 (&mm->mmap_sem){++++}, at: do_user_addr_fault  
arch/x86/mm/fault.c:1426 [inline]
00000000435ca279 (&mm->mmap_sem){++++}, at: __do_page_fault+0x9c2/0xd60  
arch/x86/mm/fault.c:1541

but task is already holding lock:
00000000b64def52 (&sb->s_type->i_mutex_key#11){+.+.}, at: inode_lock  
include/linux/fs.h:757 [inline]
00000000b64def52 (&sb->s_type->i_mutex_key#11){+.+.}, at:  
generic_file_write_iter+0xe5/0x6a0 mm/filemap.c:3358

which lock already depends on the new lock.


the existing dependency chain (in reverse order) is:

-> #2 (&sb->s_type->i_mutex_key#11){+.+.}:
        down_write+0x8a/0x130 kernel/locking/rwsem.c:70
        inode_lock include/linux/fs.h:757 [inline]
        shmem_fallocate+0x168/0x1200 mm/shmem.c:2633
        ashmem_shrink_scan drivers/staging/android/ashmem.c:455 [inline]
        ashmem_shrink_scan+0x239/0x630 drivers/staging/android/ashmem.c:439
        ashmem_ioctl+0x38a/0x12c0 drivers/staging/android/ashmem.c:797
        vfs_ioctl fs/ioctl.c:46 [inline]
        file_ioctl fs/ioctl.c:509 [inline]
        do_vfs_ioctl+0x107b/0x17d0 fs/ioctl.c:696
        ksys_ioctl+0xab/0xd0 fs/ioctl.c:713
        __do_sys_ioctl fs/ioctl.c:720 [inline]
        __se_sys_ioctl fs/ioctl.c:718 [inline]
        __x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:718
        do_syscall_64+0x1a3/0x800 arch/x86/entry/common.c:290
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

-> #1 (ashmem_mutex){+.+.}:
        __mutex_lock_common kernel/locking/mutex.c:925 [inline]
        __mutex_lock+0x12f/0x1670 kernel/locking/mutex.c:1072
        mutex_lock_nested+0x16/0x20 kernel/locking/mutex.c:1087
        ashmem_mmap+0x55/0x520 drivers/staging/android/ashmem.c:361
        call_mmap include/linux/fs.h:1867 [inline]
        mmap_region+0xde5/0x1ca0 mm/mmap.c:1786
        do_mmap+0xa09/0x1220 mm/mmap.c:1559
        do_mmap_pgoff include/linux/mm.h:2379 [inline]
        vm_mmap_pgoff+0x20b/0x2b0 mm/util.c:350
        ksys_mmap_pgoff+0x4f8/0x650 mm/mmap.c:1609
        __do_sys_mmap arch/x86/kernel/sys_x86_64.c:100 [inline]
        __se_sys_mmap arch/x86/kernel/sys_x86_64.c:91 [inline]
        __x64_sys_mmap+0xe9/0x1b0 arch/x86/kernel/sys_x86_64.c:91
        do_syscall_64+0x1a3/0x800 arch/x86/entry/common.c:290
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

-> #0 (&mm->mmap_sem){++++}:
        lock_acquire+0x1db/0x570 kernel/locking/lockdep.c:3841
        down_read+0x8d/0x120 kernel/locking/rwsem.c:24
        do_user_addr_fault arch/x86/mm/fault.c:1426 [inline]
        __do_page_fault+0x9c2/0xd60 arch/x86/mm/fault.c:1541
        do_page_fault+0xe6/0x7d8 arch/x86/mm/fault.c:1572
        page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1143
        fault_in_pages_readable include/linux/pagemap.h:611 [inline]
        iov_iter_fault_in_readable+0x377/0x450 lib/iov_iter.c:425
        generic_perform_write+0x202/0x6b0 mm/filemap.c:3198
        __generic_file_write_iter+0x25e/0x630 mm/filemap.c:3333
        generic_file_write_iter+0x34e/0x6a0 mm/filemap.c:3361
        call_write_iter include/linux/fs.h:1862 [inline]
        new_sync_write fs/read_write.c:474 [inline]
        __vfs_write+0x764/0xb40 fs/read_write.c:487
        vfs_write+0x20c/0x580 fs/read_write.c:549
        ksys_write+0x105/0x260 fs/read_write.c:598
        __do_sys_write fs/read_write.c:610 [inline]
        __se_sys_write fs/read_write.c:607 [inline]
        __x64_sys_write+0x73/0xb0 fs/read_write.c:607
        do_syscall_64+0x1a3/0x800 arch/x86/entry/common.c:290
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

other info that might help us debug this:

Chain exists of:
   &mm->mmap_sem --> ashmem_mutex --> &sb->s_type->i_mutex_key#11

  Possible unsafe locking scenario:

        CPU0                    CPU1
        ----                    ----
   lock(&sb->s_type->i_mutex_key#11);
                                lock(ashmem_mutex);
                                lock(&sb->s_type->i_mutex_key#11);
   lock(&mm->mmap_sem);

  *** DEADLOCK ***

2 locks held by syz-executor2/7371:
  #0: 00000000cdd032c7 (sb_writers#5){.+.+}, at: file_start_write  
include/linux/fs.h:2815 [inline]
  #0: 00000000cdd032c7 (sb_writers#5){.+.+}, at: vfs_write+0x429/0x580  
fs/read_write.c:548
  #1: 00000000b64def52 (&sb->s_type->i_mutex_key#11){+.+.}, at: inode_lock  
include/linux/fs.h:757 [inline]
  #1: 00000000b64def52 (&sb->s_type->i_mutex_key#11){+.+.}, at:  
generic_file_write_iter+0xe5/0x6a0 mm/filemap.c:3358

stack backtrace:
CPU: 1 PID: 7371 Comm: syz-executor2 Not tainted 5.0.0-rc3+ #1
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS  
Google 01/01/2011
Call Trace:
  __dump_stack lib/dump_stack.c:77 [inline]
  dump_stack+0x1db/0x2d0 lib/dump_stack.c:113
  print_circular_bug.isra.0.cold+0x1cc/0x28f kernel/locking/lockdep.c:1224
  check_prev_add kernel/locking/lockdep.c:1866 [inline]
  check_prevs_add kernel/locking/lockdep.c:1979 [inline]
  validate_chain kernel/locking/lockdep.c:2350 [inline]
  __lock_acquire+0x3014/0x4a30 kernel/locking/lockdep.c:3338
  lock_acquire+0x1db/0x570 kernel/locking/lockdep.c:3841
  down_read+0x8d/0x120 kernel/locking/rwsem.c:24
  do_user_addr_fault arch/x86/mm/fault.c:1426 [inline]
  __do_page_fault+0x9c2/0xd60 arch/x86/mm/fault.c:1541
  do_page_fault+0xe6/0x7d8 arch/x86/mm/fault.c:1572
  page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1143
RIP: 0010:fault_in_pages_readable include/linux/pagemap.h:611 [inline]
RIP: 0010:iov_iter_fault_in_readable+0x377/0x450 lib/iov_iter.c:425
Code: 89 f6 41 88 57 e0 e8 b8 2f f4 fd 45 85 f6 74 c1 e9 70 fe ff ff e8 29  
2e f4 fd 0f 1f 00 0f ae e8 44 89 f0 48 8b 8d 68 ff ff ff <8a> 11 89 c3 0f  
1f 00 41 88 57 d0 31 ff 89 de e8 85 2f f4 fd 85 db
RSP: 0018:ffff8881c52478a8 EFLAGS: 00010293
RAX: 0000000000000000 RBX: 0000000000000000 RCX: 000000002020053f
RDX: 0000000000000000 RSI: ffffffff838db067 RDI: 0000000000000007
RBP: ffff8881c5247948 R08: ffff8881c4c18240 R09: fffff94000d13e07
R10: fffff94000d13e06 R11: ffffea000689f037 R12: 0000000000001000
R13: 0000000000001000 R14: 0000000000000000 R15: ffff8881c5247920
  generic_perform_write+0x202/0x6b0 mm/filemap.c:3198
  __generic_file_write_iter+0x25e/0x630 mm/filemap.c:3333
  generic_file_write_iter+0x34e/0x6a0 mm/filemap.c:3361
  call_write_iter include/linux/fs.h:1862 [inline]
  new_sync_write fs/read_write.c:474 [inline]
  __vfs_write+0x764/0xb40 fs/read_write.c:487
  vfs_write+0x20c/0x580 fs/read_write.c:549
  ksys_write+0x105/0x260 fs/read_write.c:598
  __do_sys_write fs/read_write.c:610 [inline]
  __se_sys_write fs/read_write.c:607 [inline]
  __x64_sys_write+0x73/0xb0 fs/read_write.c:607
  do_syscall_64+0x1a3/0x800 arch/x86/entry/common.c:290
  entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x457579
Code: 1d b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7  
48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff  
ff 0f 83 eb b3 fb ff c3 66 2e 0f 1f 84 00 00 00 00
RSP: 002b:00007f51cc66ac78 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000000457579
RDX: 00000000fffffda2 RSI: 0000000020000540 RDI: 0000000000000003
RBP: 000000000072bf00 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f51cc66b6d4
R13: 00000000004c554e R14: 00000000004d8e68 R15: 00000000ffffffff
[ repeated kobject uevent/fill_kobj_path messages for loop0-loop5 elided ]
kobject: 'loop4' (00000000e9e52bda): fill_kobj_path: path  
= '/devices/virtual/block/loop4'
kobject: 'loop2' (0000000019bfd72c): kobject_uevent_env
kobject: 'loop2' (0000000019bfd72c): fill_kobj_path: path  
= '/devices/virtual/block/loop2'
kobject: 'loop1' (00000000893eaf09): kobject_uevent_env
kobject: 'loop1' (00000000893eaf09): fill_kobj_path: path  
= '/devices/virtual/block/loop1'
kobject: 'loop0' (00000000a9b29aa3): kobject_uevent_env
kobject: 'loop0' (00000000a9b29aa3): fill_kobj_path: path  
= '/devices/virtual/block/loop0'
kobject: 'loop3' (00000000bfa624b6): kobject_uevent_env
kobject: 'loop3' (00000000bfa624b6): fill_kobj_path: path  
= '/devices/virtual/block/loop3'
kobject: 'loop2' (0000000019bfd72c): kobject_uevent_env
kobject: 'loop2' (0000000019bfd72c): fill_kobj_path: path  
= '/devices/virtual/block/loop2'
kobject: 'loop4' (00000000e9e52bda): kobject_uevent_env
kobject: 'loop4' (00000000e9e52bda): fill_kobj_path: path  
= '/devices/virtual/block/loop4'
kobject: 'loop5' (000000008a2391db): kobject_uevent_env
kobject: 'loop5' (000000008a2391db): fill_kobj_path: path  
= '/devices/virtual/block/loop5'
kobject: 'loop0' (00000000a9b29aa3): kobject_uevent_env
kobject: 'loop0' (00000000a9b29aa3): fill_kobj_path: path  
= '/devices/virtual/block/loop0'
kobject: 'loop1' (00000000893eaf09): kobject_uevent_env
kobject: 'loop1' (00000000893eaf09): fill_kobj_path: path  
= '/devices/virtual/block/loop1'
kobject: 'loop4' (00000000e9e52bda): kobject_uevent_env
kobject: 'loop4' (00000000e9e52bda): fill_kobj_path: path  
= '/devices/virtual/block/loop4'
kobject: 'loop3' (00000000bfa624b6): kobject_uevent_env
kobject: 'loop3' (00000000bfa624b6): fill_kobj_path: path  
= '/devices/virtual/block/loop3'
kobject: 'loop5' (000000008a2391db): kobject_uevent_env
kobject: 'loop5' (000000008a2391db): fill_kobj_path: path  
= '/devices/virtual/block/loop5'
kobject: 'loop0' (00000000a9b29aa3): kobject_uevent_env
kobject: 'loop0' (00000000a9b29aa3): fill_kobj_path: path  
= '/devices/virtual/block/loop0'
kobject: 'loop1' (00000000893eaf09): kobject_uevent_env
kobject: 'loop1' (00000000893eaf09): fill_kobj_path: path  
= '/devices/virtual/block/loop1'
kobject: 'loop2' (0000000019bfd72c): kobject_uevent_env
kobject: 'loop2' (0000000019bfd72c): fill_kobj_path: path  
= '/devices/virtual/block/loop2'
kobject: 'loop3' (00000000bfa624b6): kobject_uevent_env
kobject: 'loop3' (00000000bfa624b6): fill_kobj_path: path  
= '/devices/virtual/block/loop3'
kobject: 'loop4' (00000000e9e52bda): kobject_uevent_env
kobject: 'loop4' (00000000e9e52bda): fill_kobj_path: path  
= '/devices/virtual/block/loop4'
kobject: 'loop0' (00000000a9b29aa3): kobject_uevent_env
kobject: 'loop0' (00000000a9b29aa3): fill_kobj_path: path  
= '/devices/virtual/block/loop0'
kobject: 'loop1' (00000000893eaf09): kobject_uevent_env
kobject: 'loop1' (00000000893eaf09): fill_kobj_path: path  
= '/devices/virtual/block/loop1'
kobject: 'loop5' (000000008a2391db): kobject_uevent_env
kobject: 'loop5' (000000008a2391db): fill_kobj_path: path  
= '/devices/virtual/block/loop5'
kobject: 'loop3' (00000000bfa624b6): kobject_uevent_env
kobject: 'loop3' (00000000bfa624b6): fill_kobj_path: path  
= '/devices/virtual/block/loop3'
kobject: 'loop2' (0000000019bfd72c): kobject_uevent_env
kobject: 'loop2' (0000000019bfd72c): fill_kobj_path: path  
= '/devices/virtual/block/loop2'
kobject: 'loop0' (00000000a9b29aa3): kobject_uevent_env
kobject: 'loop0' (00000000a9b29aa3): fill_kobj_path: path  
= '/devices/virtual/block/loop0'
kobject: 'loop5' (000000008a2391db): kobject_uevent_env
kobject: 'loop5' (000000008a2391db): fill_kobj_path: path  
= '/devices/virtual/block/loop5'
kobject: 'loop2' (0000000019bfd72c): kobject_uevent_env
kobject: 'loop2' (0000000019bfd72c): fill_kobj_path: path  
= '/devices/virtual/block/loop2'
kobject: 'loop4' (00000000e9e52bda): kobject_uevent_env
kobject: 'loop4' (00000000e9e52bda): fill_kobj_path: path  
= '/devices/virtual/block/loop4'
kobject: 'loop3' (00000000bfa624b6): kobject_uevent_env
kobject: 'loop3' (00000000bfa624b6): fill_kobj_path: path  
= '/devices/virtual/block/loop3'
kobject: 'loop5' (000000008a2391db): kobject_uevent_env
kobject: 'loop5' (000000008a2391db): fill_kobj_path: path  
= '/devices/virtual/block/loop5'
kobject: 'loop1' (00000000893eaf09): kobject_uevent_env
kobject: 'loop1' (00000000893eaf09): fill_kobj_path: path  
= '/devices/virtual/block/loop1'
kobject: 'loop2' (0000000019bfd72c): kobject_uevent_env
kobject: 'loop2' (0000000019bfd72c): fill_kobj_path: path  
= '/devices/virtual/block/loop2'
kobject: 'loop3' (00000000bfa624b6): kobject_uevent_env
kobject: 'loop3' (00000000bfa624b6): fill_kobj_path: path  
= '/devices/virtual/block/loop3'
kobject: 'loop5' (000000008a2391db): kobject_uevent_env
kobject: 'loop5' (000000008a2391db): fill_kobj_path: path  
= '/devices/virtual/block/loop5'
kobject: 'loop1' (00000000893eaf09): kobject_uevent_env
kobject: 'loop1' (00000000893eaf09): fill_kobj_path: path  
= '/devices/virtual/block/loop1'
kobject: 'loop0' (00000000a9b29aa3): kobject_uevent_env
kobject: 'loop0' (00000000a9b29aa3): fill_kobj_path: path  
= '/devices/virtual/block/loop0'
kobject: 'loop4' (00000000e9e52bda): kobject_uevent_env
kobject: 'loop4' (00000000e9e52bda): fill_kobj_path: path  
= '/devices/virtual/block/loop4'
kobject: 'loop3' (00000000bfa624b6): kobject_uevent_env
kobject: 'loop3' (00000000bfa624b6): fill_kobj_path: path  
= '/devices/virtual/block/loop3'
kobject: 'loop5' (000000008a2391db): kobject_uevent_env
kobject: 'loop5' (000000008a2391db): fill_kobj_path: path  
= '/devices/virtual/block/loop5'
kobject: 'loop4' (00000000e9e52bda): kobject_uevent_env
kobject: 'loop4' (00000000e9e52bda): fill_kobj_path: path  
= '/devices/virtual/block/loop4'
kobject: 'loop0' (00000000a9b29aa3): kobject_uevent_env
kobject: 'loop0' (00000000a9b29aa3): fill_kobj_path: path  
= '/devices/virtual/block/loop0'
kobject: 'loop1' (00000000893eaf09): kobject_uevent_env
kobject: 'loop1' (00000000893eaf09): fill_kobj_path: path  
= '/devices/virtual/block/loop1'
kobject: 'loop2' (0000000019bfd72c): kobject_uevent_env
kobject: 'loop2' (0000000019bfd72c): fill_kobj_path: path  
= '/devices/virtual/block/loop2'
kobject: 'loop4' (00000000e9e52bda): kobject_uevent_env
kobject: 'loop4' (00000000e9e52bda): fill_kobj_path: path  
= '/devices/virtual/block/loop4'
kobject: 'loop3' (00000000bfa624b6): kobject_uevent_env
kobject: 'loop3' (00000000bfa624b6): fill_kobj_path: path  
= '/devices/virtual/block/loop3'
kobject: 'loop5' (000000008a2391db): kobject_uevent_env
kobject: 'loop5' (000000008a2391db): fill_kobj_path: path  
= '/devices/virtual/block/loop5'
kobject: 'loop2' (0000000019bfd72c): kobject_uevent_env
kobject: 'loop2' (0000000019bfd72c): fill_kobj_path: path  
= '/devices/virtual/block/loop2'
kobject: 'loop1' (00000000893eaf09): kobject_uevent_env
kobject: 'loop1' (00000000893eaf09): fill_kobj_path: path  
= '/devices/virtual/block/loop1'
kobject: 'loop4' (00000000e9e52bda): kobject_uevent_env
kobject: 'loop4' (00000000e9e52bda): fill_kobj_path: path  
= '/devices/virtual/block/loop4'


Tested on:

commit:         48b161983ae5 Merge tag 'xarray-5.0-rc3' of git://git.infra..
git tree:       upstream
console output: https://syzkaller.appspot.com/x/log.txt?x=13d8ae5b400000
kernel config:  https://syzkaller.appspot.com/x/.config?x=ae7255cd515c8fef
compiler:       gcc (GCC) 9.0.0 20181231 (experimental)


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
@ 2019-01-22 14:08                     ` syzbot
  0 siblings, 0 replies; 25+ messages in thread
From: syzbot @ 2019-01-22 14:08 UTC (permalink / raw)
  To: ak, akpm, arve, dvyukov, gregkh, hannes, jack, joel, joelaf,
	jrdr.linux, linux-kernel, linux-mm, maco, mgorman,
	penguin-kernel, syzkaller-bugs, tkjos, tkjos

Hello,

syzbot has tested the proposed patch but the reproducer still triggered a
crash:
possible deadlock in __do_page_fault

8021q: adding VLAN 0 to HW filter on device team0
======================================================
WARNING: possible circular locking dependency detected
5.0.0-rc3+ #1 Not tainted
------------------------------------------------------
syz-executor2/7371 is trying to acquire lock:
00000000435ca279 (&mm->mmap_sem){++++}, at: do_user_addr_fault  
arch/x86/mm/fault.c:1426 [inline]
00000000435ca279 (&mm->mmap_sem){++++}, at: __do_page_fault+0x9c2/0xd60  
arch/x86/mm/fault.c:1541

but task is already holding lock:
00000000b64def52 (&sb->s_type->i_mutex_key#11){+.+.}, at: inode_lock  
include/linux/fs.h:757 [inline]
00000000b64def52 (&sb->s_type->i_mutex_key#11){+.+.}, at:  
generic_file_write_iter+0xe5/0x6a0 mm/filemap.c:3358

which lock already depends on the new lock.


the existing dependency chain (in reverse order) is:

-> #2 (&sb->s_type->i_mutex_key#11){+.+.}:
        down_write+0x8a/0x130 kernel/locking/rwsem.c:70
        inode_lock include/linux/fs.h:757 [inline]
        shmem_fallocate+0x168/0x1200 mm/shmem.c:2633
        ashmem_shrink_scan drivers/staging/android/ashmem.c:455 [inline]
        ashmem_shrink_scan+0x239/0x630 drivers/staging/android/ashmem.c:439
        ashmem_ioctl+0x38a/0x12c0 drivers/staging/android/ashmem.c:797
        vfs_ioctl fs/ioctl.c:46 [inline]
        file_ioctl fs/ioctl.c:509 [inline]
        do_vfs_ioctl+0x107b/0x17d0 fs/ioctl.c:696
        ksys_ioctl+0xab/0xd0 fs/ioctl.c:713
        __do_sys_ioctl fs/ioctl.c:720 [inline]
        __se_sys_ioctl fs/ioctl.c:718 [inline]
        __x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:718
        do_syscall_64+0x1a3/0x800 arch/x86/entry/common.c:290
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

-> #1 (ashmem_mutex){+.+.}:
        __mutex_lock_common kernel/locking/mutex.c:925 [inline]
        __mutex_lock+0x12f/0x1670 kernel/locking/mutex.c:1072
        mutex_lock_nested+0x16/0x20 kernel/locking/mutex.c:1087
        ashmem_mmap+0x55/0x520 drivers/staging/android/ashmem.c:361
        call_mmap include/linux/fs.h:1867 [inline]
        mmap_region+0xde5/0x1ca0 mm/mmap.c:1786
        do_mmap+0xa09/0x1220 mm/mmap.c:1559
        do_mmap_pgoff include/linux/mm.h:2379 [inline]
        vm_mmap_pgoff+0x20b/0x2b0 mm/util.c:350
        ksys_mmap_pgoff+0x4f8/0x650 mm/mmap.c:1609
        __do_sys_mmap arch/x86/kernel/sys_x86_64.c:100 [inline]
        __se_sys_mmap arch/x86/kernel/sys_x86_64.c:91 [inline]
        __x64_sys_mmap+0xe9/0x1b0 arch/x86/kernel/sys_x86_64.c:91
        do_syscall_64+0x1a3/0x800 arch/x86/entry/common.c:290
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

-> #0 (&mm->mmap_sem){++++}:
        lock_acquire+0x1db/0x570 kernel/locking/lockdep.c:3841
        down_read+0x8d/0x120 kernel/locking/rwsem.c:24
        do_user_addr_fault arch/x86/mm/fault.c:1426 [inline]
        __do_page_fault+0x9c2/0xd60 arch/x86/mm/fault.c:1541
        do_page_fault+0xe6/0x7d8 arch/x86/mm/fault.c:1572
        page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1143
        fault_in_pages_readable include/linux/pagemap.h:611 [inline]
        iov_iter_fault_in_readable+0x377/0x450 lib/iov_iter.c:425
        generic_perform_write+0x202/0x6b0 mm/filemap.c:3198
        __generic_file_write_iter+0x25e/0x630 mm/filemap.c:3333
        generic_file_write_iter+0x34e/0x6a0 mm/filemap.c:3361
        call_write_iter include/linux/fs.h:1862 [inline]
        new_sync_write fs/read_write.c:474 [inline]
        __vfs_write+0x764/0xb40 fs/read_write.c:487
        vfs_write+0x20c/0x580 fs/read_write.c:549
        ksys_write+0x105/0x260 fs/read_write.c:598
        __do_sys_write fs/read_write.c:610 [inline]
        __se_sys_write fs/read_write.c:607 [inline]
        __x64_sys_write+0x73/0xb0 fs/read_write.c:607
        do_syscall_64+0x1a3/0x800 arch/x86/entry/common.c:290
        entry_SYSCALL_64_after_hwframe+0x49/0xbe

other info that might help us debug this:

Chain exists of:
   &mm->mmap_sem --> ashmem_mutex --> &sb->s_type->i_mutex_key#11

  Possible unsafe locking scenario:

        CPU0                    CPU1
        ----                    ----
   lock(&sb->s_type->i_mutex_key#11);
                                lock(ashmem_mutex);
                                lock(&sb->s_type->i_mutex_key#11);
   lock(&mm->mmap_sem);

  *** DEADLOCK ***

2 locks held by syz-executor2/7371:
  #0: 00000000cdd032c7 (sb_writers#5){.+.+}, at: file_start_write  
include/linux/fs.h:2815 [inline]
  #0: 00000000cdd032c7 (sb_writers#5){.+.+}, at: vfs_write+0x429/0x580  
fs/read_write.c:548
  #1: 00000000b64def52 (&sb->s_type->i_mutex_key#11){+.+.}, at: inode_lock  
include/linux/fs.h:757 [inline]
  #1: 00000000b64def52 (&sb->s_type->i_mutex_key#11){+.+.}, at:  
generic_file_write_iter+0xe5/0x6a0 mm/filemap.c:3358

stack backtrace:
CPU: 1 PID: 7371 Comm: syz-executor2 Not tainted 5.0.0-rc3+ #1
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS  
Google 01/01/2011
Call Trace:
  __dump_stack lib/dump_stack.c:77 [inline]
  dump_stack+0x1db/0x2d0 lib/dump_stack.c:113
  print_circular_bug.isra.0.cold+0x1cc/0x28f kernel/locking/lockdep.c:1224
  check_prev_add kernel/locking/lockdep.c:1866 [inline]
  check_prevs_add kernel/locking/lockdep.c:1979 [inline]
  validate_chain kernel/locking/lockdep.c:2350 [inline]
  __lock_acquire+0x3014/0x4a30 kernel/locking/lockdep.c:3338
  lock_acquire+0x1db/0x570 kernel/locking/lockdep.c:3841
  down_read+0x8d/0x120 kernel/locking/rwsem.c:24
  do_user_addr_fault arch/x86/mm/fault.c:1426 [inline]
  __do_page_fault+0x9c2/0xd60 arch/x86/mm/fault.c:1541
  do_page_fault+0xe6/0x7d8 arch/x86/mm/fault.c:1572
  page_fault+0x1e/0x30 arch/x86/entry/entry_64.S:1143
RIP: 0010:fault_in_pages_readable include/linux/pagemap.h:611 [inline]
RIP: 0010:iov_iter_fault_in_readable+0x377/0x450 lib/iov_iter.c:425
Code: 89 f6 41 88 57 e0 e8 b8 2f f4 fd 45 85 f6 74 c1 e9 70 fe ff ff e8 29  
2e f4 fd 0f 1f 00 0f ae e8 44 89 f0 48 8b 8d 68 ff ff ff <8a> 11 89 c3 0f  
1f 00 41 88 57 d0 31 ff 89 de e8 85 2f f4 fd 85 db
RSP: 0018:ffff8881c52478a8 EFLAGS: 00010293
RAX: 0000000000000000 RBX: 0000000000000000 RCX: 000000002020053f
RDX: 0000000000000000 RSI: ffffffff838db067 RDI: 0000000000000007
RBP: ffff8881c5247948 R08: ffff8881c4c18240 R09: fffff94000d13e07
R10: fffff94000d13e06 R11: ffffea000689f037 R12: 0000000000001000
R13: 0000000000001000 R14: 0000000000000000 R15: ffff8881c5247920
  generic_perform_write+0x202/0x6b0 mm/filemap.c:3198
  __generic_file_write_iter+0x25e/0x630 mm/filemap.c:3333
  generic_file_write_iter+0x34e/0x6a0 mm/filemap.c:3361
  call_write_iter include/linux/fs.h:1862 [inline]
  new_sync_write fs/read_write.c:474 [inline]
  __vfs_write+0x764/0xb40 fs/read_write.c:487
  vfs_write+0x20c/0x580 fs/read_write.c:549
  ksys_write+0x105/0x260 fs/read_write.c:598
  __do_sys_write fs/read_write.c:610 [inline]
  __se_sys_write fs/read_write.c:607 [inline]
  __x64_sys_write+0x73/0xb0 fs/read_write.c:607
  do_syscall_64+0x1a3/0x800 arch/x86/entry/common.c:290
  entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x457579
Code: 1d b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7  
48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff  
ff 0f 83 eb b3 fb ff c3 66 2e 0f 1f 84 00 00 00 00
RSP: 002b:00007f51cc66ac78 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000000457579
RDX: 00000000fffffda2 RSI: 0000000020000540 RDI: 0000000000000003
RBP: 000000000072bf00 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f51cc66b6d4
R13: 00000000004c554e R14: 00000000004d8e68 R15: 00000000ffffffff
[ hundreds of repeated kobject_uevent_env/fill_kobj_path console messages for /devices/virtual/block/loop0-loop5 trimmed ]


Tested on:

commit:         48b161983ae5 Merge tag 'xarray-5.0-rc3' of git://git.infra..
git tree:       upstream
console output: https://syzkaller.appspot.com/x/log.txt?x=13d8ae5b400000
kernel config:  https://syzkaller.appspot.com/x/.config?x=ae7255cd515c8fef
compiler:       gcc (GCC) 9.0.0 20181231 (experimental)


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-22 10:02         ` Tetsuo Handa
  2019-01-22 10:12           ` Dmitry Vyukov
@ 2019-01-22 15:32           ` Joel Fernandes
  2019-01-23  2:01             ` Tetsuo Handa
  1 sibling, 1 reply; 25+ messages in thread
From: Joel Fernandes @ 2019-01-22 15:32 UTC (permalink / raw)
  To: Tetsuo Handa
  Cc: Andrew Morton, Todd Kjos, syzbot+a76129f18c89f3e2ddd4, ak,
	Johannes Weiner, jack, jrdr.linux, LKML, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On Tue, Jan 22, 2019 at 07:02:35PM +0900, Tetsuo Handa wrote:
> On 2018/09/22 8:21, Andrew Morton wrote:
> > On Thu, 20 Sep 2018 19:33:15 -0400 Joel Fernandes <joel@joelfernandes.org> wrote:
> > 
> >> On Thu, Sep 20, 2018 at 5:12 PM Todd Kjos <tkjos@google.com> wrote:
> >>>
> >>> +Joel Fernandes
> >>>
> >>> On Thu, Sep 20, 2018 at 2:11 PM Andrew Morton <akpm@linux-foundation.org> wrote:
> >>>>
> >>>>
> >>>> Thanks.  Let's cc the ashmem folks.
> >>>>
> >>
> >> This should be fixed by https://patchwork.kernel.org/patch/10572477/
> >>
> >> It has Neil Brown's Reviewed-by but looks like didn't yet appear in
> >> anyone's tree, could Greg take this patch?
> > 
> > All is well.  That went into mainline yesterday, with a cc:stable.
> > 
> 
> This problem was not fixed at all.

Why do you say so? Are you able to reproduce the issue? If so, could you
share the splat?

Many splats involve issues with the inode lock class being the same for
files and directories; the above patch does fix those issues.

> Why do we need to call fallocate() synchronously with ashmem_mutex held?
> Why can't we call fallocate() asynchronously from a WQ_MEM_RECLAIM workqueue
> context so that we can call fallocate() with ashmem_mutex not held?
> 
> I don't know how ashmem works, but as far as I can guess, offloading is
> possible as long as other operations which depend on the completion of the
> fallocate() operation (e.g. read()/mmap(), querying/changing pinned status)
> wait for the asynchronous fallocate() operation to complete (like the draft
> patch shown below does).

This adds a bit of complexity, and I am worried it will introduce more bugs,
especially because ashmem is going away in the long term in favor of memfd -
and I am not sure it is worth adding more complexity / maintenance burden to
it.

I am wondering if we can do this synchronously, without using a workqueue.
All you would need is a temporary list of areas to punch. In
ashmem_shrink_scan, you would build this list under the mutex, and then once
you release the mutex you can go through the list and do the fallocate,
followed by the wake-up of waiters on the wait queue, right? If you can do it
this way, then it would be better IMO.
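
As a concrete illustration, here is a minimal sketch of that idea (my own
sketch, not a posted patch - it assumes ashmem's existing helpers lru_del()
and range_size() and the existing struct fields). The point is only the
locking order: unlink ranges under ashmem_mutex, then punch the holes with
no locks held:

static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;
	LIST_HEAD(to_punch);	/* list local to this invocation */

	/* We might recurse into filesystem code, so bail out if necessary. */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	/* Phase 1: under the mutex, only unlink ranges and pin the files. */
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		get_file(range->asma->file);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);
		freed += range_size(range);
		list_add(&range->lru, &to_punch);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Phase 2: punch holes with no locks held, so the page-fault path
	 * (mmap_sem -> ashmem_mutex -> inode lock) cannot deadlock with us.
	 */
	list_for_each_entry_safe(range, next, &to_punch, lru) {
		struct file *f = range->asma->file;
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		f->f_op->fallocate(f,
				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				   start, end - start);
		fput(f);
		list_del_init(&range->lru);
		/* this is also where waiting pin/unpin callers would be woken */
	}

	return freed;
}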

side note: this sort of splat is exactly the reason we're moving to memfd,
since the wrapper design of ashmem is prone to deadlocks. We have fixed a lot
of the deadlocks already, but more keep showing up.

thanks,

 - Joel


> ---
>  drivers/staging/android/ashmem.c | 50 ++++++++++++++++++++++++++++----
>  1 file changed, 45 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
> index 90a8a9f1ac7d..1a890c43a10a 100644
> --- a/drivers/staging/android/ashmem.c
> +++ b/drivers/staging/android/ashmem.c
> @@ -75,6 +75,17 @@ struct ashmem_range {
>  /* LRU list of unpinned pages, protected by ashmem_mutex */
>  static LIST_HEAD(ashmem_lru_list);
>  
> +static struct workqueue_struct *ashmem_wq;
> +static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
> +static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
> +
> +struct ashmem_shrink_work {
> +	struct work_struct work;
> +	struct file *file;
> +	loff_t start;
> +	loff_t end;
> +};
> +
>  /*
>   * long lru_count - The count of pages on our LRU list.
>   *
> @@ -292,6 +303,7 @@ static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
>  	int ret = 0;
>  
>  	mutex_lock(&ashmem_mutex);
> +	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
>  
>  	/* If size is not set, or set to 0, always return EOF. */
>  	if (asma->size == 0)
> @@ -359,6 +371,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
>  	int ret = 0;
>  
>  	mutex_lock(&ashmem_mutex);
> +	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
>  
>  	/* user needs to SET_SIZE before mapping */
>  	if (!asma->size) {
> @@ -421,6 +434,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
>  	return ret;
>  }
>  
> +static void ashmem_shrink_worker(struct work_struct *work)
> +{
> +	struct ashmem_shrink_work *w = container_of(work, typeof(*w), work);
> +
> +	w->file->f_op->fallocate(w->file,
> +				 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
> +				 w->start, w->end - w->start);
> +	fput(w->file);
> +	kfree(w);
> +	if (atomic_dec_and_test(&ashmem_shrink_inflight))
> +		wake_up_all(&ashmem_shrink_wait);
> +}
> +
>  /*
>   * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
>   *
> @@ -449,12 +475,18 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>  		return -1;
>  
>  	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
> -		loff_t start = range->pgstart * PAGE_SIZE;
> -		loff_t end = (range->pgend + 1) * PAGE_SIZE;
> +		struct ashmem_shrink_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);
> +
> +		if (!w)
> +			break;
> +		INIT_WORK(&w->work, ashmem_shrink_worker);
> +		w->file = range->asma->file;
> +		get_file(w->file);
> +		w->start = range->pgstart * PAGE_SIZE;
> +		w->end = (range->pgend + 1) * PAGE_SIZE;
> +		atomic_inc(&ashmem_shrink_inflight);
> +		queue_work(ashmem_wq, &w->work);
>  
> -		range->asma->file->f_op->fallocate(range->asma->file,
> -				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
> -				start, end - start);
>  		range->purged = ASHMEM_WAS_PURGED;
>  		lru_del(range);
>  
> @@ -713,6 +745,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
>  		return -EFAULT;
>  
>  	mutex_lock(&ashmem_mutex);
> +	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
>  
>  	if (!asma->file)
>  		goto out_unlock;
> @@ -883,8 +916,15 @@ static int __init ashmem_init(void)
>  		goto out_free2;
>  	}
>  
> +	ashmem_wq = alloc_workqueue("ashmem_wq", WQ_MEM_RECLAIM, 0);
> +	if (!ashmem_wq) {
> +		pr_err("failed to create workqueue\n");
> +		goto out_demisc;
> +	}
> +
>  	ret = register_shrinker(&ashmem_shrinker);
>  	if (ret) {
> +		destroy_workqueue(ashmem_wq);
>  		pr_err("failed to register shrinker!\n");
>  		goto out_demisc;
>  	}
> -- 
> 2.17.1

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-22 15:32           ` Joel Fernandes
@ 2019-01-23  2:01             ` Tetsuo Handa
  2019-01-23 15:57               ` Joel Fernandes
  0 siblings, 1 reply; 25+ messages in thread
From: Tetsuo Handa @ 2019-01-23  2:01 UTC (permalink / raw)
  To: Joel Fernandes
  Cc: Andrew Morton, Todd Kjos, syzbot+a76129f18c89f3e2ddd4, ak,
	Johannes Weiner, jack, jrdr.linux, LKML, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

Joel Fernandes wrote:
> > Why do we need to call fallocate() synchronously with ashmem_mutex held?
> > Why can't we call fallocate() asynchronously from a WQ_MEM_RECLAIM workqueue
> > context so that we can call fallocate() with ashmem_mutex not held?
> > 
> > I don't know how ashmem works, but as far as I can guess, offloading is
> > possible as long as other operations which depend on the completion of the
> > fallocate() operation (e.g. read()/mmap(), querying/changing pinned status)
> > wait for the asynchronous fallocate() operation to complete (like the draft
> > patch shown below does).
> 
> This adds a bit of complexity, I am worried if it will introduce more
> bugs especially because ashmem is going away in the long term, in favor of
> memfd - and if its worth adding more complexity / maintenance burden to it.

I don't care about migrating to memfd. I care about getting bugs fixed.

> 
> I am wondering if we can do this synchronously, without using a workqueue.
> All you would need is a temporary list of areas to punch. In
> ashmem_shrink_scan, you would create this list under mutex and then once you
> release the mutex, you can go through this list and do the fallocate followed
> by the wake up of waiters on the wait queue, right? If you can do it this
> way, then it would be better IMO.

Are you sure that none of the locks held before doing a GFP_KERNEL allocation
interferes with the lock dependencies used by fallocate()? If yes, we can do
it without a workqueue context (like the draft patch shown below). Since I
don't understand which locks are potentially involved, I offloaded the work
to a clean workqueue context.

Anyway, I need you to check whether this approach waits for completion at
every location that needs to wait for completion.

---
 drivers/staging/android/ashmem.c | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 90a8a9f1ac7d..6a267563cb66 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -75,6 +75,9 @@ struct ashmem_range {
 /* LRU list of unpinned pages, protected by ashmem_mutex */
 static LIST_HEAD(ashmem_lru_list);
 
+static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
+
 /*
  * long lru_count - The count of pages on our LRU list.
  *
@@ -292,6 +295,7 @@ static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 	int ret = 0;
 
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	/* If size is not set, or set to 0, always return EOF. */
 	if (asma->size == 0)
@@ -359,6 +363,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 	int ret = 0;
 
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	/* user needs to SET_SIZE before mapping */
 	if (!asma->size) {
@@ -438,7 +443,6 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 static unsigned long
 ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	struct ashmem_range *range, *next;
 	unsigned long freed = 0;
 
 	/* We might recurse into filesystem code, so bail out if necessary */
@@ -448,17 +452,27 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	if (!mutex_trylock(&ashmem_mutex))
 		return -1;
 
-	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+	while (!list_empty(&ashmem_lru_list)) {
+		struct ashmem_range *range =
+			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
 		loff_t start = range->pgstart * PAGE_SIZE;
 		loff_t end = (range->pgend + 1) * PAGE_SIZE;
+		struct file *f = range->asma->file;
 
-		range->asma->file->f_op->fallocate(range->asma->file,
-				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-				start, end - start);
+		get_file(f);
+		atomic_inc(&ashmem_shrink_inflight);
 		range->purged = ASHMEM_WAS_PURGED;
 		lru_del(range);
 
 		freed += range_size(range);
+		mutex_unlock(&ashmem_mutex);
+		f->f_op->fallocate(f,
+				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				   start, end - start);
+		fput(f);
+		if (atomic_dec_and_test(&ashmem_shrink_inflight))
+			wake_up_all(&ashmem_shrink_wait);
+		mutex_lock(&ashmem_mutex);
 		if (--sc->nr_to_scan <= 0)
 			break;
 	}
@@ -713,6 +727,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 		return -EFAULT;
 
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	if (!asma->file)
 		goto out_unlock;
-- 
2.17.1

^ permalink raw reply related	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-23  2:01             ` Tetsuo Handa
@ 2019-01-23 15:57               ` Joel Fernandes
  2019-01-24  1:52                 ` Tetsuo Handa
  0 siblings, 1 reply; 25+ messages in thread
From: Joel Fernandes @ 2019-01-23 15:57 UTC (permalink / raw)
  To: Tetsuo Handa
  Cc: Andrew Morton, Todd Kjos, syzbot+a76129f18c89f3e2ddd4, ak,
	Johannes Weiner, jack, jrdr.linux, LKML, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On Wed, Jan 23, 2019 at 11:01:04AM +0900, Tetsuo Handa wrote:
> Joel Fernandes wrote:
> > > Why do we need to call fallocate() synchronously with ashmem_mutex held?
> > > Why can't we call fallocate() asynchronously from a WQ_MEM_RECLAIM workqueue
> > > context so that we can call fallocate() with ashmem_mutex not held?
> > > 
> > > I don't know how ashmem works, but as far as I can guess, offloading is
> > > possible as long as other operations which depend on the completion of the
> > > fallocate() operation (e.g. read()/mmap(), querying/changing pinned status)
> > > wait for the asynchronous fallocate() operation to complete (like the draft
> > > patch shown below does).
> > 
> > This adds a bit of complexity, and I am worried it will introduce more bugs,
> > especially because ashmem is going away in the long term in favor of memfd -
> > and I am not sure it is worth adding more complexity / maintenance burden to
> > it.
> 
> I don't care about migrating to memfd. I care about getting bugs fixed.

That's fair. I'm not a fan of bugs either. I was just making the point that
we want to fix things without introducing unwanted complexity and causing
more bugs. That said, thanks for the patch and for trying to fix it.

> > I am wondering if we can do this synchronously, without using a workqueue.
> > All you would need is a temporary list of areas to punch. In
> > ashmem_shrink_scan, you would build this list under the mutex, and then once
> > you release the mutex you can go through the list and do the fallocate,
> > followed by the wake-up of waiters on the wait queue, right? If you can do it
> > this way, then it would be better IMO.
> 
> Are you sure that none of the locks held before doing a GFP_KERNEL allocation
> interferes with the lock dependencies used by fallocate()? If yes, we can do
> it without a workqueue context (like the draft patch shown below). Since I
> don't understand which locks are potentially involved, I offloaded the work
> to a clean workqueue context.

fallocate acquires inode locks, so there is a lock dependency between
- memory reclaim (lockdep's fake fs_reclaim lock)
- inode locks.

This dependency is there whether we have your patch or not. I am not aware of
any locks being held other than these, but you could also just use lockdep to
dump all held locks at that point to confirm.
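
(For what it's worth, the lockdep helper for that is debug_show_held_locks();
a throwaway debugging aid - my suggestion, not part of any posted patch -
would be to call it just before the fallocate in the shrinker:

	#include <linux/debug_locks.h>	/* for debug_show_held_locks() */

	/* immediately before f->f_op->fallocate(...) in ashmem_shrink_scan() */
	debug_show_held_locks(current);

With CONFIG_LOCKDEP=y this dumps every lock the current task holds to the
kernel log; without lockdep it compiles to a no-op.)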

> Anyway, I need you to check whether this approach waits for completion at
> every location that needs to wait for completion.

I think you are waiting in more locations than necessary. The only location
you need to wait in is ashmem_pin_unpin().

So, to my eyes all that is needed to fix this bug is:

1. Delete the range from the ashmem_lru_list
2. Release the ashmem_mutex
3. fallocate the range.
4. Do the completion so that any waiting pin/unpin can proceed.

Could you clarify why you feel you need to wait for completion at those other
locations?

Note that once a range is unpinned, it is open sesame and userspace cannot
really expect consistent data from such a range until it is pinned again.
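
To make that pin/unpin contract concrete, a minimal userspace sketch
(assuming the staging ashmem uapi header for the ioctl definitions; error
handling omitted):

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <linux/ashmem.h>	/* from the staging uapi headers */

  int demo(void)
  {
  	int fd = open("/dev/ashmem", O_RDWR);
  	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
  	char *p;

  	ioctl(fd, ASHMEM_SET_SIZE, 4096);	/* must happen before mmap() */
  	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  	p[0] = 1;				/* fill while pinned */

  	ioctl(fd, ASHMEM_UNPIN, &pin);	/* kernel may purge from here on */
  	/* ... memory pressure may punch the hole ... */
  	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
  		p[0] = 1;		/* data was discarded; regenerate */
  	return 0;
  }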

Thanks!

 - Joel


> ---
>  drivers/staging/android/ashmem.c | 25 ++++++++++++++++++++-----
>  1 file changed, 20 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
> index 90a8a9f1ac7d..6a267563cb66 100644
> --- a/drivers/staging/android/ashmem.c
> +++ b/drivers/staging/android/ashmem.c
> @@ -75,6 +75,9 @@ struct ashmem_range {
>  /* LRU list of unpinned pages, protected by ashmem_mutex */
>  static LIST_HEAD(ashmem_lru_list);
>  
> +static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
> +static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
> +
>  /*
>   * long lru_count - The count of pages on our LRU list.
>   *
> @@ -292,6 +295,7 @@ static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
>  	int ret = 0;
>  
>  	mutex_lock(&ashmem_mutex);
> +	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
>  
>  	/* If size is not set, or set to 0, always return EOF. */
>  	if (asma->size == 0)
> @@ -359,6 +363,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
>  	int ret = 0;
>  
>  	mutex_lock(&ashmem_mutex);
> +	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
>  
>  	/* user needs to SET_SIZE before mapping */
>  	if (!asma->size) {
> @@ -438,7 +443,6 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
>  static unsigned long
>  ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>  {
> -	struct ashmem_range *range, *next;
>  	unsigned long freed = 0;
>  
>  	/* We might recurse into filesystem code, so bail out if necessary */
> @@ -448,17 +452,27 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>  	if (!mutex_trylock(&ashmem_mutex))
>  		return -1;
>  
> -	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
> +	while (!list_empty(&ashmem_lru_list)) {
> +		struct ashmem_range *range =
> +			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
>  		loff_t start = range->pgstart * PAGE_SIZE;
>  		loff_t end = (range->pgend + 1) * PAGE_SIZE;
> +		struct file *f = range->asma->file;
>  
> -		range->asma->file->f_op->fallocate(range->asma->file,
> -				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
> -				start, end - start);
> +		get_file(f);
> +		atomic_inc(&ashmem_shrink_inflight);
>  		range->purged = ASHMEM_WAS_PURGED;
>  		lru_del(range);
>  
>  		freed += range_size(range);
> +		mutex_unlock(&ashmem_mutex);
> +		f->f_op->fallocate(f,
> +				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
> +				   start, end - start);
> +		fput(f);
> +		if (atomic_dec_and_test(&ashmem_shrink_inflight))
> +			wake_up_all(&ashmem_shrink_wait);
> +		mutex_lock(&ashmem_mutex);
>  		if (--sc->nr_to_scan <= 0)
>  			break;
>  	}
> @@ -713,6 +727,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
>  		return -EFAULT;
>  
>  	mutex_lock(&ashmem_mutex);
> +	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
>  
>  	if (!asma->file)
>  		goto out_unlock;
> -- 
> 2.17.1

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-23 15:57               ` Joel Fernandes
@ 2019-01-24  1:52                 ` Tetsuo Handa
  2019-01-24 13:46                   ` Joel Fernandes
  0 siblings, 1 reply; 25+ messages in thread
From: Tetsuo Handa @ 2019-01-24  1:52 UTC (permalink / raw)
  To: Joel Fernandes
  Cc: Andrew Morton, Todd Kjos, syzbot+a76129f18c89f3e2ddd4, ak,
	Johannes Weiner, jack, jrdr.linux, LKML, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

Joel Fernandes wrote:
> > Anyway, I need your review of whether this approach waits for completion at
> > all locations which need to wait for completion.
> 
> I think you are waiting in unwanted locations. The only location you need to
> wait in is ashmem_pin_unpin.
> 
> So, to my eyes all that is needed to fix this bug is:
> 
> 1. Delete the range from the ashmem_lru_list
> 2. Release the ashmem_mutex
> 3. fallocate the range.
> 4. Do the completion so that any waiting pin/unpin can proceed.
> 
> Could you clarify why you feel you need to wait for completion at those other
> locations?

Because I don't know how ashmem works.

> 
> Note that once a range is unpinned, it is open sesame and userspace cannot
> really expect consistent data from such range till it is pinned again.

Then, I'm tempted to eliminate the shrinker and the LRU list (like the draft
patch shown below). I think this is not equivalent to the current code because
it shrinks only at range_alloc() time, and I don't know whether it is OK to
temporarily release ashmem_mutex during range_alloc() at "Case #4" of
ashmem_pin(), but can't we go in this direction?

By the way, why not check for range_alloc() failure before calling range_shrink()?

---
 drivers/staging/android/ashmem.c | 154 +++++--------------------------
 1 file changed, 21 insertions(+), 133 deletions(-)

diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 90a8a9f1ac7d..90668eebf35b 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -53,7 +53,6 @@ struct ashmem_area {
 
 /**
  * struct ashmem_range - A range of unpinned/evictable pages
- * @lru:	         The entry in the LRU list
  * @unpinned:	         The entry in its area's unpinned list
  * @asma:	         The associated anonymous shared memory area.
  * @pgstart:	         The starting page (inclusive)
@@ -64,7 +63,6 @@ struct ashmem_area {
  * It is protected by 'ashmem_mutex'
  */
 struct ashmem_range {
-	struct list_head lru;
 	struct list_head unpinned;
 	struct ashmem_area *asma;
 	size_t pgstart;
@@ -72,15 +70,8 @@ struct ashmem_range {
 	unsigned int purged;
 };
 
-/* LRU list of unpinned pages, protected by ashmem_mutex */
-static LIST_HEAD(ashmem_lru_list);
-
-/*
- * long lru_count - The count of pages on our LRU list.
- *
- * This is protected by ashmem_mutex.
- */
-static unsigned long lru_count;
+static atomic_t ashmem_purge_inflight = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ashmem_purge_wait);
 
 /*
  * ashmem_mutex - protects the list of and each individual ashmem_area
@@ -97,7 +88,7 @@ static inline unsigned long range_size(struct ashmem_range *range)
 	return range->pgend - range->pgstart + 1;
 }
 
-static inline bool range_on_lru(struct ashmem_range *range)
+static inline bool range_not_purged(struct ashmem_range *range)
 {
 	return range->purged == ASHMEM_NOT_PURGED;
 }
@@ -133,32 +124,6 @@ static inline bool range_before_page(struct ashmem_range *range, size_t page)
 
 #define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)
 
-/**
- * lru_add() - Adds a range of memory to the LRU list
- * @range:     The memory range being added.
- *
- * The range is first added to the end (tail) of the LRU list.
- * After this, the size of the range is added to @lru_count
- */
-static inline void lru_add(struct ashmem_range *range)
-{
-	list_add_tail(&range->lru, &ashmem_lru_list);
-	lru_count += range_size(range);
-}
-
-/**
- * lru_del() - Removes a range of memory from the LRU list
- * @range:     The memory range being removed
- *
- * The range is first deleted from the LRU list.
- * After this, the size of the range is removed from @lru_count
- */
-static inline void lru_del(struct ashmem_range *range)
-{
-	list_del(&range->lru);
-	lru_count -= range_size(range);
-}
-
 /**
  * range_alloc() - Allocates and initializes a new ashmem_range structure
  * @asma:	   The associated ashmem_area
@@ -188,9 +153,23 @@ static int range_alloc(struct ashmem_area *asma,
 
 	list_add_tail(&range->unpinned, &prev_range->unpinned);
 
-	if (range_on_lru(range))
-		lru_add(range);
+	if (range_not_purged(range)) {
+		loff_t start = range->pgstart * PAGE_SIZE;
+		loff_t end = (range->pgend + 1) * PAGE_SIZE;
+		struct file *f = range->asma->file;
 
+		get_file(f);
+		atomic_inc(&ashmem_purge_inflight);
+		range->purged = ASHMEM_WAS_PURGED;
+		mutex_unlock(&ashmem_mutex);
+		f->f_op->fallocate(f,
+				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				   start, end - start);
+		fput(f);
+		if (atomic_dec_and_test(&ashmem_purge_inflight))
+			wake_up(&ashmem_purge_wait);
+		mutex_lock(&ashmem_mutex);
+	}
 	return 0;
 }
 
@@ -201,8 +180,6 @@ static int range_alloc(struct ashmem_area *asma,
 static void range_del(struct ashmem_range *range)
 {
 	list_del(&range->unpinned);
-	if (range_on_lru(range))
-		lru_del(range);
 	kmem_cache_free(ashmem_range_cachep, range);
 }
 
@@ -214,20 +191,12 @@ static void range_del(struct ashmem_range *range)
  *
  * This does not modify the data inside the existing range in any way - It
  * simply shrinks the boundaries of the range.
- *
- * Theoretically, with a little tweaking, this could eventually be changed
- * to range_resize, and expand the lru_count if the new range is larger.
  */
 static inline void range_shrink(struct ashmem_range *range,
 				size_t start, size_t end)
 {
-	size_t pre = range_size(range);
-
 	range->pgstart = start;
 	range->pgend = end;
-
-	if (range_on_lru(range))
-		lru_count -= pre - range_size(range);
 }
 
 /**
@@ -421,72 +390,6 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 	return ret;
 }
 
-/*
- * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
- *
- * 'nr_to_scan' is the number of objects to scan for freeing.
- *
- * 'gfp_mask' is the mask of the allocation that got us into this mess.
- *
- * Return value is the number of objects freed or -1 if we cannot
- * proceed without risk of deadlock (due to gfp_mask).
- *
- * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
- * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
- * pages freed.
- */
-static unsigned long
-ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
-	struct ashmem_range *range, *next;
-	unsigned long freed = 0;
-
-	/* We might recurse into filesystem code, so bail out if necessary */
-	if (!(sc->gfp_mask & __GFP_FS))
-		return SHRINK_STOP;
-
-	if (!mutex_trylock(&ashmem_mutex))
-		return -1;
-
-	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
-		loff_t start = range->pgstart * PAGE_SIZE;
-		loff_t end = (range->pgend + 1) * PAGE_SIZE;
-
-		range->asma->file->f_op->fallocate(range->asma->file,
-				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-				start, end - start);
-		range->purged = ASHMEM_WAS_PURGED;
-		lru_del(range);
-
-		freed += range_size(range);
-		if (--sc->nr_to_scan <= 0)
-			break;
-	}
-	mutex_unlock(&ashmem_mutex);
-	return freed;
-}
-
-static unsigned long
-ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
-{
-	/*
-	 * note that lru_count is count of pages on the lru, not a count of
-	 * objects on the list. This means the scan function needs to return the
-	 * number of pages freed, not the number of objects scanned.
-	 */
-	return lru_count;
-}
-
-static struct shrinker ashmem_shrinker = {
-	.count_objects = ashmem_shrink_count,
-	.scan_objects = ashmem_shrink_scan,
-	/*
-	 * XXX (dchinner): I wish people would comment on why they need on
-	 * significant changes to the default value here
-	 */
-	.seeks = DEFAULT_SEEKS * 4,
-};
-
 static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
 {
 	int ret = 0;
@@ -713,6 +616,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 		return -EFAULT;
 
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_purge_wait, !atomic_read(&ashmem_purge_inflight));
 
 	if (!asma->file)
 		goto out_unlock;
@@ -787,15 +691,7 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
 		break;
 	case ASHMEM_PURGE_ALL_CACHES:
-		ret = -EPERM;
-		if (capable(CAP_SYS_ADMIN)) {
-			struct shrink_control sc = {
-				.gfp_mask = GFP_KERNEL,
-				.nr_to_scan = LONG_MAX,
-			};
-			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
-			ashmem_shrink_scan(&ashmem_shrinker, &sc);
-		}
+		ret = capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
 		break;
 	}
 
@@ -883,18 +779,10 @@ static int __init ashmem_init(void)
 		goto out_free2;
 	}
 
-	ret = register_shrinker(&ashmem_shrinker);
-	if (ret) {
-		pr_err("failed to register shrinker!\n");
-		goto out_demisc;
-	}
-
 	pr_info("initialized\n");
 
 	return 0;
 
-out_demisc:
-	misc_deregister(&ashmem_misc);
 out_free2:
 	kmem_cache_destroy(ashmem_range_cachep);
 out_free1:
-- 
2.17.1

^ permalink raw reply related	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-24  1:52                 ` Tetsuo Handa
@ 2019-01-24 13:46                   ` Joel Fernandes
  2019-01-25 16:02                       ` Tetsuo Handa
  2019-01-26  1:57                       ` Tetsuo Handa
  0 siblings, 2 replies; 25+ messages in thread
From: Joel Fernandes @ 2019-01-24 13:46 UTC (permalink / raw)
  To: Tetsuo Handa
  Cc: Andrew Morton, Todd Kjos, syzbot+a76129f18c89f3e2ddd4, ak,
	Johannes Weiner, jack, jrdr.linux, LKML, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On Thu, Jan 24, 2019 at 10:52:30AM +0900, Tetsuo Handa wrote:
> Joel Fernandes wrote:
> > > Anyway, I need your checks regarding whether this approach is waiting for
> > > completion at all locations which need to wait for completion.
> > 
> > I think you are waiting in unwanted locations. The only location you need to
> > wait in is ashmem_pin_unpin.
> > 
> > So, to my eyes all that is needed to fix this bug is:
> > 
> > 1. Delete the range from the ashmem_lru_list
> > 2. Release the ashmem_mutex
> > 3. fallocate the range.
> > 4. Do the completion so that any waiting pin/unpin can proceed.
> > 
> > Could you clarify why you feel you need to wait for completion at those other
> > locations?
> 
> Because I don't know how ashmem works.

You sound like you're almost there though.

> > Note that once a range is unpinned, it is open sesame and userspace cannot
> > really expect consistent data from such a range until it is pinned again.
> 
> Then, I'm tempted to eliminate the shrinker and the LRU list (like the draft
> patch shown below). I think this is not equivalent to the current code because
> it shrinks only at range_alloc() time, and I don't know whether it is OK to
> temporarily release ashmem_mutex during range_alloc() at "Case #4" of
> ashmem_pin(), but can't we go in this direction?

No, the point of the shrinker is to do a lazy free. We cannot free things
during unpin, since a range can be pinned again and we need to find it by
going through the list. We also cannot get rid of any lists, since if
something is re-pinned, we need to find it and determine whether it was
purged. We also need the list to know what was unpinned, so that the
shrinker works.

By the way, all this may be going away quite soon (the whole driver) as I
said, so just give it a little bit of time.

I am happy to fix it soon if that's not the case (which I should know soon -
like a couple of weeks) but I'd like to hold off till then.

> By the way, why not check for range_alloc() failure before calling range_shrink()?

That would be a nice thing to do. Send a patch?

thanks,

 - Joel


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-24 13:46                   ` Joel Fernandes
@ 2019-01-25 16:02                       ` Tetsuo Handa
  2019-01-26  1:57                       ` Tetsuo Handa
  1 sibling, 0 replies; 25+ messages in thread
From: Tetsuo Handa @ 2019-01-25 16:02 UTC (permalink / raw)
  To: Joel Fernandes
  Cc: Andrew Morton, Todd Kjos, syzbot+a76129f18c89f3e2ddd4, ak,
	Johannes Weiner, jack, jrdr.linux, LKML, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On 2019/01/24 22:46, Joel Fernandes wrote:
> On Thu, Jan 24, 2019 at 10:52:30AM +0900, Tetsuo Handa wrote:
>> Joel Fernandes wrote:
>>>> Anyway, I need your review of whether this approach waits for completion at
>>>> all locations which need to wait for completion.
>>>
>>> I think you are waiting in unwanted locations. The only location you need to
>>> wait in is ashmem_pin_unpin.
>>>
>>> So, to my eyes all that is needed to fix this bug is:
>>>
>>> 1. Delete the range from the ashmem_lru_list
>>> 2. Release the ashmem_mutex
>>> 3. fallocate the range.
>>> 4. Do the completion so that any waiting pin/unpin can proceed.
>>>
>>> Could you clarify why you feel you need to wait for completion at those other
>>> locations?

OK. Here is an updated patch.
Passed syzbot's best-effort testing using reproducers on all three reports.

From f192176dbee54075d41249e9f22918c32cb4d4fc Mon Sep 17 00:00:00 2001
From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Date: Fri, 25 Jan 2019 23:43:01 +0900
Subject: [PATCH] staging: android: ashmem: Don't call fallocate() with ashmem_mutex held.

syzbot is hitting lockdep warnings [1][2][3]. This patch tries to fix
these warnings by eliminating the ashmem_shrink_scan() =>
{shmem|vfs}_fallocate() sequence.

[1] https://syzkaller.appspot.com/bug?id=87c399f6fa6955006080b24142e2ce7680295ad4
[2] https://syzkaller.appspot.com/bug?id=7ebea492de7521048355fc84210220e1038a7908
[3] https://syzkaller.appspot.com/bug?id=e02419c12131c24e2a957ea050c2ab6dcbbc3270

Reported-by: syzbot <syzbot+a76129f18c89f3e2ddd4@syzkaller.appspotmail.com>
Reported-by: syzbot <syzbot+148c2885d71194f18d28@syzkaller.appspotmail.com>
Reported-by: syzbot <syzbot+4b8b031b89e6b96c4b2e@syzkaller.appspotmail.com>
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
 drivers/staging/android/ashmem.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 90a8a9f..d40c1d2 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -75,6 +75,9 @@ struct ashmem_range {
 /* LRU list of unpinned pages, protected by ashmem_mutex */
 static LIST_HEAD(ashmem_lru_list);
 
+static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
+
 /*
  * long lru_count - The count of pages on our LRU list.
  *
@@ -438,7 +441,6 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 static unsigned long
 ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	struct ashmem_range *range, *next;
 	unsigned long freed = 0;
 
 	/* We might recurse into filesystem code, so bail out if necessary */
@@ -448,17 +450,27 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!mutex_trylock(&ashmem_mutex))
 		return -1;
 
-	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+	while (!list_empty(&ashmem_lru_list)) {
+		struct ashmem_range *range =
+			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
 		loff_t start = range->pgstart * PAGE_SIZE;
 		loff_t end = (range->pgend + 1) * PAGE_SIZE;
+		struct file *f = range->asma->file;
 
-		range->asma->file->f_op->fallocate(range->asma->file,
-				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-				start, end - start);
+		get_file(f);
+		atomic_inc(&ashmem_shrink_inflight);
 		range->purged = ASHMEM_WAS_PURGED;
 		lru_del(range);
 
 		freed += range_size(range);
+		mutex_unlock(&ashmem_mutex);
+		f->f_op->fallocate(f,
+				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				   start, end - start);
+		fput(f);
+		if (atomic_dec_and_test(&ashmem_shrink_inflight))
+			wake_up_all(&ashmem_shrink_wait);
+		mutex_lock(&ashmem_mutex);
 		if (--sc->nr_to_scan <= 0)
 			break;
 	}
@@ -713,6 +725,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 		return -EFAULT;
 
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	if (!asma->file)
 		goto out_unlock;
-- 
1.8.3.1

^ permalink raw reply related	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-24 13:46                   ` Joel Fernandes
@ 2019-01-26  1:57                       ` Tetsuo Handa
  2019-01-26  1:57                       ` Tetsuo Handa
  1 sibling, 0 replies; 25+ messages in thread
From: Tetsuo Handa @ 2019-01-26  1:57 UTC (permalink / raw)
  To: Joel Fernandes
  Cc: Andrew Morton, Todd Kjos, syzbot+a76129f18c89f3e2ddd4, ak,
	Johannes Weiner, jack, jrdr.linux, LKML, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On 2019/01/24 22:46, Joel Fernandes wrote:
> On Thu, Jan 24, 2019 at 10:52:30AM +0900, Tetsuo Handa wrote:
>> Then, I'm tempted to eliminate the shrinker and the LRU list (like the draft
>> patch shown below). I think this is not equivalent to the current code because
>> it shrinks only at range_alloc() time, and I don't know whether it is OK to
>> temporarily release ashmem_mutex during range_alloc() at "Case #4" of
>> ashmem_pin(), but can't we go in this direction?
> 
> No, the point of the shrinker is to do a lazy free. We cannot free things
> during unpin, since a range can be pinned again and we need to find it by
> going through the list. We also cannot get rid of any lists, since if
> something is re-pinned, we need to find it and determine whether it was
> purged. We also need the list to know what was unpinned, so that the
> shrinker works.
> 
> By the way, all this may be going away quite soon (the whole driver) as I
> said, so just give it a little bit of time.
> 
> I am happy to fix it soon if that's not the case (which I should know soon -
> like a couple of weeks) but I'd like to hold off till then.
> 
>> By the way, why not check for range_alloc() failure before calling range_shrink()?
> 
> That would be a nice thing to do. Send a patch?

OK. Here is a patch. I chose __GFP_NOFAIL rather than adding error handling,
because a small GFP_KERNEL allocation won't fail unless the current thread is
killed by the OOM killer or memory allocation fault injection forces it to
fail, and range_alloc() is not called multiple times from one syscall.

But note that doing a GFP_KERNEL allocation with ashmem_mutex held risks
needlessly invoking the OOM killer, because "the point of the shrinker is to
do a lazy free" counts on ashmem_mutex not being held by the GFP_KERNEL
allocating thread. Although other shrinkers will likely make forward progress
by releasing memory, technically you should avoid GFP_KERNEL allocations with
ashmem_mutex held if the shrinker depends on ashmem_mutex not being held; a
sketch of one way around that follows.
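
A minimal sketch of the conventional workaround, with hypothetical function
and ownership details (the real unpin path would hand 'spare' to the range
bookkeeping and clear the pointer once it is consumed):

  #include <linux/mutex.h>
  #include <linux/slab.h>

  /* Hypothetical rework: do the GFP_KERNEL allocation before taking
   * ashmem_mutex, so that reclaim (and hence the shrinker) can run
   * while the allocator sleeps; free the spare if it goes unused. */
  static int ashmem_unpin_prealloc(struct ashmem_area *asma,
  				 size_t pgstart, size_t pgend)
  {
  	struct ashmem_range *spare =
  		kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);

  	if (!spare)
  		return -ENOMEM;

  	mutex_lock(&ashmem_mutex);
  	/* ... range bookkeeping takes ownership of 'spare' instead of
  	 * allocating here, and sets it to NULL once consumed ... */
  	mutex_unlock(&ashmem_mutex);

  	if (spare)
  		kmem_cache_free(ashmem_range_cachep, spare);
  	return 0;
  }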



From e1c4a9b53b0bb11a0743a8f861915c043deb616d Mon Sep 17 00:00:00 2001
From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Date: Sat, 26 Jan 2019 10:52:39 +0900
Subject: [PATCH] staging: android: ashmem: Don't allow range_alloc() to fail.

ashmem_pin() is calling range_shrink() without checking whether
range_alloc() succeeded. Since memory allocation fault injection might
force range_alloc() to fail while range_alloc() is called only once per
ioctl() request, make range_alloc() unable to fail.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
 drivers/staging/android/ashmem.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index d40c1d2..a8070a2 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -171,18 +171,14 @@ static inline void lru_del(struct ashmem_range *range)
  * @end:	   The ending page (inclusive)
  *
  * This function is protected by ashmem_mutex.
- *
- * Return: 0 if successful, or -ENOMEM if there is an error
  */
-static int range_alloc(struct ashmem_area *asma,
-		       struct ashmem_range *prev_range, unsigned int purged,
-		       size_t start, size_t end)
+static void range_alloc(struct ashmem_area *asma,
+			struct ashmem_range *prev_range, unsigned int purged,
+			size_t start, size_t end)
 {
 	struct ashmem_range *range;
 
-	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
-	if (!range)
-		return -ENOMEM;
+	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL | __GFP_NOFAIL);
 
 	range->asma = asma;
 	range->pgstart = start;
@@ -193,8 +189,6 @@ static int range_alloc(struct ashmem_area *asma,
 
 	if (range_on_lru(range))
 		lru_add(range);
-
-	return 0;
 }
 
 /**
@@ -687,7 +681,8 @@ static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
 		}
 	}
 
-	return range_alloc(asma, range, purged, pgstart, pgend);
+	range_alloc(asma, range, purged, pgstart, pgend);
+	return 0;
 }
 
 /*
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-25 16:02                       ` Tetsuo Handa
@ 2019-01-28 16:45                       ` Joel Fernandes
  2019-01-29 10:44                         ` Tetsuo Handa
  -1 siblings, 1 reply; 25+ messages in thread
From: Joel Fernandes @ 2019-01-28 16:45 UTC (permalink / raw)
  To: Tetsuo Handa
  Cc: Andrew Morton, Todd Kjos, syzbot+a76129f18c89f3e2ddd4, ak,
	Johannes Weiner, jack, jrdr.linux, LKML, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On Sat, Jan 26, 2019 at 01:02:06AM +0900, Tetsuo Handa wrote:
> On 2019/01/24 22:46, Joel Fernandes wrote:
> > On Thu, Jan 24, 2019 at 10:52:30AM +0900, Tetsuo Handa wrote:
> >> Joel Fernandes wrote:
> >>>> Anyway, I need your review of whether this approach waits for completion at
> >>>> all locations which need to wait for completion.
> >>>
> >>> I think you are waiting in unwanted locations. The only location you need to
> >>> wait in is ashmem_pin_unpin.
> >>>
> >>> So, to my eyes all that is needed to fix this bug is:
> >>>
> >>> 1. Delete the range from the ashmem_lru_list
> >>> 2. Release the ashmem_mutex
> >>> 3. fallocate the range.
> >>> 4. Do the completion so that any waiting pin/unpin can proceed.
> >>>
> >>> Could you clarify why you feel you need to wait for completion at those other
> >>> locations?
> 
> OK. Here is an updated patch.
> Passed syzbot's best-effort testing using reproducers on all three reports.
> 
> From f192176dbee54075d41249e9f22918c32cb4d4fc Mon Sep 17 00:00:00 2001
> From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
> Date: Fri, 25 Jan 2019 23:43:01 +0900
> Subject: [PATCH] staging: android: ashmem: Don't call fallocate() with ashmem_mutex held.
> 
> syzbot is hitting lockdep warnings [1][2][3]. This patch tries to fix
> these warnings by eliminating the ashmem_shrink_scan() =>
> {shmem|vfs}_fallocate() sequence.
> 
> [1] https://syzkaller.appspot.com/bug?id=87c399f6fa6955006080b24142e2ce7680295ad4
> [2] https://syzkaller.appspot.com/bug?id=7ebea492de7521048355fc84210220e1038a7908
> [3] https://syzkaller.appspot.com/bug?id=e02419c12131c24e2a957ea050c2ab6dcbbc3270
> 
> Reported-by: syzbot <syzbot+a76129f18c89f3e2ddd4@syzkaller.appspotmail.com>
> Reported-by: syzbot <syzbot+148c2885d71194f18d28@syzkaller.appspotmail.com>
> Reported-by: syzbot <syzbot+4b8b031b89e6b96c4b2e@syzkaller.appspotmail.com>
> Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
> ---
>  drivers/staging/android/ashmem.c | 23 ++++++++++++++++++-----
>  1 file changed, 18 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
> index 90a8a9f..d40c1d2 100644
> --- a/drivers/staging/android/ashmem.c
> +++ b/drivers/staging/android/ashmem.c
> @@ -75,6 +75,9 @@ struct ashmem_range {
>  /* LRU list of unpinned pages, protected by ashmem_mutex */
>  static LIST_HEAD(ashmem_lru_list);
>  
> +static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
> +static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
> +
>  /*
>   * long lru_count - The count of pages on our LRU list.
>   *
> @@ -438,7 +441,6 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
>  static unsigned long
>  ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>  {
> -	struct ashmem_range *range, *next;
>  	unsigned long freed = 0;
>  
>  	/* We might recurse into filesystem code, so bail out if necessary */
> @@ -448,17 +450,27 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
>  	if (!mutex_trylock(&ashmem_mutex))
>  		return -1;
>  
> -	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
> +	while (!list_empty(&ashmem_lru_list)) {
> +		struct ashmem_range *range =
> +			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
>  		loff_t start = range->pgstart * PAGE_SIZE;
>  		loff_t end = (range->pgend + 1) * PAGE_SIZE;
> +		struct file *f = range->asma->file;
>  
> -		range->asma->file->f_op->fallocate(range->asma->file,
> -				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
> -				start, end - start);
> +		get_file(f);
> +		atomic_inc(&ashmem_shrink_inflight);
>  		range->purged = ASHMEM_WAS_PURGED;
>  		lru_del(range);
>  
>  		freed += range_size(range);
> +		mutex_unlock(&ashmem_mutex);
> +		f->f_op->fallocate(f,
> +				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
> +				   start, end - start);
> +		fput(f);
> +		if (atomic_dec_and_test(&ashmem_shrink_inflight))
> +			wake_up_all(&ashmem_shrink_wait);
> +		mutex_lock(&ashmem_mutex);

Let us replace mutex_lock() with mutex_trylock(), as done before the loop?
Here there is an opportunity to not block other ashmem operations. Otherwise
LGTM. Also, CC stable.

thanks,

 - Joel


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: possible deadlock in __do_page_fault
  2019-01-28 16:45                       ` Joel Fernandes
@ 2019-01-29 10:44                         ` Tetsuo Handa
  0 siblings, 0 replies; 25+ messages in thread
From: Tetsuo Handa @ 2019-01-29 10:44 UTC (permalink / raw)
  To: Joel Fernandes
  Cc: Andrew Morton, Todd Kjos, syzbot+a76129f18c89f3e2ddd4, ak,
	Johannes Weiner, jack, jrdr.linux, LKML, linux-mm, mawilcox,
	mgorman, syzkaller-bugs, Arve Hjønnevåg, Todd Kjos,
	Martijn Coenen, Greg Kroah-Hartman

On 2019/01/29 1:45, Joel Fernandes wrote:
>>  		freed += range_size(range);
>> +		mutex_unlock(&ashmem_mutex);
>> +		f->f_op->fallocate(f,
>> +				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
>> +				   start, end - start);
>> +		fput(f);
>> +		if (atomic_dec_and_test(&ashmem_shrink_inflight))
>> +			wake_up_all(&ashmem_shrink_wait);
>> +		mutex_lock(&ashmem_mutex);
> 
> Let us replace mutex_lock() with mutex_trylock(), as done before the loop?
> Here there is an opportunity to not block other ashmem operations. Otherwise
> LGTM. Also, CC stable.

If the shrinker succeeded in grabbing ashmem_mutex via mutex_trylock(), it is
guaranteed that the allocating thread is not inside a

  mutex_lock(&ashmem_mutex);
  kmalloc(GFP_KERNEL);
  mutex_unlock(&ashmem_mutex);

block. Therefore, I think that it is safe to use mutex_lock() here.

Nonetheless, although syzbot did not find any other dependency, I can update
this patch to use mutex_trylock() if you worry about a not-yet-discovered
dependency.



From fd850fecd248951ad1ad26b37ec5bf84afe41cbb Mon Sep 17 00:00:00 2001
From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Date: Tue, 29 Jan 2019 10:56:47 +0900
Subject: [PATCH v2] staging: android: ashmem: Don't call fallocate() with ashmem_mutex held.

syzbot is hitting lockdep warnings [1][2][3]. This patch tries to fix
these warnings by eliminating the ashmem_shrink_scan() =>
{shmem|vfs}_fallocate() sequence.

[1] https://syzkaller.appspot.com/bug?id=87c399f6fa6955006080b24142e2ce7680295ad4
[2] https://syzkaller.appspot.com/bug?id=7ebea492de7521048355fc84210220e1038a7908
[3] https://syzkaller.appspot.com/bug?id=e02419c12131c24e2a957ea050c2ab6dcbbc3270

Reported-by: syzbot <syzbot+a76129f18c89f3e2ddd4@syzkaller.appspotmail.com>
Reported-by: syzbot <syzbot+148c2885d71194f18d28@syzkaller.appspotmail.com>
Reported-by: syzbot <syzbot+4b8b031b89e6b96c4b2e@syzkaller.appspotmail.com>
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: stable@vger.kernel.org
---
 drivers/staging/android/ashmem.c | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 90a8a9f1ac7d..ade8438a827a 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -75,6 +75,9 @@ struct ashmem_range {
 /* LRU list of unpinned pages, protected by ashmem_mutex */
 static LIST_HEAD(ashmem_lru_list);
 
+static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
+
 /*
  * long lru_count - The count of pages on our LRU list.
  *
@@ -438,7 +441,6 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 static unsigned long
 ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	struct ashmem_range *range, *next;
 	unsigned long freed = 0;
 
 	/* We might recurse into filesystem code, so bail out if necessary */
@@ -448,21 +450,33 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	if (!mutex_trylock(&ashmem_mutex))
 		return -1;
 
-	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+	while (!list_empty(&ashmem_lru_list)) {
+		struct ashmem_range *range =
+			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
 		loff_t start = range->pgstart * PAGE_SIZE;
 		loff_t end = (range->pgend + 1) * PAGE_SIZE;
+		struct file *f = range->asma->file;
 
-		range->asma->file->f_op->fallocate(range->asma->file,
-				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-				start, end - start);
+		get_file(f);
+		atomic_inc(&ashmem_shrink_inflight);
 		range->purged = ASHMEM_WAS_PURGED;
 		lru_del(range);
 
 		freed += range_size(range);
+		mutex_unlock(&ashmem_mutex);
+		f->f_op->fallocate(f,
+				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				   start, end - start);
+		fput(f);
+		if (atomic_dec_and_test(&ashmem_shrink_inflight))
+			wake_up_all(&ashmem_shrink_wait);
+		if (!mutex_trylock(&ashmem_mutex))
+			goto out;
 		if (--sc->nr_to_scan <= 0)
 			break;
 	}
 	mutex_unlock(&ashmem_mutex);
+out:
 	return freed;
 }
 
@@ -713,6 +727,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 		return -EFAULT;
 
 	mutex_lock(&ashmem_mutex);
+	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
 
 	if (!asma->file)
 		goto out_unlock;
-- 
2.17.1



^ permalink raw reply related	[flat|nested] 25+ messages in thread

end of thread, other threads:[~2019-01-29 10:44 UTC | newest]

Thread overview: 25+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-09-20 21:04 possible deadlock in __do_page_fault syzbot
2018-09-20 21:10 ` Andrew Morton
2018-09-20 21:12   ` Todd Kjos
2018-09-20 23:33     ` Joel Fernandes
2018-09-21  6:37       ` Dmitry Vyukov
2018-09-21 23:21       ` Andrew Morton
2019-01-22 10:02         ` Tetsuo Handa
2019-01-22 10:12           ` Dmitry Vyukov
2019-01-22 10:32             ` Tetsuo Handa
2019-01-22 13:52               ` Dmitry Vyukov
2019-01-22 13:54                 ` Dmitry Vyukov
2019-01-22 14:08                   ` syzbot
2019-01-22 14:08                     ` syzbot
2019-01-22 15:32           ` Joel Fernandes
2019-01-23  2:01             ` Tetsuo Handa
2019-01-23 15:57               ` Joel Fernandes
2019-01-24  1:52                 ` Tetsuo Handa
2019-01-24 13:46                   ` Joel Fernandes
2019-01-25 16:02                       ` Tetsuo Handa
2019-01-28 16:45                       ` Joel Fernandes
2019-01-29 10:44                         ` Tetsuo Handa
2019-01-26  1:57                       ` Tetsuo Handa
2018-10-01  5:23 ` syzbot
