From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: from mail-lj1-f196.google.com ([209.85.208.196]:40686 "EHLO
	mail-lj1-f196.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1725859AbeJFEHF (ORCPT );
	Sat, 6 Oct 2018 00:07:05 -0400
Received: by mail-lj1-f196.google.com with SMTP id r83-v6so12811465ljr.7
	for ; Fri, 05 Oct 2018 14:06:34 -0700 (PDT)
MIME-Version: 1.0
References: <20180904160632.21210-1-jack@suse.cz> <20180904160632.21210-13-jack@suse.cz>
In-Reply-To: <20180904160632.21210-13-jack@suse.cz>
From: Paul Moore
Date: Fri, 5 Oct 2018 17:06:22 -0400
Message-ID:
Subject: Re: [PATCH 12/11 TESTSUITE] audit_testsuite: Add stress test for tree watches
To: jack@suse.cz
Cc: viro@zeniv.linux.org.uk, linux-audit@redhat.com,
	linux-fsdevel@vger.kernel.org, rgb@redhat.com, amir73il@gmail.com
Content-Type: text/plain; charset="UTF-8"
Sender: linux-fsdevel-owner@vger.kernel.org
List-ID:

On Tue, Sep 4, 2018 at 12:06 PM Jan Kara wrote:
> Add stress test for stressing audit tree watches by adding and deleting
> rules while events are generated and watched filesystems are mounted and
> unmounted in parallel.
>
> Signed-off-by: Jan Kara
> ---
>  tests/stress_tree/Makefile |   8 +++
>  tests/stress_tree/test     | 171 +++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 179 insertions(+)
>  create mode 100644 tests/stress_tree/Makefile
>  create mode 100755 tests/stress_tree/test

No commentary on the test itself, other than that it should perhaps live
under test_manual/; however, running the tests in a loop today I was
reliably able to panic my test kernel after roughly 30 minutes.

I'm using the kernel linked below, which is Fedora Rawhide + selinux/next
+ audit/next + audit/working-fsnotify_fixes; a link to the patches added
to the Rawhide kernel can be found in the list archive linked below.

* https://groups.google.com/forum/#!topic/kernel-secnext/SFv0d-ij3z8
* https://copr.fedorainfracloud.org/coprs/pcmoore/kernel-secnext/build/805949
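
For anyone trying to reproduce this, the loop does not need to be anything
clever. The sketch below is one way to drive it; the audit-testsuite
checkout path and the direct perl invocation of tests/stress_tree/test
(run as root) are assumptions and may need adjusting for the local setup:

  #!/bin/sh
  # Reproduction-loop sketch: rerun the stress test until it (or the
  # kernel) fails.  The checkout path and the direct invocation of the
  # test script are assumptions, not the official runner.
  cd audit-testsuite/tests/stress_tree || exit 1

  i=0
  while perl ./test; do
      i=$((i + 1))
      echo "stress_tree: iteration $i passed at $(date)"
  done
  echo "stress_tree: stopped after $i successful iterations"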
The initial panic dump is below:

[ 139.619065] list_del corruption. prev->next should be ffff985fa98d4100, but was ffff985fae91e370
[ 139.622504] ------------[ cut here ]------------
[ 139.623402] kernel BUG at lib/list_debug.c:53!
[ 139.624294] invalid opcode: 0000 [#1] SMP PTI
[ 139.625439] CPU: 1 PID: 3248 Comm: auditctl Not tainted 4.19.0-0.rc6.git2.1.1.secnext.fc30.x86_64 #1
[ 139.630761] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[ 139.631812] RIP: 0010:__list_del_entry_valid.cold.1+0x34/0x4c
[ 139.632853] Code: ce 34 ac e8 b8 f7 c0 ff 0f 0b 48 c7 c7 a8 cf 34 ac e8 aa f7 c0 ff 0f 0b 48 89 f2 48 89 fe 48 c7 c7 68 cf 34 ac e8 96 f7 c0 ff <0f> 0b 48 89 fe 48 c7 c7 30 cf 34 ac e8 85 f7 c0 ff 0f 0b 90 90 90
[ 139.636347] RSP: 0018:ffffb4890293fbb8 EFLAGS: 00010246
[ 139.637295] RAX: 0000000000000054 RBX: ffff985fae91e620 RCX: 0000000000000000
[ 139.638573] RDX: 0000000000000000 RSI: ffff985fb77d6be8 RDI: ffff985fb77d6be8
[ 139.639855] RBP: ffff985fa98d40c0 R08: 00046c1ac1fe8d00 R09: 0000000000000000
[ 139.641136] R10: 0000000000000000 R11: 0000000000000000 R12: ffff985fa98d4100
[ 139.642416] R13: 0000000000000000 R14: ffff985faf00fe20 R15: 00000000000003f4
[ 139.643699] FS: 00007f9898252b80(0000) GS:ffff985fb7600000(0000) knlGS:0000000000000000
[ 139.645199] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 139.646244] CR2: 0000561d333b8218 CR3: 000000023110a000 CR4: 00000000001406e0
[ 139.647533] Call Trace:
[ 139.647997]  audit_remove_tree_rule+0xad/0x160
[ 139.648819]  audit_del_rule+0x90/0x190
[ 139.649507]  audit_rule_change+0x98e/0xbf0
[ 139.650259]  audit_receive_msg+0x142/0x1160
[ 139.651030]  ? netlink_deliver_tap+0x99/0x410
[ 139.651832]  audit_receive+0x54/0xb0
[ 139.652495]  netlink_unicast+0x181/0x210
[ 139.653211]  netlink_sendmsg+0x218/0x3e0
[ 139.653936]  sock_sendmsg+0x36/0x40
[ 139.654583]  __sys_sendto+0xf1/0x160
[ 139.655244]  ? syscall_trace_enter+0x1d3/0x330
[ 139.656055]  ? trace_hardirqs_off_thunk+0x1a/0x1c
[ 139.656912]  __x64_sys_sendto+0x24/0x30
[ 139.657615]  do_syscall_64+0x60/0x1f0
[ 139.658285]  entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 139.659192] RIP: 0033:0x7f989835a7fb
[ 139.659847] Code: 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 f3 0f 1e fa 48 8d 05 25 6f 0c 00 41 89 ca 8b 00 85 c0 75 14 b8 2c 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 75 c3 0f 1f 40 00 41 57 4d 89 c7 41 56 41 89
[ 139.663170] RSP: 002b:00007ffc95ae6b48 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
[ 139.664531] RAX: ffffffffffffffda RBX: 0000000000000460 RCX: 00007f989835a7fb
[ 139.665809] RDX: 0000000000000460 RSI: 00007ffc95ae6b80 RDI: 0000000000000003
[ 139.667087] RBP: 0000000000000003 R08: 00007ffc95ae6b6c R09: 000000000000000c
[ 139.668370] R10: 0000000000000000 R11: 0000000000000246 R12: 00007ffc95ae6b80
[ 139.669648] R13: 00007ffc95ae6b6c R14: 0000000000000002 R15: 000000000000044f
[ 139.670935] Modules linked in: ib_isert iscsi_target_mod ib_srpt target_core_mod ib_srp scsi_transport_srp rpcrdma ib_umad rdma_ucm ib_iser ib_ipoib rdma_cm iw_cm libiscsi scsi_transport_iscsi ib_cm mlx5_ib ib_uverbs ib_core crct10dif_pclmul crc32_pclmul ghash_clmulni_intel joydev virtio_balloon i2c_piix4 sunrpc drm_kms_helper ttm crc32c_intel mlx5_core drm virtio_console mlxfw serio_raw devlink virtio_blk virtio_net net_failover failover ata_generic pata_acpi qemu_fw_cfg
[ 139.678676] ---[ end trace be98f2acb1e536e4 ]---

> diff --git a/tests/stress_tree/Makefile b/tests/stress_tree/Makefile
> new file mode 100644
> index 000000000000..7ade09aad86f
> --- /dev/null
> +++ b/tests/stress_tree/Makefile
> @@ -0,0 +1,8 @@
> +TARGETS=$(patsubst %.c,%,$(wildcard *.c))
> +
> +LDLIBS += -lpthread
> +
> +all: $(TARGETS)
> +clean:
> +	rm -f $(TARGETS)
> +
> diff --git a/tests/stress_tree/test b/tests/stress_tree/test
> new file mode 100755
> index 000000000000..6215bec810d1
> --- /dev/null
> +++ b/tests/stress_tree/test
> @@ -0,0 +1,171 @@
> +#!/usr/bin/perl
> +
> +use strict;
> +
> +use Test;
> +BEGIN { plan tests => 1 }
> +
> +use File::Temp qw/ tempdir tempfile /;
> +
> +###
> +# functions
> +
> +sub key_gen {
> +	my @chars = ( "A" .. "Z", "a" .. "z" );
> +	my $key = "testsuite-" . time . "-";
> +	$key .= $chars[ rand @chars ] for 1 .. 8;
> +	return $key;
> +}
> +
> +# Run stat on random files in subtrees to generate audit events
> +sub run_stat {
> +	my($dir,$dirs) = @_;
> +	my $path;
> +
> +	while (1) {
> +		$path = "$dir/mnt/mnt".int(rand($dirs))."/subdir".int(rand($dirs));
> +		stat($path);
> +	}
> +}
> +
> +# Generate audit rules for subtrees. Do one rule per subtree. Because a watch
> +# recursively iterates child mounts and we mount $dir/leaf$i under various
> +# subtrees, the inode corresponding to $dir/leaf$i gets tagged by different
> +# trees.
> +sub run_mark_audit {
> +	my($dir,$dirs,$key) = @_;
> +
> +	while (1) {
> +		for (my $i=0; $i < $dirs; $i++) {
> +			system("auditctl -w $dir/mnt/mnt$i -p r -k $key");
> +		}
> +		system("auditctl -D -k $key >& /dev/null");
> +	}
> +}
> +
> +sub umount_all {
> +	my($dir,$dirs,$ignore_fail) = @_;
> +
> +	for (my $i=0; $i < $dirs; $i++) {
> +		while (system("umount $dir/leaf$i >& /dev/null") > 0 &&
> +		       $ignore_fail == 0) {
> +			# Nothing - loop until umount succeeds
> +		}
> +	}
> +	for (my $i=0; $i < $dirs; $i++) {
> +		for (my $j=0; $j < $dirs; $j++) {
> +			while (system("umount $dir/mnt/mnt$i/subdir$j >& /dev/null") > 0 &&
> +			       $ignore_fail == 0) {
> +				# Nothing - loop until umount succeeds
> +			}
> +		}
> +		while (system("umount $dir/mnt/mnt$i >& /dev/null") > 0 &&
> +		       $ignore_fail == 0) {
> +			# Nothing - loop until umount succeeds
> +		}
> +	}
> +}
> +
> +# Mount and unmount filesystems. We pick a random leaf mount so that sometimes
> +# a leaf mount point root inode will gather more tags from different trees
> +# and sometimes we will be quicker in unmounting all instances of the leaf and
> +# thus exercise the inode eviction path
> +sub run_mount {
> +	my($dir,$dirs) = @_;
> +
> +	while (1) {
> +		# We use tmpfs here and not just bind mounts of some dir so
> +		# that the root inode gets evicted once all instances are
> +		# unmounted.
> +		for (my $i=0; $i < $dirs; $i++) {
> +			system("mount -t tmpfs none $dir/leaf$i");
> +		}
> +		for (my $i=0; $i < $dirs; $i++) {
> +			system("mount --bind $dir/dir$i $dir/mnt/mnt$i");
> +			for (my $j=0; $j < $dirs; $j++) {
> +				my $leaf="$dir/leaf".int(rand($dirs));
> +				system("mount --bind $leaf $dir/mnt/mnt$i/subdir$j");
> +			}
> +		}
> +		umount_all($dir, $dirs, 0);
> +	}
> +}
> +
> +
> +###
> +# setup
> +
> +# reset audit
> +system("auditctl -D >& /dev/null");
> +
> +# create temp directory
> +my $dir = tempdir( TEMPLATE => '/tmp/audit-testsuite-XXXX', CLEANUP => 1 );
> +
> +# create stdout/stderr sinks
> +( my $fh_out, my $stdout ) = tempfile(
> +	TEMPLATE => '/tmp/audit-testsuite-out-XXXX',
> +	UNLINK => 1
> +);
> +( my $fh_err, my $stderr ) = tempfile(
> +	TEMPLATE => '/tmp/audit-testsuite-err-XXXX',
> +	UNLINK => 1
> +);
> +
> +###
> +# tests
> +
> +my $dirs = 4;
> +
> +# setup directory hierarchy
> +for (my $i=0; $i < $dirs; $i++) {
> +	mkdir $dir."/dir".$i;
> +	for (my $j=0; $j < $dirs; $j++) {
> +		mkdir $dir."/dir".$i."/subdir".$j;
> +	}
> +}
> +mkdir "$dir/mnt";
> +for (my $i=0; $i < $dirs; $i++) {
> +	mkdir "$dir/mnt/mnt$i";
> +	mkdir "$dir/leaf$i";
> +}
> +
> +my $stat_pid = fork();
> +
> +if ($stat_pid == 0) {
> +	run_stat($dir, $dirs);
> +	# Never reached
> +	exit;
> +}
> +
> +my $mount_pid = fork();
> +
> +if ($mount_pid == 0) {
> +	run_mount($dir, $dirs);
> +	# Never reached
> +	exit;
> +}
> +
> +my $key = key_gen();
> +
> +my $audit_pid = fork();
> +
> +if ($audit_pid == 0) {
> +	run_mark_audit($dir, $dirs, $key);
> +	# Never reached
> +	exit;
> +}
> +
> +# Sleep for a minute to let stress test run...
> +sleep(60);
> +ok(1);
> +
> +###
> +# cleanup
> +
> +kill('KILL', $stat_pid, $mount_pid, $audit_pid);
> +# Wait for children to terminate
> +waitpid($stat_pid, 0);
> +waitpid($mount_pid, 0);
> +waitpid($audit_pid, 0);
> +system("auditctl -D >& /dev/null");
> +umount_all($dir, $dirs, 1);
> --
> 2.16.4
>

--
paul moore
www.paul-moore.com