From: "yukuai (C)" <yukuai3@huawei.com>
To: Jan Kara <jack@suse.cz>
Cc: "Michal Koutný" <mkoutny@suse.com>,
	"Paolo Valente" <paolo.valente@linaro.org>,
	linux-block@vger.kernel.org, fvogt@suse.de,
	cgroups@vger.kernel.org
Subject: Re: Use after free with BFQ and cgroups
Date: Thu, 23 Dec 2021 09:02:55 +0800	[thread overview]
Message-ID: <d770663a-911c-c9c1-1185-558634f4c738@huawei.com> (raw)
In-Reply-To: <20211222152103.GF685@quack2.suse.cz>

[-- Attachment #1: Type: text/plain, Size: 444 bytes --]

On 2021/12/22 23:21, Jan Kara wrote:
> On Thu 09-12-21 10:23:33, yukuai (C) wrote:
>> We confirmed this by our reproducer through a simple patch:
>> stop merging bfq_queues if their parents are different.
> 
> Can you please share your reproducer? I have prepared some patches which
> I'd like to verify before posting... Thanks!

Hi,

Here is the reproducer; the problem usually shows up within an hour.
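
Note: the script assumes a cgroup v1 blkio hierarchy mounted at
/sys/fs/cgroup/blkio, cgexec from cgroup-tools, fio, and the null_blk
module. On a KASAN-enabled kernel the resulting use-after-free shows
up in dmesg, so something like the following can watch for it while
the script runs:

    dmesg -w | grep -A 20 'BUG: KASAN: use-after-free'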

Thanks,
Kuai
> 
> 								Honza
> 

[-- Attachment #2: null_bad.sh --]
[-- Type: text/plain, Size: 3242 bytes --]

#!/bin/bash
NR=1
basedir=/sys/fs/cgroup/blkio/null
CG_PREFIX=/sys/fs/cgroup/blkio/null/nullb

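# Apply a random BFQ weight and random blk-throttle limits to the cgroup
# in $1; $2 is the device's "MAJ:MIN" number, since the throttle files
# take "MAJ:MIN value" pairs.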
function set_cgroup()
{
	testdir=$1
	dev_id=$2
	let weight=RANDOM%900+100
	let iops=RANDOM%1000+100
	let bps=RANDOM%10485760+10485760
	echo "$weight" > $testdir/blkio.bfq.weight
	echo "$dev_id $iops" > $testdir/blkio.throttle.read_iops_device
	echo "$dev_id $iops" > $testdir/blkio.throttle.write_iops_device
	echo "$dev_id $bps" > $testdir/blkio.throttle.read_bps_device
	echo "$dev_id $bps" > $testdir/blkio.throttle.write_bps_device
}

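# Randomize a few request-queue knobs of /sys/block/$1 to vary timing;
# the I/O scheduler itself is always set to bfq at the end.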
function set_sys()
{
	local queue_dir=/sys/block/$1/queue

	let rq_affinity=RANDOM%3
	echo $rq_affinity > $queue_dir/rq_affinity

	let add_random=RANDOM%2
	echo $add_random > $queue_dir/add_random

	let rotational=RANDOM%2
	echo $rotational > $queue_dir/rotational

	let nomerges=RANDOM%2
	echo $nomerges > $queue_dir/nomerges

	# bfq is the scheduler under test, so select it unconditionally
	echo bfq > $queue_dir/scheduler
}

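# Create the child cgroups null/nullb0 .. null/nullb$NR.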
create_cg()
{
	local i
	local path

	for i in $(seq 0 $NR)
	do
		path=${CG_PREFIX}${i}
		mkdir -p $path
	done
}

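# rmdir on a cgroup fails with EBUSY while it still contains tasks, so
# move every task in null/nullb$1 back to the root blkio cgroup first.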
switch_cg()
{
	local path=${CG_PREFIX}$1
	local t

	for t in $(cat $path/tasks)
	do
		echo $t > /sys/fs/cgroup/blkio/tasks
	done

	echo "tasks in $path"
	cat $path/tasks
}

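# Try to remove the child cgroup; non-zero status means it is still busy.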
rm_cg()
{
	local path=${CG_PREFIX}$1

	rmdir $path
	return $?
}

mkdir -p $basedir

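# Main stress loop: load null_blk with two devices, apply random BFQ and
# throttle settings, run fio jobs inside the child cgroups, then tear
# down the cgroups and the module while I/O may still be in flight.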
ADD_MOD="modprobe null_blk"
while true
do
	let flag=RANDOM%2
	if [ $flag -eq 1 ];then
		$ADD_MOD queue_mode=2 blocking=1 nr_devices=2
	else
		$ADD_MOD queue_mode=2 nr_devices=2
	fi
		
	create_cg

	dev_id=$(lsblk | grep nullb0 | awk '{print $2}')
	set_cgroup $basedir $dev_id
	set_sys nullb0

	dev_id=$(lsblk | grep nullb1 | awk '{print $2}')
	set_cgroup $basedir $dev_id
	set_sys nullb1

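	# With probability 1/20, enable fault injection on both devices;
	# the make-it-fail knob requires CONFIG_FAIL_MAKE_REQUEST.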
	let flag=RANDOM%20
	if [ $flag -eq 5 ];then
		echo 1 > /sys/block/nullb0/make-it-fail
		echo 1 > /sys/block/nullb1/make-it-fail
	else
		echo 0 > /sys/block/nullb0/make-it-fail
		echo 0 > /sys/block/nullb1/make-it-fail
	fi

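	# Start 16 short fio jobs in the background (4 rounds of 4),
	# pinned to the child cgroups via cgexec.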
	i=0
	while [ $i -le 3 ]
	do
		cgexec -g "blkio:null/nullb0" fio -filename=/dev/nullb0 -ioengine=libaio -time_based=1 -rw=rw -thread -size=100g -bs=512 -numjobs=4 -iodepth=8 -runtime=5 -group_reporting -name=brd-IOwrite -rwmixread=50 &>/dev/null &
		cgexec -g "blkio:null/nullb0" fio -filename=/dev/nullb0 -ioengine=psync -direct=1 -time_based=1 -rw=rw -thread -size=100g -bs=512 -numjobs=4 -iodepth=8 -runtime=5 -group_reporting -name=brd-IOwrite -rwmixread=50 &>/dev/null &
		cgexec -g "blkio:null/nullb1" fio -filename=/dev/nullb1 -ioengine=libaio -time_based=1 -rw=rw -thread -size=100g -bs=1024k -numjobs=4 -iodepth=8 -runtime=5 -group_reporting -name=brd-IOwrite -rwmixread=50 &>/dev/null &
		cgexec -g "blkio:null/nullb1" fio -filename=/dev/nullb1 -ioengine=psync -direct=1 -time_based=1 -rw=rw -thread -size=100g -bs=1024k -numjobs=4 -iodepth=8 -runtime=5 -group_reporting -name=brd-IOwrite -rwmixread=50 &>/dev/null &
		((i=i+1))
	done

	sleep 3

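	# Remove the child cgroups while fio jobs may still be running;
	# this overlap of cgroup removal with in-flight I/O is the window
	# in which the BFQ use-after-free can trigger.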
	until rm_cg 0
	do
		switch_cg 0
		sleep 0.1
	done

	until rm_cg 1
	do
		switch_cg 1
		sleep 0.1
	done

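	# rmmod fails with EBUSY while the devices are still open (e.g.
	# by lingering fio jobs), so keep retrying.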
	while true
	do
		rmmod null_blk &>/dev/null && break
		sleep 0.1
	done
done



Thread overview: 14+ messages
2021-11-25 17:28 Use after free with BFQ and cgroups Jan Kara
2021-11-26 14:47 ` Michal Koutný
2021-11-29 17:11   ` Jan Kara
2021-12-09  2:23     ` yukuai (C)
2021-12-09 15:33       ` Paolo Valente
2021-12-13 17:33       ` Jan Kara
2021-12-14  1:24         ` yukuai (C)
2021-12-20 18:38           ` Jan Kara
2021-12-22 15:21       ` Jan Kara
2021-12-23  1:02         ` yukuai (C) [this message]
2021-12-23 17:13           ` Jan Kara
2021-11-29 17:12   ` Tejun Heo
2021-11-30 11:50     ` Jan Kara
2021-11-30 16:22       ` Tejun Heo
