On Wed, Jun 29, 2022 at 10:15:31AM -0400, Emanuele Giuseppe Esposito wrote:
> diff --git a/blockdev.c b/blockdev.c
> index 71f793c4ab..5b79093155 100644
> --- a/blockdev.c
> +++ b/blockdev.c
> @@ -150,12 +150,15 @@ void blockdev_mark_auto_del(BlockBackend *blk)
>          return;
>      }
>
> -    for (job = block_job_next(NULL); job; job = block_job_next(job)) {
> +    JOB_LOCK_GUARD();
> +
> +    for (job = block_job_next_locked(NULL); job;
> +         job = block_job_next_locked(job)) {
>          if (block_job_has_bdrv(job, blk_bs(blk))) {
>              AioContext *aio_context = job->job.aio_context;
>              aio_context_acquire(aio_context);

Is there a lock ordering rule for job_mutex and the AioContext lock? I
haven't audited the code, but there might be ABBA lock ordering issues.

> diff --git a/qemu-img.c b/qemu-img.c
> index 4cf4d2423d..289d88a156 100644
> --- a/qemu-img.c
> +++ b/qemu-img.c
> @@ -912,25 +912,30 @@ static void run_block_job(BlockJob *job, Error **errp)
>      int ret = 0;
>
>      aio_context_acquire(aio_context);
> -    job_ref(&job->job);
> -    do {
> -        float progress = 0.0f;
> -        aio_poll(aio_context, true);
> +    WITH_JOB_LOCK_GUARD() {

Here the lock order is the opposite of above: the AioContext lock is
acquired first, then the job lock is taken inside it.