* [PATCH 0/2] Use QLNULL for null lockable
@ 2020-06-03 22:49 Joe Slater
2020-06-03 22:49 ` [PATCH 1/2] lockable: use QLNULL for a " Joe Slater
2020-06-03 22:49 ` [PATCH 2/2] lockable: do not rely on optimization for null lockables Joe Slater
0 siblings, 2 replies; 5+ messages in thread
From: Joe Slater @ 2020-06-03 22:49 UTC (permalink / raw)
To: qemu-devel; +Cc: qemu-trivial, qemu-stable
We currently will fail to build for optimizations like -Og because they do not
eliminate dead code. We do not need such clean up if we use QLNULL. There is no
need to produce a QemuLockable that will be thrown away.
Only testing:
$ ../configure
$ make -j16 CFLAGS="$CFLAGS" # which I set to use -Og, then -O2
Joe Slater (2):
lockable: use QLNULL for a null lockable
lockable: do not rely on optimization for null lockables
block/block-backend.c | 4 ++--
block/block-copy.c | 2 +-
block/mirror.c | 5 +++--
fsdev/qemu-fsdev-throttle.c | 6 +++---
hw/9pfs/9p.c | 2 +-
include/qemu/lockable.h | 16 ++++++----------
util/qemu-co-shared-resource.c | 2 +-
7 files changed, 17 insertions(+), 20 deletions(-)
--
2.7.4
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH 1/2] lockable: use QLNULL for a null lockable
2020-06-03 22:49 [PATCH 0/2] Use QLNULL for null lockable Joe Slater
@ 2020-06-03 22:49 ` Joe Slater
2020-06-04 16:22 ` Eric Blake
2020-06-03 22:49 ` [PATCH 2/2] lockable: do not rely on optimization for null lockables Joe Slater
1 sibling, 1 reply; 5+ messages in thread
From: Joe Slater @ 2020-06-03 22:49 UTC (permalink / raw)
To: qemu-devel; +Cc: qemu-trivial, qemu-stable
Allows us to build with -Og and optimizations that
do not clean up dead-code.
Signed-off-by: Joe Slater <joe.slater@windriver.com>
to be squished
Signed-off-by: Joe Slater <joe.slater@windriver.com>
---
block/block-backend.c | 4 ++--
block/block-copy.c | 2 +-
block/mirror.c | 5 +++--
fsdev/qemu-fsdev-throttle.c | 6 +++---
hw/9pfs/9p.c | 2 +-
include/qemu/lockable.h | 3 +++
util/qemu-co-shared-resource.c | 2 +-
7 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/block/block-backend.c b/block/block-backend.c
index 6936b25..92128e8 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -1174,7 +1174,7 @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
if (blk->quiesce_counter && !blk->disable_request_queuing) {
blk_dec_in_flight(blk);
- qemu_co_queue_wait(&blk->queued_requests, NULL);
+ qemu_co_queue_wait(&blk->queued_requests, QLNULL);
blk_inc_in_flight(blk);
}
}
@@ -2367,7 +2367,7 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
if (blk->dev_ops && blk->dev_ops->drained_end) {
blk->dev_ops->drained_end(blk->dev_opaque);
}
- while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
+ while (qemu_co_enter_next(&blk->queued_requests, QLNULL)) {
/* Resume all queued requests */
}
}
diff --git a/block/block-copy.c b/block/block-copy.c
index bb8d056..8de0b54 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -120,7 +120,7 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
return false;
}
- qemu_co_queue_wait(&task->wait_queue, NULL);
+ qemu_co_queue_wait(&task->wait_queue, QLNULL);
return true;
}
diff --git a/block/mirror.c b/block/mirror.c
index e8e8844..76c082d2 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -28,6 +28,7 @@
#define MAX_IO_BYTES (1 << 20) /* 1 Mb */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
+
/* The mirroring buffer is a list of granularity-sized chunks.
* Free chunks are organized in a list.
*/
@@ -157,7 +158,7 @@ static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
if (ranges_overlap(self_start_chunk, self_nb_chunks,
op_start_chunk, op_nb_chunks))
{
- qemu_co_queue_wait(&op->waiting_requests, NULL);
+ qemu_co_queue_wait(&op->waiting_requests, QLNULL);
break;
}
}
@@ -297,7 +298,7 @@ mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
if (!op->is_pseudo_op && op->is_in_flight &&
op->is_active_write == active)
{
- qemu_co_queue_wait(&op->waiting_requests, NULL);
+ qemu_co_queue_wait(&op->waiting_requests, QLNULL);
return;
}
}
diff --git a/fsdev/qemu-fsdev-throttle.c b/fsdev/qemu-fsdev-throttle.c
index 5c83a1c..78d256d 100644
--- a/fsdev/qemu-fsdev-throttle.c
+++ b/fsdev/qemu-fsdev-throttle.c
@@ -22,13 +22,13 @@
static void fsdev_throttle_read_timer_cb(void *opaque)
{
FsThrottle *fst = opaque;
- qemu_co_enter_next(&fst->throttled_reqs[false], NULL);
+ qemu_co_enter_next(&fst->throttled_reqs[false], QLNULL);
}
static void fsdev_throttle_write_timer_cb(void *opaque)
{
FsThrottle *fst = opaque;
- qemu_co_enter_next(&fst->throttled_reqs[true], NULL);
+ qemu_co_enter_next(&fst->throttled_reqs[true], QLNULL);
}
int fsdev_throttle_parse_opts(QemuOpts *opts, FsThrottle *fst, Error **errp)
@@ -100,7 +100,7 @@ void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst, bool is_write,
if (throttle_enabled(&fst->cfg)) {
if (throttle_schedule_timer(&fst->ts, &fst->tt, is_write) ||
!qemu_co_queue_empty(&fst->throttled_reqs[is_write])) {
- qemu_co_queue_wait(&fst->throttled_reqs[is_write], NULL);
+ qemu_co_queue_wait(&fst->throttled_reqs[is_write], QLNULL);
}
throttle_account(&fst->ts, is_write, iov_size(iov, iovcnt));
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index 45a788f..35976e2 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -2888,7 +2888,7 @@ static void coroutine_fn v9fs_flush(void *opaque)
/*
* Wait for pdu to complete.
*/
- qemu_co_queue_wait(&cancel_pdu->complete, NULL);
+ qemu_co_queue_wait(&cancel_pdu->complete, QLNULL);
if (!qemu_co_queue_next(&cancel_pdu->complete)) {
cancel_pdu->cancelled = 0;
pdu_free(cancel_pdu);
diff --git a/include/qemu/lockable.h b/include/qemu/lockable.h
index b620023..7f7aebb 100644
--- a/include/qemu/lockable.h
+++ b/include/qemu/lockable.h
@@ -24,6 +24,9 @@ struct QemuLockable {
QemuLockUnlockFunc *unlock;
};
+#define QLNULL ((QemuLockable *)0)
+
+
/* This function gives an error if an invalid, non-NULL pointer type is passed
* to QEMU_MAKE_LOCKABLE. For optimized builds, we can rely on dead-code elimination
* from the compiler, and give the errors already at link time.
diff --git a/util/qemu-co-shared-resource.c b/util/qemu-co-shared-resource.c
index 1c83cd9..7423ea4 100644
--- a/util/qemu-co-shared-resource.c
+++ b/util/qemu-co-shared-resource.c
@@ -64,7 +64,7 @@ void coroutine_fn co_get_from_shres(SharedResource *s, uint64_t n)
{
assert(n <= s->total);
while (!co_try_get_from_shres(s, n)) {
- qemu_co_queue_wait(&s->queue, NULL);
+ qemu_co_queue_wait(&s->queue, QLNULL);
}
}
--
2.7.4
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH 2/2] lockable: do not rely on optimization for null lockables
2020-06-03 22:49 [PATCH 0/2] Use QLNULL for null lockable Joe Slater
2020-06-03 22:49 ` [PATCH 1/2] lockable: use QLNULL for a " Joe Slater
@ 2020-06-03 22:49 ` Joe Slater
2020-06-04 16:31 ` Eric Blake
1 sibling, 1 reply; 5+ messages in thread
From: Joe Slater @ 2020-06-03 22:49 UTC (permalink / raw)
To: qemu-devel; +Cc: qemu-trivial, qemu-stable
If we use QLNULL for null lockables, we can always
use referencing unknown_lock_type as a link time
error indicator.
Signed-off-by: Joe Slater <joe.slater@windriver.com>
---
include/qemu/lockable.h | 19 ++++++-------------
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/include/qemu/lockable.h b/include/qemu/lockable.h
index 7f7aebb..7fc5750 100644
--- a/include/qemu/lockable.h
+++ b/include/qemu/lockable.h
@@ -24,21 +24,14 @@ struct QemuLockable {
QemuLockUnlockFunc *unlock;
};
-#define QLNULL ((QemuLockable *)0)
-
-
-/* This function gives an error if an invalid, non-NULL pointer type is passed
- * to QEMU_MAKE_LOCKABLE. For optimized builds, we can rely on dead-code elimination
- * from the compiler, and give the errors already at link time.
+/*
+ * If unknown_lock_type() is referenced, it means we have tried to pass something
+ * not recognized as lockable to the macros below. Use QLNULL to intentionally pass
+ * a null lockable.
 */
-#if defined(__OPTIMIZE__) && !defined(__SANITIZE_ADDRESS__)
+#define QLNULL ((QemuLockable *)0)
void unknown_lock_type(void *);
-#else
-static inline void unknown_lock_type(void *unused)
-{
- abort();
-}
-#endif
+
static inline __attribute__((__always_inline__)) QemuLockable *
qemu_make_lockable(void *x, QemuLockable *lockable)
--
2.7.4
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH 1/2] lockable: use QLNULL for a null lockable
2020-06-03 22:49 ` [PATCH 1/2] lockable: use QLNULL for a " Joe Slater
@ 2020-06-04 16:22 ` Eric Blake
0 siblings, 0 replies; 5+ messages in thread
From: Eric Blake @ 2020-06-04 16:22 UTC (permalink / raw)
To: Joe Slater, qemu-devel; +Cc: qemu-trivial, qemu-stable
On 6/3/20 5:49 PM, Joe Slater wrote:
> Allows us to build with -Og and optimizations that
> do not clean up dead-code.
>
> Signed-off-by: Joe Slater <joe.slater@windriver.com>
>
> to be squished
>
> Signed-off-by: Joe Slater <joe.slater@windriver.com>
These last two lines can be elided (looks like a rebase mishap).
> ---
> block/block-backend.c | 4 ++--
> block/block-copy.c | 2 +-
> block/mirror.c | 5 +++--
> fsdev/qemu-fsdev-throttle.c | 6 +++---
> hw/9pfs/9p.c | 2 +-
> include/qemu/lockable.h | 3 +++
> util/qemu-co-shared-resource.c | 2 +-
> 7 files changed, 14 insertions(+), 10 deletions(-)
>
If you use scripts/git.orderfile, your diff would show with the .h
changes first, which are arguably the meat of this patch.
> +++ b/block/mirror.c
> @@ -28,6 +28,7 @@
> #define MAX_IO_BYTES (1 << 20) /* 1 Mb */
> #define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
>
> +
> /* The mirroring buffer is a list of granularity-sized chunks.
> * Free chunks are organized in a list.
> */
This hunk looks spurious.
> +++ b/include/qemu/lockable.h
> @@ -24,6 +24,9 @@ struct QemuLockable {
> QemuLockUnlockFunc *unlock;
> };
>
> +#define QLNULL ((QemuLockable *)0)
Why not ((QemuLockable *)NULL) ?
Why no comments why this type-safe NULL is useful?
> +
> +
> /* This function gives an error if an invalid, non-NULL pointer type is passed
> * to QEMU_MAKE_LOCKABLE. For optimized builds, we can rely on dead-code elimination
> * from the compiler, and give the errors already at link time.
> diff --git a/util/qemu-co-shared-resource.c b/util/qemu-co-shared-resource.c
> index 1c83cd9..7423ea4 100644
> --- a/util/qemu-co-shared-resource.c
> +++ b/util/qemu-co-shared-resource.c
> @@ -64,7 +64,7 @@ void coroutine_fn co_get_from_shres(SharedResource *s, uint64_t n)
> {
> assert(n <= s->total);
> while (!co_try_get_from_shres(s, n)) {
> - qemu_co_queue_wait(&s->queue, NULL);
> + qemu_co_queue_wait(&s->queue, QLNULL);
It looks like your macro is added to give the compiler some type-safety,
but it is not obvious how it matters from just the commit message,
without also looking at patch 2/2.
--
Eric Blake, Principal Software Engineer
Red Hat, Inc. +1-919-301-3226
Virtualization: qemu.org | libvirt.org
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH 2/2] lockable: do not rely on optimization for null lockables
2020-06-03 22:49 ` [PATCH 2/2] lockable: do not rely on optimization for null lockables Joe Slater
@ 2020-06-04 16:31 ` Eric Blake
0 siblings, 0 replies; 5+ messages in thread
From: Eric Blake @ 2020-06-04 16:31 UTC (permalink / raw)
To: Joe Slater, qemu-devel; +Cc: qemu-trivial, qemu-stable
On 6/3/20 5:49 PM, Joe Slater wrote:
> If we use QLNULL for null lockables, we can always
> use referencing unknown_lock_type as a link time
> error indicator.
>
> Signed-off-by: Joe Slater <joe.slater@windriver.com>
> ---
> include/qemu/lockable.h | 19 ++++++-------------
> 1 file changed, 6 insertions(+), 13 deletions(-)
>
> diff --git a/include/qemu/lockable.h b/include/qemu/lockable.h
> index 7f7aebb..7fc5750 100644
> --- a/include/qemu/lockable.h
> +++ b/include/qemu/lockable.h
> @@ -24,21 +24,14 @@ struct QemuLockable {
> QemuLockUnlockFunc *unlock;
> };
>
> -#define QLNULL ((QemuLockable *)0)
> -
> -
> -/* This function gives an error if an invalid, non-NULL pointer type is passed
> - * to QEMU_MAKE_LOCKABLE. For optimized builds, we can rely on dead-code elimination
> - * from the compiler, and give the errors already at link time.
> +/*
> + * If unknown_lock_type() is referenced, it means we have tried to pass something
> + * not recognized as lockable to the macros below. Use QLNULL to intentionally pass
> + * a null lockable.
> */
> -#if defined(__OPTIMIZE__) && !defined(__SANITIZE_ADDRESS__)
> +#define QLNULL ((QemuLockable *)0)
Looks like a bit of churn (especially if you take my comment on 1/2 to
spell it NULL instead of 0). Should these be combined into a single patch?
> void unknown_lock_type(void *);
> -#else
> -static inline void unknown_lock_type(void *unused)
> -{
> - abort();
> -}
> -#endif
> +
>
> static inline __attribute__((__always_inline__)) QemuLockable *
> qemu_make_lockable(void *x, QemuLockable *lockable)
Reading further in the file:
/* Auxiliary macros to simplify QEMU_MAKE_LOCABLE. */
We should fix that typo while improving things.
/* QEMU_MAKE_LOCKABLE - Make a polymorphic QemuLockable
*
* @x: a lock object (currently one of QemuMutex, QemuRecMutex,
CoMutex, QemuSpin).
*
* Returns a QemuLockable object that can be passed around
* to a function that can operate with locks of any kind, or
* NULL if @x is %NULL.
Should this comment be tweaked to call out %QLNULL?
--
Eric Blake, Principal Software Engineer
Red Hat, Inc. +1-919-301-3226
Virtualization: qemu.org | libvirt.org
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2020-06-04 16:32 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-03 22:49 [PATCH 0/2] Use QLNULL for null lockable Joe Slater
2020-06-03 22:49 ` [PATCH 1/2] lockable: use QLNULL for a " Joe Slater
2020-06-04 16:22 ` Eric Blake
2020-06-03 22:49 ` [PATCH 2/2] lockable: do not rely on optimization for null lockables Joe Slater
2020-06-04 16:31 ` Eric Blake
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.