* [PATCH v1 0/2] io-uring: use consistent tracepoint format
@ 2022-02-14 18:04 Stefan Roesch
  2022-02-14 18:04 ` [PATCH v1 1/2] io-uring: add __fill_cqe function Stefan Roesch
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Stefan Roesch @ 2022-02-14 18:04 UTC (permalink / raw)
  To: io-uring, kernel-team; +Cc: shr

So far the io-uring tracepoints haven't used a consistent format. This
change gives them one, which makes it easier to follow an individual
request through the trace log.

Where it makes sense, the tracepoints use the following format
(illustrated below):
- context structure pointer
- request structure pointer
- user data
- opcode.
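
For instance, with these trace events enabled (e.g. via
/sys/kernel/tracing/events/io_uring/enable), submission and completion
of the same request line up on the leading fields. The pointer and data
values below are made up for illustration:

  io_uring_submit_sqe: ring 00000000e2a4cc21, req 000000005f2b9a10, user_data 1, opcode 1, flags 0, non block 1, sq_thread 0
  io_uring_complete: ring 00000000e2a4cc21, req 000000005f2b9a10, user_data 0x1, result 4096, cflags 0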



Stefan Roesch (2):
  io-uring: add __fill_cqe function
  io-uring: Make tracepoints consistent.

 fs/io_uring.c                   |  42 +++--
 include/trace/events/io_uring.h | 320 +++++++++++++++-----------------
 2 files changed, 178 insertions(+), 184 deletions(-)


base-commit: 754e0b0e35608ed5206d6a67a791563c631cec07
-- 
2.30.2



* [PATCH v1 1/2] io-uring: add __fill_cqe function
  2022-02-14 18:04 [PATCH v1 0/2] io-uring: use consistent tracepoint format Stefan Roesch
@ 2022-02-14 18:04 ` Stefan Roesch
  2022-02-14 18:04 ` [PATCH v1 2/2] io-uring: Make tracepoints consistent Stefan Roesch
  2022-02-15 21:28 ` [PATCH v1 0/2] io-uring: use consistent tracepoint format Jens Axboe
  2 siblings, 0 replies; 4+ messages in thread
From: Stefan Roesch @ 2022-02-14 18:04 UTC (permalink / raw)
  To: io-uring, kernel-team; +Cc: shr

This introduces the __fill_cqe() helper, which posts a CQE without
tracing, and moves the io_uring_complete tracepoint into the callers.
This is necessary to issue the tracepoint correctly in each completion
path.
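
In outline, the layering after this patch looks as follows (a
simplified sketch mirroring the diff below, not a complete, buildable
excerpt):

	/* post the CQE only, no tracing */
	static inline bool __fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
				      s32 res, u32 cflags);

	/* request-based completions: trace, then post */
	static inline bool __io_fill_cqe(struct io_kiocb *req, s32 res, u32 cflags)
	{
		trace_io_uring_complete(req->ctx, req->user_data, res, cflags);
		return __fill_cqe(req->ctx, req->user_data, res, cflags);
	}

	/* auxiliary completions: no request exists, trace the raw values */
	static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
					     s32 res, u32 cflags)
	{
		ctx->cq_extra++;
		trace_io_uring_complete(ctx, user_data, res, cflags);
		return __fill_cqe(ctx, user_data, res, cflags);
	}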

Signed-off-by: Stefan Roesch <shr@fb.com>
---
 fs/io_uring.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 77b9c7e4793b..b5f207fbe9a3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1900,13 +1900,11 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 	return true;
 }
 
-static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
+static inline bool __fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
 				 s32 res, u32 cflags)
 {
 	struct io_uring_cqe *cqe;
 
-	trace_io_uring_complete(ctx, user_data, res, cflags);
-
 	/*
 	 * If we can't get a cq entry, userspace overflowed the
 	 * submission (by quite a lot). Increment the overflow count in
@@ -1922,17 +1920,24 @@ static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
 	return io_cqring_event_overflow(ctx, user_data, res, cflags);
 }
 
+static inline bool __io_fill_cqe(struct io_kiocb *req, s32 res, u32 cflags)
+{
+	trace_io_uring_complete(req->ctx, req->user_data, res, cflags);
+	return __fill_cqe(req->ctx, req->user_data, res, cflags);
+}
+
 static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
 {
 	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe(req->ctx, req->user_data, res, cflags);
+		__io_fill_cqe(req, res, cflags);
 }
 
 static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
 				     s32 res, u32 cflags)
 {
 	ctx->cq_extra++;
-	return __io_fill_cqe(ctx, user_data, res, cflags);
+	trace_io_uring_complete(ctx, user_data, res, cflags);
+	return __fill_cqe(ctx, user_data, res, cflags);
 }
 
 static void __io_req_complete_post(struct io_kiocb *req, s32 res,
@@ -1941,7 +1946,7 @@ static void __io_req_complete_post(struct io_kiocb *req, s32 res,
 	struct io_ring_ctx *ctx = req->ctx;
 
 	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe(ctx, req->user_data, res, cflags);
+		__io_fill_cqe(req, res, cflags);
 	/*
 	 * If we're the last reference to this request, add to our locked
 	 * free_list cache.
@@ -2530,8 +2535,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 						    comp_list);
 
 			if (!(req->flags & REQ_F_CQE_SKIP))
-				__io_fill_cqe(ctx, req->user_data, req->result,
-					      req->cflags);
+				__io_fill_cqe(req, req->result, req->cflags);
 		}
 
 		io_commit_cqring(ctx);
@@ -2653,7 +2657,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		if (unlikely(req->flags & REQ_F_CQE_SKIP))
 			continue;
 
-		__io_fill_cqe(ctx, req->user_data, req->result, io_put_kbuf(req));
+		__io_fill_cqe(req, req->result, io_put_kbuf(req));
 		nr_events++;
 	}
 
-- 
2.30.2



* [PATCH v1 2/2] io-uring: Make tracepoints consistent.
  2022-02-14 18:04 [PATCH v1 0/2] io-uring: use consistent tracepoint format Stefan Roesch
  2022-02-14 18:04 ` [PATCH v1 1/2] io-uring: add __fill_cqe function Stefan Roesch
@ 2022-02-14 18:04 ` Stefan Roesch
  2022-02-15 21:28 ` [PATCH v1 0/2] io-uring: use consisten tracepoint format Jens Axboe
  2 siblings, 0 replies; 4+ messages in thread
From: Stefan Roesch @ 2022-02-14 18:04 UTC (permalink / raw)
  To: io-uring, kernel-team; +Cc: shr

This makes the io-uring tracepoints consistent. Where it makes sense,
the tracepoints start with the following four fields (see the example
after this list):
- context (ring)
- request
- user_data
- opcode.
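
As a minimal sketch of a converted call site (taken from the change to
io_arm_poll_handler() in the diff below), the request pointer, user_data
and opcode now follow the ring context directly:

	trace_io_uring_poll_arm(ctx, req, req->user_data, req->opcode,
				mask, apoll->poll.events);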

Signed-off-by: Stefan Roesch <shr@fb.com>
---
 fs/io_uring.c                   |  24 +--
 include/trace/events/io_uring.h | 320 +++++++++++++++-----------------
 2 files changed, 167 insertions(+), 177 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index b5f207fbe9a3..4117ae5476a2 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1610,8 +1610,8 @@ static void io_queue_async_work(struct io_kiocb *req, bool *dont_use)
 	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
 		req->work.flags |= IO_WQ_WORK_CANCEL;
 
-	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
-					&req->work, req->flags);
+	trace_io_uring_queue_async_work(ctx, req, req->user_data, req->opcode, req->flags,
+					&req->work, io_wq_is_hashed(&req->work));
 	io_wq_enqueue(tctx->io_wq, &req->work);
 	if (link)
 		io_queue_linked_timeout(link);
@@ -1922,7 +1922,7 @@ static inline bool __fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
 
 static inline bool __io_fill_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 {
-	trace_io_uring_complete(req->ctx, req->user_data, res, cflags);
+	trace_io_uring_complete(req->ctx, req, req->user_data, res, cflags);
 	return __fill_cqe(req->ctx, req->user_data, res, cflags);
 }
 
@@ -1936,7 +1936,7 @@ static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
 				     s32 res, u32 cflags)
 {
 	ctx->cq_extra++;
-	trace_io_uring_complete(ctx, user_data, res, cflags);
+	trace_io_uring_complete(ctx, NULL, user_data, res, cflags);
 	return __fill_cqe(ctx, user_data, res, cflags);
 }
 
@@ -2188,7 +2188,9 @@ static void io_fail_links(struct io_kiocb *req)
 		nxt = link->link;
 		link->link = NULL;
 
-		trace_io_uring_fail_link(req, link);
+		trace_io_uring_fail_link(req->ctx, req, req->user_data,
+					req->opcode, link);
+
 		if (!ignore_cqes) {
 			link->flags &= ~REQ_F_CQE_SKIP;
 			io_fill_cqe_req(link, res, 0);
@@ -5614,7 +5616,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask)
 	else
 		req->io_task_work.func = io_apoll_task_func;
 
-	trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
+	trace_io_uring_task_add(req->ctx, req, req->user_data, req->opcode, mask);
 	io_req_task_work_add(req, false);
 }
 
@@ -5843,7 +5845,7 @@ static int io_arm_poll_handler(struct io_kiocb *req)
 	if (ret || ipt.error)
 		return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
 
-	trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
+	trace_io_uring_poll_arm(ctx, req, req->user_data, req->opcode,
 				mask, apoll->poll.events);
 	return IO_APOLL_OK;
 }
@@ -6652,7 +6654,7 @@ static __cold void io_drain_req(struct io_kiocb *req)
 		goto queue;
 	}
 
-	trace_io_uring_defer(ctx, req, req->user_data);
+	trace_io_uring_defer(ctx, req, req->user_data, req->opcode);
 	de->req = req;
 	de->seq = seq;
 	list_add_tail(&de->list, &ctx->defer_list);
@@ -6986,7 +6988,7 @@ static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
 {
 	struct file *file = fget(fd);
 
-	trace_io_uring_file_get(ctx, fd);
+	trace_io_uring_file_get(ctx, req, req->user_data, fd);
 
 	/* we don't allow fixed io_uring files */
 	if (file && unlikely(file->f_op == &io_uring_fops))
@@ -7284,7 +7286,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 
 	ret = io_init_req(ctx, req, sqe);
 	if (unlikely(ret)) {
-		trace_io_uring_req_failed(sqe, ret);
+		trace_io_uring_req_failed(sqe, ctx, req, ret);
 
 		/* fail even hard links since we don't submit */
 		if (link->head) {
@@ -7311,7 +7313,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	}
 
 	/* don't need @sqe from now on */
-	trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
+	trace_io_uring_submit_sqe(ctx, req, req->user_data, req->opcode,
 				  req->flags, true,
 				  ctx->flags & IORING_SETUP_SQPOLL);
 
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 7346f0164cf4..079a861bfeeb 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -29,15 +29,15 @@ TRACE_EVENT(io_uring_create,
 	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),
 
 	TP_STRUCT__entry (
-		__field(  int,		fd			)
-		__field(  void *,	ctx			)
+		__field(  int,		fd		)
+		__field(  void *,	ctx		)
 		__field(  u32,		sq_entries	)
 		__field(  u32,		cq_entries	)
 		__field(  u32,		flags		)
 	),
 
 	TP_fast_assign(
-		__entry->fd			= fd;
+		__entry->fd		= fd;
 		__entry->ctx		= ctx;
 		__entry->sq_entries	= sq_entries;
 		__entry->cq_entries	= cq_entries;
@@ -73,12 +73,12 @@ TRACE_EVENT(io_uring_register,
 	TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),
 
 	TP_STRUCT__entry (
-		__field(  void *,	ctx			)
-		__field(  unsigned,	opcode		)
-		__field(  unsigned,	nr_files	)
-		__field(  unsigned,	nr_bufs		)
-		__field(  bool,		eventfd		)
-		__field(  long,		ret			)
+		__field(  void *,	ctx	)
+		__field(  unsigned,	opcode	)
+		__field(  unsigned,	nr_files)
+		__field(  unsigned,	nr_bufs	)
+		__field(  bool,		eventfd	)
+		__field(  long,		ret	)
 	),
 
 	TP_fast_assign(
@@ -100,6 +100,8 @@ TRACE_EVENT(io_uring_register,
  * io_uring_file_get - called before getting references to an SQE file
  *
  * @ctx:	pointer to a ring context structure
+ * @req:	pointer to a submitted request
+ * @user_data:	user data associated with the request
  * @fd:		SQE file descriptor
  *
  * Allows to trace out how often an SQE file reference is obtained, which can
@@ -108,59 +110,71 @@ TRACE_EVENT(io_uring_register,
  */
 TRACE_EVENT(io_uring_file_get,
 
-	TP_PROTO(void *ctx, int fd),
+	TP_PROTO(void *ctx, void *req, unsigned long long user_data, int fd),
 
-	TP_ARGS(ctx, fd),
+	TP_ARGS(ctx, req, user_data, fd),
 
 	TP_STRUCT__entry (
-		__field(  void *,	ctx	)
-		__field(  int,		fd	)
+		__field(  void *,	ctx		)
+		__field(  void *,	req		)
+		__field(  u64,		user_data	)
+		__field(  int,		fd		)
 	),
 
 	TP_fast_assign(
-		__entry->ctx	= ctx;
+		__entry->ctx		= ctx;
+		__entry->req		= req;
+		__entry->user_data	= user_data;
 		__entry->fd		= fd;
 	),
 
-	TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
+	TP_printk("ring %p, req %p, user_data %llu, fd %d",
+		__entry->ctx, __entry->req, __entry->user_data, __entry->fd)
 );
 
 /**
  * io_uring_queue_async_work - called before submitting a new async work
  *
  * @ctx:	pointer to a ring context structure
- * @hashed:	type of workqueue, hashed or normal
  * @req:	pointer to a submitted request
+ * @user_data:	user data associated with the request
+ * @opcode:	opcode of request
+ * @flags:	request flags
  * @work:	pointer to a submitted io_wq_work
+ * @rw:		type of workqueue, hashed or normal
  *
  * Allows to trace asynchronous work submission.
  */
 TRACE_EVENT(io_uring_queue_async_work,
 
-	TP_PROTO(void *ctx, int rw, void * req, struct io_wq_work *work,
-			 unsigned int flags),
+	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode,
+		unsigned int flags, struct io_wq_work *work, int rw),
 
-	TP_ARGS(ctx, rw, req, work, flags),
+	TP_ARGS(ctx, req, user_data, opcode, flags, work, rw),
 
 	TP_STRUCT__entry (
-		__field(  void *,			ctx	)
-		__field(  int,				rw	)
-		__field(  void *,			req	)
-		__field(  struct io_wq_work *,		work	)
-		__field(  unsigned int,			flags	)
+		__field(  void *,			ctx		)
+		__field(  void *,			req		)
+		__field(  u64,				user_data	)
+		__field(  u8,				opcode		)
+		__field(  unsigned int,			flags		)
+		__field(  struct io_wq_work *,		work		)
+		__field(  int,				rw		)
 	),
 
 	TP_fast_assign(
-		__entry->ctx	= ctx;
-		__entry->rw	= rw;
-		__entry->req	= req;
-		__entry->work	= work;
-		__entry->flags	= flags;
+		__entry->ctx		= ctx;
+		__entry->req		= req;
+		__entry->user_data	= user_data;
+		__entry->opcode		= opcode;
+		__entry->flags		= flags;
+		__entry->work		= work;
+		__entry->rw		= rw;
 	),
 
-	TP_printk("ring %p, request %p, flags %d, %s queue, work %p",
-			  __entry->ctx, __entry->req, __entry->flags,
-			  __entry->rw ? "hashed" : "normal", __entry->work)
+	TP_printk("ring %p, request %p, user_data %llu, opcode %d, flags %d, %s queue, work %p",
+		__entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
+		__entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
 );
 
 /**
@@ -169,30 +183,33 @@ TRACE_EVENT(io_uring_queue_async_work,
  * @ctx:	pointer to a ring context structure
  * @req:	pointer to a deferred request
  * @user_data:	user data associated with the request
+ * @opcode:	opcode of request
  *
  * Allows to track deferred requests, to get an insight about what requests are
  * not started immediately.
  */
 TRACE_EVENT(io_uring_defer,
 
-	TP_PROTO(void *ctx, void *req, unsigned long long user_data),
+	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode),
 
-	TP_ARGS(ctx, req, user_data),
+	TP_ARGS(ctx, req, user_data, opcode),
 
 	TP_STRUCT__entry (
-		__field(  void *,	ctx		)
-		__field(  void *,	req		)
-		__field(  unsigned long long, data	)
+		__field(  void *,		ctx	)
+		__field(  void *,		req	)
+		__field(  unsigned long long,	data	)
+		__field(  u8,			opcode	)
 	),
 
 	TP_fast_assign(
 		__entry->ctx	= ctx;
 		__entry->req	= req;
 		__entry->data	= user_data;
+		__entry->opcode	= opcode;
 	),
 
-	TP_printk("ring %p, request %p user_data %llu", __entry->ctx,
-			__entry->req, __entry->data)
+	TP_printk("ring %p, request %p, user_data %llu, opcode %d",
+		__entry->ctx, __entry->req, __entry->data, __entry->opcode)
 );
 
 /**
@@ -250,7 +267,7 @@ TRACE_EVENT(io_uring_cqring_wait,
 	),
 
 	TP_fast_assign(
-		__entry->ctx	= ctx;
+		__entry->ctx		= ctx;
 		__entry->min_events	= min_events;
 	),
 
@@ -260,7 +277,10 @@ TRACE_EVENT(io_uring_cqring_wait,
 /**
  * io_uring_fail_link - called before failing a linked request
  *
+ * @ctx:	pointer to a ring context structure
  * @req:	request, which links were cancelled
+ * @user_data:	user data associated with the request
+ * @opcode:	opcode of request
  * @link:	cancelled link
  *
  * Allows to track linked requests cancellation, to see not only that some work
@@ -268,27 +288,36 @@ TRACE_EVENT(io_uring_cqring_wait,
  */
 TRACE_EVENT(io_uring_fail_link,
 
-	TP_PROTO(void *req, void *link),
+	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, void *link),
 
-	TP_ARGS(req, link),
+	TP_ARGS(ctx, req, user_data, opcode, link),
 
 	TP_STRUCT__entry (
-		__field(  void *,	req	)
-		__field(  void *,	link	)
+		__field(  void *,		ctx		)
+		__field(  void *,		req		)
+		__field(  unsigned long long,	user_data	)
+		__field(  u8,			opcode		)
+		__field(  void *,		link		)
 	),
 
 	TP_fast_assign(
-		__entry->req	= req;
-		__entry->link	= link;
+		__entry->ctx		= ctx;
+		__entry->req		= req;
+		__entry->user_data	= user_data;
+		__entry->opcode		= opcode;
+		__entry->link		= link;
 	),
 
-	TP_printk("request %p, link %p", __entry->req, __entry->link)
+	TP_printk("ring %p, request %p, user_data %llu, opcode %d, link %p",
+		__entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
+		__entry->link)
 );
 
 /**
  * io_uring_complete - called when completing an SQE
  *
  * @ctx:		pointer to a ring context structure
+ * @req:		pointer to a submitted request
  * @user_data:		user data associated with the request
  * @res:		result of the request
  * @cflags:		completion flags
@@ -296,12 +325,13 @@ TRACE_EVENT(io_uring_fail_link,
  */
 TRACE_EVENT(io_uring_complete,
 
-	TP_PROTO(void *ctx, u64 user_data, int res, unsigned cflags),
+	TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags),
 
-	TP_ARGS(ctx, user_data, res, cflags),
+	TP_ARGS(ctx, req, user_data, res, cflags),
 
 	TP_STRUCT__entry (
 		__field(  void *,	ctx		)
+		__field(  void *,	req		)
 		__field(  u64,		user_data	)
 		__field(  int,		res		)
 		__field(  unsigned,	cflags		)
@@ -309,14 +339,16 @@ TRACE_EVENT(io_uring_complete,
 
 	TP_fast_assign(
 		__entry->ctx		= ctx;
+		__entry->req		= req;
 		__entry->user_data	= user_data;
 		__entry->res		= res;
 		__entry->cflags		= cflags;
 	),
 
-	TP_printk("ring %p, user_data 0x%llx, result %d, cflags %x",
-			  __entry->ctx, (unsigned long long)__entry->user_data,
-			  __entry->res, __entry->cflags)
+	TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags %x",
+		__entry->ctx, __entry->req,
+		(unsigned long long)__entry->user_data,
+		__entry->res, __entry->cflags)
 );
 
 /**
@@ -324,8 +356,8 @@ TRACE_EVENT(io_uring_complete,
  *
  * @ctx:		pointer to a ring context structure
  * @req:		pointer to a submitted request
- * @opcode:		opcode of request
  * @user_data:		user data associated with the request
+ * @opcode:		opcode of request
 * @flags:		request flags
  * @force_nonblock:	whether a context blocking or not
  * @sq_thread:		true if sq_thread has submitted this SQE
@@ -335,34 +367,34 @@ TRACE_EVENT(io_uring_complete,
  */
 TRACE_EVENT(io_uring_submit_sqe,
 
-	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags,
+	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, u32 flags,
 		 bool force_nonblock, bool sq_thread),
 
-	TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread),
+	TP_ARGS(ctx, req, user_data, opcode, flags, force_nonblock, sq_thread),
 
 	TP_STRUCT__entry (
-		__field(  void *,	ctx		)
-		__field(  void *,	req		)
-		__field(  u8,		opcode		)
-		__field(  u64,		user_data	)
-		__field(  u32,		flags		)
-		__field(  bool,		force_nonblock	)
-		__field(  bool,		sq_thread	)
+		__field(  void *,		ctx		)
+		__field(  void *,		req		)
+		__field(  unsigned long long,	user_data	)
+		__field(  u8,			opcode		)
+		__field(  u32,			flags		)
+		__field(  bool,			force_nonblock	)
+		__field(  bool,			sq_thread	)
 	),
 
 	TP_fast_assign(
 		__entry->ctx		= ctx;
 		__entry->req		= req;
-		__entry->opcode		= opcode;
 		__entry->user_data	= user_data;
+		__entry->opcode		= opcode;
 		__entry->flags		= flags;
 		__entry->force_nonblock	= force_nonblock;
 		__entry->sq_thread	= sq_thread;
 	),
 
-	TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, "
+	TP_printk("ring %p, req %p, user_data %llu, opcode %d, flags %u, "
 		  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
-		  __entry->opcode, (unsigned long long)__entry->user_data,
+		  __entry->user_data, __entry->opcode,
 		  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
 );
 
@@ -371,8 +403,8 @@ TRACE_EVENT(io_uring_submit_sqe,
  *
  * @ctx:		pointer to a ring context structure
  * @req:		pointer to the armed request
- * @opcode:		opcode of request
  * @user_data:		user data associated with the request
+ * @opcode:		opcode of request
  * @mask:		request poll events mask
  * @events:		registered events of interest
  *
@@ -381,155 +413,110 @@ TRACE_EVENT(io_uring_submit_sqe,
  */
 TRACE_EVENT(io_uring_poll_arm,
 
-	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data,
+	TP_PROTO(void *ctx, void *req, u64 user_data, u8 opcode,
 		 int mask, int events),
 
-	TP_ARGS(ctx, req, opcode, user_data, mask, events),
+	TP_ARGS(ctx, req, user_data, opcode, mask, events),
 
 	TP_STRUCT__entry (
-		__field(  void *,	ctx		)
-		__field(  void *,	req		)
-		__field(  u8,		opcode		)
-		__field(  u64,		user_data	)
-		__field(  int,		mask		)
-		__field(  int,		events		)
+		__field(  void *,		ctx		)
+		__field(  void *,		req		)
+		__field(  unsigned long long,	user_data	)
+		__field(  u8,			opcode		)
+		__field(  int,			mask		)
+		__field(  int,			events		)
 	),
 
 	TP_fast_assign(
 		__entry->ctx		= ctx;
 		__entry->req		= req;
-		__entry->opcode		= opcode;
 		__entry->user_data	= user_data;
+		__entry->opcode		= opcode;
 		__entry->mask		= mask;
 		__entry->events		= events;
 	),
 
-	TP_printk("ring %p, req %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
-		  __entry->ctx, __entry->req, __entry->opcode,
-		  (unsigned long long) __entry->user_data,
+	TP_printk("ring %p, req %p, user_data %llu, opcode %d, mask 0x%x, events 0x%x",
+		  __entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
 		  __entry->mask, __entry->events)
 );
 
-TRACE_EVENT(io_uring_poll_wake,
-
-	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),
-
-	TP_ARGS(ctx, opcode, user_data, mask),
-
-	TP_STRUCT__entry (
-		__field(  void *,	ctx		)
-		__field(  u8,		opcode		)
-		__field(  u64,		user_data	)
-		__field(  int,		mask		)
-	),
-
-	TP_fast_assign(
-		__entry->ctx		= ctx;
-		__entry->opcode		= opcode;
-		__entry->user_data	= user_data;
-		__entry->mask		= mask;
-	),
-
-	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
-			  __entry->ctx, __entry->opcode,
-			  (unsigned long long) __entry->user_data,
-			  __entry->mask)
-);
-
-TRACE_EVENT(io_uring_task_add,
-
-	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),
-
-	TP_ARGS(ctx, opcode, user_data, mask),
-
-	TP_STRUCT__entry (
-		__field(  void *,	ctx		)
-		__field(  u8,		opcode		)
-		__field(  u64,		user_data	)
-		__field(  int,		mask		)
-	),
-
-	TP_fast_assign(
-		__entry->ctx		= ctx;
-		__entry->opcode		= opcode;
-		__entry->user_data	= user_data;
-		__entry->mask		= mask;
-	),
-
-	TP_printk("ring %p, op %d, data 0x%llx, mask %x",
-			  __entry->ctx, __entry->opcode,
-			  (unsigned long long) __entry->user_data,
-			  __entry->mask)
-);
-
 /*
- * io_uring_task_run - called when task_work_run() executes the poll events
- *                     notification callbacks
+ * io_uring_task_add - called after adding task work for a request
  *
  * @ctx:		pointer to a ring context structure
- * @req:		pointer to the armed request
- * @opcode:		opcode of request
+ * @req:		pointer to request
  * @user_data:		user data associated with the request
+ * @opcode:		opcode of request
+ * @mask:		request poll events mask
  *
- * Allows to track when notified poll events are processed
  */
-TRACE_EVENT(io_uring_task_run,
+TRACE_EVENT(io_uring_task_add,
 
-	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data),
+	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, int mask),
 
-	TP_ARGS(ctx, req, opcode, user_data),
+	TP_ARGS(ctx, req, user_data, opcode, mask),
 
 	TP_STRUCT__entry (
-		__field(  void *,	ctx		)
-		__field(  void *,	req		)
-		__field(  u8,		opcode		)
-		__field(  u64,		user_data	)
+		__field(  void *,		ctx		)
+		__field(  void *,		req		)
+		__field(  unsigned long long,	user_data	)
+		__field(  u8,			opcode		)
+		__field(  int,			mask		)
 	),
 
 	TP_fast_assign(
 		__entry->ctx		= ctx;
 		__entry->req		= req;
-		__entry->opcode		= opcode;
 		__entry->user_data	= user_data;
+		__entry->opcode		= opcode;
+		__entry->mask		= mask;
 	),
 
-	TP_printk("ring %p, req %p, op %d, data 0x%llx",
-		  __entry->ctx, __entry->req, __entry->opcode,
-		  (unsigned long long) __entry->user_data)
+	TP_printk("ring %p, req %p, user_data %llu, opcode %d, mask %x",
+		__entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
+		__entry->mask)
 );
 
 /*
 * io_uring_req_failed - called when an sqe is errored during submission
  *
  * @sqe:		pointer to the io_uring_sqe that failed
+ * @ctx:		pointer to a ring context structure
+ * @req:		pointer to request
  * @error:		error it failed with
  *
  * Allows easier diagnosing of malformed requests in production systems.
  */
 TRACE_EVENT(io_uring_req_failed,
 
-	TP_PROTO(const struct io_uring_sqe *sqe, int error),
+	TP_PROTO(const struct io_uring_sqe *sqe, void *ctx, void *req, int error),
 
-	TP_ARGS(sqe, error),
+	TP_ARGS(sqe, ctx, req, error),
 
 	TP_STRUCT__entry (
-		__field(  u8,	opcode )
-		__field(  u8,	flags )
-		__field(  u8,	ioprio )
-		__field( u64,	off )
-		__field( u64,	addr )
-		__field( u32,	len )
-		__field( u32,	op_flags )
-		__field( u64,	user_data )
-		__field( u16,	buf_index )
-		__field( u16,	personality )
-		__field( u32,	file_index )
-		__field( u64,	pad1 )
-		__field( u64,	pad2 )
-		__field( int,	error )
+		__field(  void *,		ctx		)
+		__field(  void *,		req		)
+		__field(  unsigned long long,	user_data	)
+		__field(  u8,			opcode		)
+		__field(  u8,			flags		)
+		__field(  u8,			ioprio		)
+		__field( u64,			off		)
+		__field( u64,			addr		)
+		__field( u32,			len		)
+		__field( u32,			op_flags	)
+		__field( u16,			buf_index	)
+		__field( u16,			personality	)
+		__field( u32,			file_index	)
+		__field( u64,			pad1		)
+		__field( u64,			pad2		)
+		__field( int,			error		)
 	),
 
 	TP_fast_assign(
+		__entry->ctx		= ctx;
+		__entry->req		= req;
+		__entry->user_data	= sqe->user_data;
 		__entry->opcode		= sqe->opcode;
 		__entry->flags		= sqe->flags;
 		__entry->ioprio		= sqe->ioprio;
@@ -537,7 +524,6 @@ TRACE_EVENT(io_uring_req_failed,
 		__entry->addr		= sqe->addr;
 		__entry->len		= sqe->len;
 		__entry->op_flags	= sqe->rw_flags;
-		__entry->user_data	= sqe->user_data;
 		__entry->buf_index	= sqe->buf_index;
 		__entry->personality	= sqe->personality;
 		__entry->file_index	= sqe->file_index;
@@ -546,13 +532,15 @@ TRACE_EVENT(io_uring_req_failed,
 		__entry->error		= error;
 	),
 
-	TP_printk("op %d, flags=0x%x, prio=%d, off=%llu, addr=%llu, "
-		  "len=%u, rw_flags=0x%x, user_data=0x%llx, buf_index=%d, "
+	TP_printk("ring %p, req %p, user_data %llu, "
+		"op %d, flags=0x%x, prio=%d, off=%llu, addr=%llu, "
+		  "len=%u, rw_flags=0x%x, buf_index=%d, "
 		  "personality=%d, file_index=%d, pad=0x%llx/%llx, error=%d",
+		  __entry->ctx, __entry->req, __entry->user_data,
 		  __entry->opcode, __entry->flags, __entry->ioprio,
 		  (unsigned long long)__entry->off,
 		  (unsigned long long) __entry->addr, __entry->len,
-		  __entry->op_flags, (unsigned long long) __entry->user_data,
+		  __entry->op_flags,
 		  __entry->buf_index, __entry->personality, __entry->file_index,
 		  (unsigned long long) __entry->pad1,
 		  (unsigned long long) __entry->pad2, __entry->error)
-- 
2.30.2



* Re: [PATCH v1 0/2] io-uring: use consistent tracepoint format
  2022-02-14 18:04 [PATCH v1 0/2] io-uring: use consistent tracepoint format Stefan Roesch
  2022-02-14 18:04 ` [PATCH v1 1/2] io-uring: add __fill_cqe function Stefan Roesch
  2022-02-14 18:04 ` [PATCH v1 2/2] io-uring: Make tracepoints consistent Stefan Roesch
@ 2022-02-15 21:28 ` Jens Axboe
  2 siblings, 0 replies; 4+ messages in thread
From: Jens Axboe @ 2022-02-15 21:28 UTC (permalink / raw)
  To: io-uring, Stefan Roesch, kernel-team

On Mon, 14 Feb 2022 10:04:28 -0800, Stefan Roesch wrote:
> So far the io-uring tracepoints haven't used a consistent format. This
> change gives them one, which makes it easier to follow an individual
> request through the trace log.
> 
> Where it makes sense, the tracepoints use the following format:
> - context structure pointer
> - request structure pointer
> - user data
> - opcode.
> 
> [...]

Applied, thanks!

[1/2] io-uring: add __fill_cqe function
      commit: 8592d34e8ffa738e4dec7edae3b39055d4215a7e
[2/2] io-uring: Make tracepoints consistent.
      commit: b57d34f5cc694761d9ab89e519cf41045a174a07

Best regards,
-- 
Jens Axboe



