Commit 3576dfdf authored by Jens Axboe, committed by Cheng Jian

io_uring: improve trace_io_uring_defer() trace point

mainline inclusion
from mainline-5.5-rc1
commit 915967f6
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA
---------------------------

We don't have shadow requests anymore, so get rid of the shadow
argument. Add the user_data argument, as that's often useful to easily
match up requests, instead of having to look at request pointers.
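
As a side note for reviewers (not part of the patch itself): the user_data value
the trace point now reports is simply the 64-bit tag the application stores in
the SQE, which is later echoed back in the CQE. A minimal userspace sketch,
assuming liburing is available and using arbitrary nop requests and tags, might
look like this:

	/*
	 * Illustrative sketch only: shows where the user_data value reported
	 * by the updated trace point comes from.  The nop requests and the
	 * 0x1234 tag are arbitrary.  The drained request is only deferred
	 * (and hence traced by io_uring_defer) if the preceding request has
	 * not completed yet.
	 */
	#include <liburing.h>
	#include <stdio.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		int i;

		if (io_uring_queue_init(8, &ring, 0) < 0)
			return 1;

		/* First request: a plain nop. */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_nop(sqe);
		sqe->user_data = 1;

		/* Second request: drained, so it may go through io_req_defer(). */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_nop(sqe);
		io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);
		sqe->user_data = 0x1234;	/* value the trace point now prints */

		io_uring_submit(&ring);

		for (i = 0; i < 2; i++) {
			if (io_uring_wait_cqe(&ring, &cqe))
				break;
			/* The same tag comes back in the CQE, so submissions,
			 * trace lines and completions can all be matched up. */
			printf("completed user_data %llu\n",
			       (unsigned long long) cqe->user_data);
			io_uring_cqe_seen(&ring, cqe);
		}

		io_uring_queue_exit(&ring);
		return 0;
	}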
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
上级 4251d384
@@ -2587,7 +2587,7 @@ static int io_req_defer(struct io_kiocb *req)
 	req->flags |= REQ_F_FREE_SQE;
 	req->submit.sqe = sqe_copy;
 
-	trace_io_uring_defer(ctx, req, false);
+	trace_io_uring_defer(ctx, req, req->user_data);
 	list_add_tail(&req->list, &ctx->defer_list);
 	spin_unlock_irq(&ctx->completion_lock);
 	return -EIOCBQUEUED;
...
@@ -163,35 +163,35 @@ TRACE_EVENT(io_uring_queue_async_work,
 );
 
 /**
- * io_uring_defer_list - called before the io_uring work added into defer_list
+ * io_uring_defer - called when an io_uring request is deferred
  *
  * @ctx: pointer to a ring context structure
  * @req: pointer to a deferred request
- * @shadow: whether request is shadow or not
+ * @user_data: user data associated with the request
  *
  * Allows to track deferred requests, to get an insight about what requests are
  * not started immediately.
  */
 TRACE_EVENT(io_uring_defer,
 
-	TP_PROTO(void *ctx, void *req, bool shadow),
+	TP_PROTO(void *ctx, void *req, unsigned long long user_data),
 
-	TP_ARGS(ctx, req, shadow),
+	TP_ARGS(ctx, req, user_data),
 
 	TP_STRUCT__entry (
 		__field(  void *,		ctx	)
 		__field(  void *,		req	)
-		__field(  bool,			shadow	)
+		__field(  unsigned long long,	data	)
 	),
 
 	TP_fast_assign(
 		__entry->ctx	= ctx;
 		__entry->req	= req;
-		__entry->shadow	= shadow;
+		__entry->data	= user_data;
 	),
 
-	TP_printk("ring %p, request %p%s", __entry->ctx, __entry->req,
-		  __entry->shadow ? ", shadow": "")
+	TP_printk("ring %p, request %p user_data %llu", __entry->ctx,
+		  __entry->req, __entry->data)
 );
 
 /**
...
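
For reference, with the new TP_printk format a deferred request shows up in the
trace buffer roughly as below; the pointer hashes are made up, and 4660 is just
the decimal rendering of the 0x1234 tag used in the sketch above:

	io_uring_defer: ring 00000000a1b2c3d4, request 000000007e8f9a0b user_data 4660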