Commit 919dbca8 authored by Bart Van Assche, committed by Jens Axboe

blktrace: Use the new blk_opf_t type

Improve static type checking by using the new blk_opf_t type for a function
argument that represents a combination of a request operation and request
flags. Rename that argument from 'op' to 'opf' to make its role clearer.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-12-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: 22c80aac
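For context: blk_opf_t is introduced earlier in this patch series as a __bitwise alias of __u32, so sparse (run via "make C=2") warns whenever a plain integer and a blk_opf_t value are mixed without an explicit __force cast. The minimal, hypothetical sketch below is not part of this commit; demo_fill_rwbs is an invented helper, and it assumes bio->bi_opf has already been converted to blk_opf_t by an earlier patch in the series. It shows a caller of the new blk_fill_rwbs() signature:

#include <linux/blk_types.h>    /* blk_opf_t, struct bio, REQ_* flags */
#include <linux/blktrace_api.h> /* blk_fill_rwbs() */

/* Hypothetical helper: decode a bio's operation and flags into "WFS"-style text. */
static void demo_fill_rwbs(struct bio *bio, char rwbs[8])
{
	/* bio->bi_opf is a blk_opf_t, so no cast is needed here... */
	blk_fill_rwbs(rwbs, bio->bi_opf);

	/*
	 * ...whereas a call such as blk_fill_rwbs(rwbs, 0x1) would now draw a
	 * sparse warning about passing a plain integer where a restricted
	 * blk_opf_t is expected.
	 */
}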
@@ -7,6 +7,7 @@
 #include <linux/compat.h>
 #include <uapi/linux/blktrace_api.h>
 #include <linux/list.h>
+#include <linux/blk_types.h>
 
 #if defined(CONFIG_BLK_DEV_IO_TRACE)
@@ -105,7 +106,7 @@ struct compat_blk_user_trace_setup {
 #endif
 
-void blk_fill_rwbs(char *rwbs, unsigned int op);
+void blk_fill_rwbs(char *rwbs, blk_opf_t opf);
 
 static inline sector_t blk_rq_trace_sector(struct request *rq)
 {
@@ -205,7 +205,7 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 #define BLK_TC_PREFLUSH		BLK_TC_FLUSH
 
 /* The ilog2() calls fall out because they're constant */
-#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
+#define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
 
 /*
@@ -213,8 +213,8 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
  * blk_io_trace structure and places it in a per-cpu subbuffer.
  */
 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
-			    int op, int op_flags, u32 what, int error, int pdu_len,
-			    void *pdu_data, u64 cgid)
+			    const blk_opf_t opf, u32 what, int error,
+			    int pdu_len, void *pdu_data, u64 cgid)
 {
 	struct task_struct *tsk = current;
 	struct ring_buffer_event *event = NULL;
@@ -227,16 +227,17 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	int cpu;
 	bool blk_tracer = blk_tracer_enabled;
 	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
+	const enum req_op op = opf & REQ_OP_MASK;
 
 	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
 		return;
 
 	what |= ddir_act[op_is_write(op) ? WRITE : READ];
-	what |= MASK_TC_BIT(op_flags, SYNC);
-	what |= MASK_TC_BIT(op_flags, RAHEAD);
-	what |= MASK_TC_BIT(op_flags, META);
-	what |= MASK_TC_BIT(op_flags, PREFLUSH);
-	what |= MASK_TC_BIT(op_flags, FUA);
+	what |= MASK_TC_BIT(opf, SYNC);
+	what |= MASK_TC_BIT(opf, RAHEAD);
+	what |= MASK_TC_BIT(opf, META);
+	what |= MASK_TC_BIT(opf, PREFLUSH);
+	what |= MASK_TC_BIT(opf, FUA);
 	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
 		what |= BLK_TC_ACT(BLK_TC_DISCARD);
 	if (op == REQ_OP_FLUSH)
@@ -842,9 +843,8 @@ static void blk_add_trace_rq(struct request *rq, blk_status_t error,
 	else
 		what |= BLK_TC_ACT(BLK_TC_FS);
 
-	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
-			rq->cmd_flags, what, blk_status_to_errno(error), 0,
-			NULL, cgid);
+	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
+			what, blk_status_to_errno(error), 0, NULL, cgid);
 	rcu_read_unlock();
 }
@@ -903,7 +903,7 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 	}
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
+			bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
 	rcu_read_unlock();
 }
@@ -949,7 +949,7 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 	rcu_read_lock();
 	bt = rcu_dereference(q->blk_trace);
 	if (bt)
-		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
+		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
 	rcu_read_unlock();
 }
@@ -969,7 +969,7 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
 		else
 			what = BLK_TA_UNPLUG_TIMER;
 
-		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
 	}
 	rcu_read_unlock();
 }
@@ -985,8 +985,7 @@ static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
 		__be64 rpdu = cpu_to_be64(pdu);
 
 		__blk_add_trace(bt, bio->bi_iter.bi_sector,
-				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
-				BLK_TA_SPLIT,
+				bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
				blk_status_to_errno(bio->bi_status),
				sizeof(rpdu), &rpdu,
				blk_trace_bio_get_cgid(q, bio));
@@ -1022,7 +1021,7 @@ static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
 	r.sector_from = cpu_to_be64(from);
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
+			bio->bi_opf, BLK_TA_REMAP,
			blk_status_to_errno(bio->bi_status),
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
 	rcu_read_unlock();
@@ -1058,7 +1057,7 @@ static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
 	r.sector_from = cpu_to_be64(from);
 
 	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
-			req_op(rq), rq->cmd_flags, BLK_TA_REMAP, 0,
+			rq->cmd_flags, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(rq));
 	rcu_read_unlock();
 }
@@ -1084,7 +1083,7 @@ void blk_add_driver_data(struct request *rq, void *data, size_t len)
 		return;
 	}
 
-	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
+	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
			BLK_TA_DRV_DATA, 0, len, data,
			blk_trace_request_get_cgid(rq));
 	rcu_read_unlock();
@@ -1881,14 +1880,14 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
  * caller with resulting string.
  *
  **/
-void blk_fill_rwbs(char *rwbs, unsigned int op)
+void blk_fill_rwbs(char *rwbs, blk_opf_t opf)
 {
 	int i = 0;
 
-	if (op & REQ_PREFLUSH)
+	if (opf & REQ_PREFLUSH)
 		rwbs[i++] = 'F';
 
-	switch (op & REQ_OP_MASK) {
+	switch (opf & REQ_OP_MASK) {
 	case REQ_OP_WRITE:
 		rwbs[i++] = 'W';
 		break;
@@ -1909,13 +1908,13 @@ void blk_fill_rwbs(char *rwbs, unsigned int op)
 		rwbs[i++] = 'N';
 	}
 
-	if (op & REQ_FUA)
+	if (opf & REQ_FUA)
 		rwbs[i++] = 'F';
-	if (op & REQ_RAHEAD)
+	if (opf & REQ_RAHEAD)
 		rwbs[i++] = 'A';
-	if (op & REQ_SYNC)
+	if (opf & REQ_SYNC)
 		rwbs[i++] = 'S';
-	if (op & REQ_META)
+	if (opf & REQ_META)
 		rwbs[i++] = 'M';
 
 	rwbs[i] = '\0';
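Of the hunks above, the MASK_TC_BIT() change is the one place where the stricter type forces a cast: 'what' is a plain u32, so the REQ_* bits extracted from the __bitwise blk_opf_t value have to be converted back to an ordinary integer with __force before they are shifted into the mask. A reduced sketch of the same pattern, using invented names (demo_sync_bit is not kernel code):

#include <linux/types.h>	/* u32 */
#include <linux/blk_types.h>	/* blk_opf_t, REQ_SYNC, __REQ_SYNC */

/*
 * Illustrative only: extract the REQ_SYNC flag from a blk_opf_t and fold it
 * into a plain u32, mirroring what MASK_TC_BIT() does after this patch.
 * The (__force u32) cast tells sparse that the conversion is intentional.
 */
static inline u32 demo_sync_bit(blk_opf_t opf)
{
	return (__force u32)(opf & REQ_SYNC) >> __REQ_SYNC;
}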