Commit 296c355c authored by Theodore Ts'o

ext4: Use tracepoints for mb_history trace file

The /proc/fs/ext4/<dev>/mb_history file was maintained by hand-rolled
ring-buffer code and had a number of problems: it required a fairly
large amount of memory to be allocated for each ext4 filesystem, and
the s_mb_history_lock introduced a CPU contention problem.

By ripping out the mb_history code and replacing it with ftrace
tracepoints, we get more functionality: timestamps, event filtering,
the ability to correlate mballoc history with other ext4 tracepoints,
and so on.
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Parent 90576c0b
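
The event filtering and correlation mentioned above come from the
standard ftrace machinery rather than from anything in this patch. As a
rough sketch of how the new events might be driven from userspace,
assuming debugfs is mounted at /sys/kernel/debug and using a made-up
inode number for the filter:

/* Minimal sketch: enable the new ext4 mballoc tracepoints and set an
 * event filter.  The tracing directory path and the filter value are
 * assumptions, not part of this commit. */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return -1;
        }
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        const char *t = "/sys/kernel/debug/tracing";
        char p[256];

        /* The generic trace ring buffer size (per CPU, in KB) takes over
         * the role of the mb_history_length knob removed by this patch. */
        snprintf(p, sizeof(p), "%s/buffer_size_kb", t);
        write_str(p, "1024");

        /* Enable all four events added by this commit. */
        snprintf(p, sizeof(p), "%s/events/ext4/ext4_mballoc_alloc/enable", t);
        write_str(p, "1");
        snprintf(p, sizeof(p), "%s/events/ext4/ext4_mballoc_prealloc/enable", t);
        write_str(p, "1");
        snprintf(p, sizeof(p), "%s/events/ext4/ext4_mballoc_discard/enable", t);
        write_str(p, "1");
        snprintf(p, sizeof(p), "%s/events/ext4/ext4_mballoc_free/enable", t);
        write_str(p, "1");

        /* Example of per-event filtering, here limiting to one inode. */
        snprintf(p, sizeof(p), "%s/events/ext4/ext4_mballoc_alloc/filter", t);
        write_str(p, "ino == 1234");

        /* Records can then be read from <tracing>/trace or trace_pipe. */
        return 0;
}
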
......@@ -1113,7 +1113,6 @@ Table 1-12: Files in /proc/fs/ext4/<devname>
..............................................................................
File Content
mb_groups details of multiblock allocator buddy cache of free blocks
mb_history multiblock allocation history
..............................................................................
......
......@@ -65,6 +65,12 @@ typedef __u32 ext4_lblk_t;
/* data type for block group number */
typedef unsigned int ext4_group_t;
/*
* Flags used in mballoc's allocation_context flags field.
*
* Also used to show what's going on for debugging purposes when the
 * flag field is exported via the tracepoint interface
*/
/* prefer goal again. length */
#define EXT4_MB_HINT_MERGE 0x0001
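
To illustrate the comment above, a trace consumer could decode the
flags value reported by the new mballoc events against these
EXT4_MB_HINT_* bits. A hypothetical userspace helper (only the single
hint visible in this hunk is included; the header defines further bits
that are elided here):

/* Hypothetical decoder for the mballoc flags word seen in the trace
 * output.  Only EXT4_MB_HINT_MERGE is shown. */
#include <stdio.h>

#define EXT4_MB_HINT_MERGE 0x0001 /* prefer goal again. length */

static void show_mb_flags(unsigned int flags)
{
        printf("flags 0x%04x%s\n", flags,
               (flags & EXT4_MB_HINT_MERGE) ? " [MERGE]" : "");
}

int main(void)
{
        show_mb_flags(0x0001); /* prints: flags 0x0001 [MERGE] */
        return 0;
}
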
......@@ -971,14 +977,6 @@ struct ext4_sb_info {
unsigned long s_mb_last_group;
unsigned long s_mb_last_start;
/* history to debug policy */
struct ext4_mb_history *s_mb_history;
int s_mb_history_cur;
int s_mb_history_max;
int s_mb_history_num;
spinlock_t s_mb_history_lock;
int s_mb_history_filter;
/* stats for buddy allocator */
spinlock_t s_mb_pa_lock;
atomic_t s_bal_reqs; /* number of reqs with len > 1 */
......
......@@ -2096,207 +2096,6 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
return err;
}
#ifdef EXT4_MB_HISTORY
struct ext4_mb_proc_session {
struct ext4_mb_history *history;
struct super_block *sb;
int start;
int max;
};
static void *ext4_mb_history_skip_empty(struct ext4_mb_proc_session *s,
struct ext4_mb_history *hs,
int first)
{
if (hs == s->history + s->max)
hs = s->history;
if (!first && hs == s->history + s->start)
return NULL;
while (hs->orig.fe_len == 0) {
hs++;
if (hs == s->history + s->max)
hs = s->history;
if (hs == s->history + s->start)
return NULL;
}
return hs;
}
static void *ext4_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
{
struct ext4_mb_proc_session *s = seq->private;
struct ext4_mb_history *hs;
int l = *pos;
if (l == 0)
return SEQ_START_TOKEN;
hs = ext4_mb_history_skip_empty(s, s->history + s->start, 1);
if (!hs)
return NULL;
while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL);
return hs;
}
static void *ext4_mb_seq_history_next(struct seq_file *seq, void *v,
loff_t *pos)
{
struct ext4_mb_proc_session *s = seq->private;
struct ext4_mb_history *hs = v;
++*pos;
if (v == SEQ_START_TOKEN)
return ext4_mb_history_skip_empty(s, s->history + s->start, 1);
else
return ext4_mb_history_skip_empty(s, ++hs, 0);
}
static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
{
char buf[25], buf2[25], buf3[25], *fmt;
struct ext4_mb_history *hs = v;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
"%-5s %-2s %-6s %-5s %-5s %-6s\n",
"pid", "inode", "original", "goal", "result", "found",
"grps", "cr", "flags", "merge", "tail", "broken");
return 0;
}
if (hs->op == EXT4_MB_HISTORY_ALLOC) {
fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
"0x%04x %-5s %-5u %-6u\n";
sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
hs->result.fe_start, hs->result.fe_len,
hs->result.fe_logical);
sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
hs->orig.fe_start, hs->orig.fe_len,
hs->orig.fe_logical);
sprintf(buf3, "%u/%d/%u@%u", hs->goal.fe_group,
hs->goal.fe_start, hs->goal.fe_len,
hs->goal.fe_logical);
seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
hs->found, hs->groups, hs->cr, hs->flags,
hs->merged ? "M" : "", hs->tail,
hs->buddy ? 1 << hs->buddy : 0);
} else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
fmt = "%-5u %-8u %-23s %-23s %-23s\n";
sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
hs->result.fe_start, hs->result.fe_len,
hs->result.fe_logical);
sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
hs->orig.fe_start, hs->orig.fe_len,
hs->orig.fe_logical);
seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
} else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
hs->result.fe_start, hs->result.fe_len);
seq_printf(seq, "%-5u %-8u %-23s discard\n",
hs->pid, hs->ino, buf2);
} else if (hs->op == EXT4_MB_HISTORY_FREE) {
sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
hs->result.fe_start, hs->result.fe_len);
seq_printf(seq, "%-5u %-8u %-23s free\n",
hs->pid, hs->ino, buf2);
}
return 0;
}
static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v)
{
}
static const struct seq_operations ext4_mb_seq_history_ops = {
.start = ext4_mb_seq_history_start,
.next = ext4_mb_seq_history_next,
.stop = ext4_mb_seq_history_stop,
.show = ext4_mb_seq_history_show,
};
static int ext4_mb_seq_history_open(struct inode *inode, struct file *file)
{
struct super_block *sb = PDE(inode)->data;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_mb_proc_session *s;
int rc;
int size;
if (unlikely(sbi->s_mb_history == NULL))
return -ENOMEM;
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL)
return -ENOMEM;
s->sb = sb;
size = sizeof(struct ext4_mb_history) * sbi->s_mb_history_max;
s->history = kmalloc(size, GFP_KERNEL);
if (s->history == NULL) {
kfree(s);
return -ENOMEM;
}
spin_lock(&sbi->s_mb_history_lock);
memcpy(s->history, sbi->s_mb_history, size);
s->max = sbi->s_mb_history_max;
s->start = sbi->s_mb_history_cur % s->max;
spin_unlock(&sbi->s_mb_history_lock);
rc = seq_open(file, &ext4_mb_seq_history_ops);
if (rc == 0) {
struct seq_file *m = (struct seq_file *)file->private_data;
m->private = s;
} else {
kfree(s->history);
kfree(s);
}
return rc;
}
static int ext4_mb_seq_history_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = (struct seq_file *)file->private_data;
struct ext4_mb_proc_session *s = seq->private;
kfree(s->history);
kfree(s);
return seq_release(inode, file);
}
static ssize_t ext4_mb_seq_history_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *ppos)
{
struct seq_file *seq = (struct seq_file *)file->private_data;
struct ext4_mb_proc_session *s = seq->private;
struct super_block *sb = s->sb;
char str[32];
int value;
if (count >= sizeof(str)) {
printk(KERN_ERR "EXT4-fs: %s string too long, max %u bytes\n",
"mb_history", (int)sizeof(str));
return -EOVERFLOW;
}
if (copy_from_user(str, buffer, count))
return -EFAULT;
value = simple_strtol(str, NULL, 0);
if (value < 0)
return -ERANGE;
EXT4_SB(sb)->s_mb_history_filter = value;
return count;
}
static const struct file_operations ext4_mb_seq_history_fops = {
.owner = THIS_MODULE,
.open = ext4_mb_seq_history_open,
.read = seq_read,
.write = ext4_mb_seq_history_write,
.llseek = seq_lseek,
.release = ext4_mb_seq_history_release,
};
static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
{
struct super_block *sb = seq->private;
......@@ -2396,82 +2195,6 @@ static const struct file_operations ext4_mb_seq_groups_fops = {
.release = seq_release,
};
static void ext4_mb_history_release(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
if (sbi->s_proc != NULL) {
remove_proc_entry("mb_groups", sbi->s_proc);
if (sbi->s_mb_history_max)
remove_proc_entry("mb_history", sbi->s_proc);
}
kfree(sbi->s_mb_history);
}
static void ext4_mb_history_init(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
int i;
if (sbi->s_proc != NULL) {
if (sbi->s_mb_history_max)
proc_create_data("mb_history", S_IRUGO, sbi->s_proc,
&ext4_mb_seq_history_fops, sb);
proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
&ext4_mb_seq_groups_fops, sb);
}
sbi->s_mb_history_cur = 0;
spin_lock_init(&sbi->s_mb_history_lock);
i = sbi->s_mb_history_max * sizeof(struct ext4_mb_history);
sbi->s_mb_history = i ? kzalloc(i, GFP_KERNEL) : NULL;
/* if we can't allocate history, then we simple won't use it */
}
static noinline_for_stack void
ext4_mb_store_history(struct ext4_allocation_context *ac)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_mb_history h;
if (sbi->s_mb_history == NULL)
return;
if (!(ac->ac_op & sbi->s_mb_history_filter))
return;
h.op = ac->ac_op;
h.pid = current->pid;
h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
h.orig = ac->ac_o_ex;
h.result = ac->ac_b_ex;
h.flags = ac->ac_flags;
h.found = ac->ac_found;
h.groups = ac->ac_groups_scanned;
h.cr = ac->ac_criteria;
h.tail = ac->ac_tail;
h.buddy = ac->ac_buddy;
h.merged = 0;
if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) {
if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
h.merged = 1;
h.goal = ac->ac_g_ex;
h.result = ac->ac_f_ex;
}
spin_lock(&sbi->s_mb_history_lock);
memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
sbi->s_mb_history_cur = 0;
spin_unlock(&sbi->s_mb_history_lock);
}
#else
#define ext4_mb_history_release(sb)
#define ext4_mb_history_init(sb)
#endif
/* Create and initialize ext4_group_info data for the given group. */
int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
......@@ -2690,7 +2413,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
sbi->s_mb_stats = MB_DEFAULT_STATS;
sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
......@@ -2708,7 +2430,9 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
spin_lock_init(&lg->lg_prealloc_lock);
}
ext4_mb_history_init(sb);
if (sbi->s_proc)
proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
&ext4_mb_seq_groups_fops, sb);
if (sbi->s_journal)
sbi->s_journal->j_commit_callback = release_blocks_on_commit;
......@@ -2788,7 +2512,8 @@ int ext4_mb_release(struct super_block *sb)
}
free_percpu(sbi->s_locality_groups);
ext4_mb_history_release(sb);
if (sbi->s_proc)
remove_proc_entry("mb_groups", sbi->s_proc);
return 0;
}
......@@ -3274,7 +2999,10 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
atomic_inc(&sbi->s_bal_breaks);
}
ext4_mb_store_history(ac);
if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
trace_ext4_mballoc_alloc(ac);
else
trace_ext4_mballoc_prealloc(ac);
}
/*
......@@ -3774,7 +3502,6 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
if (ac) {
ac->ac_sb = sb;
ac->ac_inode = pa->pa_inode;
ac->ac_op = EXT4_MB_HISTORY_DISCARD;
}
while (bit < end) {
......@@ -3794,7 +3521,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
ac->ac_b_ex.fe_start = bit;
ac->ac_b_ex.fe_len = next - bit;
ac->ac_b_ex.fe_logical = 0;
ext4_mb_store_history(ac);
trace_ext4_mballoc_discard(ac);
}
trace_ext4_mb_release_inode_pa(ac, pa, grp_blk_start + bit,
......@@ -3829,9 +3556,6 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
ext4_group_t group;
ext4_grpblk_t bit;
if (ac)
ac->ac_op = EXT4_MB_HISTORY_DISCARD;
trace_ext4_mb_release_group_pa(ac, pa);
BUG_ON(pa->pa_deleted == 0);
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
......@@ -3846,7 +3570,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
ac->ac_b_ex.fe_start = bit;
ac->ac_b_ex.fe_len = pa->pa_len;
ac->ac_b_ex.fe_logical = 0;
ext4_mb_store_history(ac);
trace_ext4_mballoc_discard(ac);
}
return 0;
......@@ -4737,7 +4461,6 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
if (ac) {
ac->ac_op = EXT4_MB_HISTORY_FREE;
ac->ac_inode = inode;
ac->ac_sb = sb;
}
......@@ -4804,7 +4527,7 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
ac->ac_b_ex.fe_group = block_group;
ac->ac_b_ex.fe_start = bit;
ac->ac_b_ex.fe_len = count;
ext4_mb_store_history(ac);
trace_ext4_mballoc_free(ac);
}
err = ext4_mb_load_buddy(sb, block_group, &e4b);
......
......@@ -52,18 +52,8 @@ extern u8 mb_enable_debug;
#define mb_debug(n, fmt, a...)
#endif
/*
* with EXT4_MB_HISTORY mballoc stores last N allocations in memory
* and you can monitor it in /proc/fs/ext4/<dev>/mb_history
*/
#define EXT4_MB_HISTORY
#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */
#define EXT4_MB_HISTORY_PREALLOC 2 /* preallocated blocks used */
#define EXT4_MB_HISTORY_DISCARD 4 /* preallocation discarded */
#define EXT4_MB_HISTORY_FREE 8 /* free */
#define EXT4_MB_HISTORY_DEFAULT (EXT4_MB_HISTORY_ALLOC | \
EXT4_MB_HISTORY_PREALLOC)
/*
* How long mballoc can look for a best extent (in found extents)
......@@ -217,22 +207,6 @@ struct ext4_allocation_context {
#define AC_STATUS_FOUND 2
#define AC_STATUS_BREAK 3
struct ext4_mb_history {
struct ext4_free_extent orig; /* orig allocation */
struct ext4_free_extent goal; /* goal allocation */
struct ext4_free_extent result; /* result allocation */
unsigned pid;
unsigned ino;
__u16 found; /* how many extents have been found */
__u16 groups; /* how many groups have been scanned */
__u16 tail; /* what tail broke some buddy */
__u16 buddy; /* buddy the tail ^^^ broke */
__u16 flags;
__u8 cr:3; /* which phase the result extent was found at */
__u8 op:4;
__u8 merged:1;
};
struct ext4_buddy {
struct page *bd_buddy_page;
void *bd_buddy;
......@@ -247,13 +221,6 @@ struct ext4_buddy {
#define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap)
#define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy)
#ifndef EXT4_MB_HISTORY
static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
{
return;
}
#endif
#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
......
......@@ -50,13 +50,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>
static int default_mb_history_length = 1000;
module_param_named(default_mb_history_length, default_mb_history_length,
int, 0644);
MODULE_PARM_DESC(default_mb_history_length,
"Default number of entries saved for mb_history");
struct proc_dir_entry *ext4_proc_root;
static struct kset *ext4_kset;
......@@ -1079,7 +1072,7 @@ enum {
Opt_journal_update, Opt_journal_dev,
Opt_journal_checksum, Opt_journal_async_commit,
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_data_err_abort, Opt_data_err_ignore, Opt_mb_history_length,
Opt_data_err_abort, Opt_data_err_ignore,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize,
......@@ -1126,7 +1119,6 @@ static const match_table_t tokens = {
{Opt_data_writeback, "data=writeback"},
{Opt_data_err_abort, "data_err=abort"},
{Opt_data_err_ignore, "data_err=ignore"},
{Opt_mb_history_length, "mb_history_length=%u"},
{Opt_offusrjquota, "usrjquota="},
{Opt_usrjquota, "usrjquota=%s"},
{Opt_offgrpjquota, "grpjquota="},
......@@ -1367,13 +1359,6 @@ static int parse_options(char *options, struct super_block *sb,
case Opt_data_err_ignore:
clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
break;
case Opt_mb_history_length:
if (match_int(&args[0], &option))
return 0;
if (option < 0)
return 0;
sbi->s_mb_history_max = option;
break;
#ifdef CONFIG_QUOTA
case Opt_usrjquota:
qtype = USRQUOTA;
......@@ -2435,7 +2420,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
sbi->s_mb_history_max = default_mb_history_length;
set_opt(sbi->s_mount_opt, BARRIER);
......
......@@ -743,6 +743,169 @@ TRACE_EVENT(ext4_alloc_da_blocks,
__entry->data_blocks, __entry->meta_blocks)
);
TRACE_EVENT(ext4_mballoc_alloc,
TP_PROTO(struct ext4_allocation_context *ac),
TP_ARGS(ac),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( __u16, found )
__field( __u16, groups )
__field( __u16, buddy )
__field( __u16, flags )
__field( __u16, tail )
__field( __u8, cr )
__field( __u32, orig_logical )
__field( int, orig_start )
__field( __u32, orig_group )
__field( int, orig_len )
__field( __u32, goal_logical )
__field( int, goal_start )
__field( __u32, goal_group )
__field( int, goal_len )
__field( __u32, result_logical )
__field( int, result_start )
__field( __u32, result_group )
__field( int, result_len )
),
TP_fast_assign(
__entry->dev = ac->ac_inode->i_sb->s_dev;
__entry->ino = ac->ac_inode->i_ino;
__entry->found = ac->ac_found;
__entry->flags = ac->ac_flags;
__entry->groups = ac->ac_groups_scanned;
__entry->buddy = ac->ac_buddy;
__entry->tail = ac->ac_tail;
__entry->cr = ac->ac_criteria;
__entry->orig_logical = ac->ac_o_ex.fe_logical;
__entry->orig_start = ac->ac_o_ex.fe_start;
__entry->orig_group = ac->ac_o_ex.fe_group;
__entry->orig_len = ac->ac_o_ex.fe_len;
__entry->goal_logical = ac->ac_g_ex.fe_logical;
__entry->goal_start = ac->ac_g_ex.fe_start;
__entry->goal_group = ac->ac_g_ex.fe_group;
__entry->goal_len = ac->ac_g_ex.fe_len;
__entry->result_logical = ac->ac_f_ex.fe_logical;
__entry->result_start = ac->ac_f_ex.fe_start;
__entry->result_group = ac->ac_f_ex.fe_group;
__entry->result_len = ac->ac_f_ex.fe_len;
),
TP_printk("dev %s inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
"result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
"tail %u broken %u",
jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
__entry->orig_group, __entry->orig_start,
__entry->orig_len, __entry->orig_logical,
__entry->goal_group, __entry->goal_start,
__entry->goal_len, __entry->goal_logical,
__entry->result_group, __entry->result_start,
__entry->result_len, __entry->result_logical,
__entry->found, __entry->groups, __entry->cr,
__entry->flags, __entry->tail,
__entry->buddy ? 1 << __entry->buddy : 0)
);
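
Given the TP_printk format above, a record for this event would look
roughly like the line below. The values, task name, and timestamp are
invented for illustration; the task/PID and timestamp columns come from
the common ftrace record header, which is where the timestamps
mentioned in the commit message come from and why the old explicit pid
column is no longer needed:

        dd-4012  [001]  2383.417980: ext4_mballoc_alloc: dev sda1 inode 12 orig 42/0/8@100 goal 42/512/8@100 result 42/520/8@100 blks 1 grps 1 cr 0 flags 0x0020 tail 0 broken 0
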
TRACE_EVENT(ext4_mballoc_prealloc,
TP_PROTO(struct ext4_allocation_context *ac),
TP_ARGS(ac),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( __u32, orig_logical )
__field( int, orig_start )
__field( __u32, orig_group )
__field( int, orig_len )
__field( __u32, result_logical )
__field( int, result_start )
__field( __u32, result_group )
__field( int, result_len )
),
TP_fast_assign(
__entry->dev = ac->ac_inode->i_sb->s_dev;
__entry->ino = ac->ac_inode->i_ino;
__entry->orig_logical = ac->ac_o_ex.fe_logical;
__entry->orig_start = ac->ac_o_ex.fe_start;
__entry->orig_group = ac->ac_o_ex.fe_group;
__entry->orig_len = ac->ac_o_ex.fe_len;
__entry->result_logical = ac->ac_b_ex.fe_logical;
__entry->result_start = ac->ac_b_ex.fe_start;
__entry->result_group = ac->ac_b_ex.fe_group;
__entry->result_len = ac->ac_b_ex.fe_len;
),
TP_printk("dev %s inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
__entry->orig_group, __entry->orig_start,
__entry->orig_len, __entry->orig_logical,
__entry->result_group, __entry->result_start,
__entry->result_len, __entry->result_logical)
);
TRACE_EVENT(ext4_mballoc_discard,
TP_PROTO(struct ext4_allocation_context *ac),
TP_ARGS(ac),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( __u32, result_logical )
__field( int, result_start )
__field( __u32, result_group )
__field( int, result_len )
),
TP_fast_assign(
__entry->dev = ac->ac_inode->i_sb->s_dev;
__entry->ino = ac->ac_inode->i_ino;
__entry->result_logical = ac->ac_b_ex.fe_logical;
__entry->result_start = ac->ac_b_ex.fe_start;
__entry->result_group = ac->ac_b_ex.fe_group;
__entry->result_len = ac->ac_b_ex.fe_len;
),
TP_printk("dev %s inode %lu extent %u/%d/%u@%u ",
jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
__entry->result_group, __entry->result_start,
__entry->result_len, __entry->result_logical)
);
TRACE_EVENT(ext4_mballoc_free,
TP_PROTO(struct ext4_allocation_context *ac),
TP_ARGS(ac),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( __u32, result_logical )
__field( int, result_start )
__field( __u32, result_group )
__field( int, result_len )
),
TP_fast_assign(
__entry->dev = ac->ac_inode->i_sb->s_dev;
__entry->ino = ac->ac_inode->i_ino;
__entry->result_logical = ac->ac_b_ex.fe_logical;
__entry->result_start = ac->ac_b_ex.fe_start;
__entry->result_group = ac->ac_b_ex.fe_group;
__entry->result_len = ac->ac_b_ex.fe_len;
),
TP_printk("dev %s inode %lu extent %u/%d/%u@%u ",
jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
__entry->result_group, __entry->result_start,
__entry->result_len, __entry->result_logical)
);
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
......