Commit 750ed1a4 authored by Ingo Molnar, committed by Thomas Gleixner

ftrace: timestamp syncing, prepare

rename and uninline now() to ftrace_now().
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent 4bf39a94
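The change is mechanical: the inline helper now() in the tracer header is renamed to ftrace_now(), turned into an out-of-line function in the tracer core, and exported to the individual tracers via an extern declaration; every call site switches from now(cpu) to ftrace_now(cpu). A minimal before/after sketch of just the helper (everything else in the diff below is the renamed call sites):

	/* Before: inline helper in the shared tracer header. */
	static inline notrace cycle_t now(int cpu)
	{
		return cpu_clock(cpu);
	}

	/* After: one out-of-line definition in the tracer core... */
	notrace cycle_t ftrace_now(int cpu)
	{
		return cpu_clock(cpu);
	}

	/* ...declared extern in the shared header for all tracers. */
	extern notrace cycle_t ftrace_now(int cpu);

Both versions still read cpu_clock(cpu), so behaviour is unchanged for now; the rename and uninlining give a single definition to adjust when the timestamp syncing announced in the subject line follows.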
@@ -531,7 +531,7 @@ static int notrace __ftrace_update_code(void *ignore)
 	save_ftrace_enabled = ftrace_enabled;
 	ftrace_enabled = 0;
-	start = now(raw_smp_processor_id());
+	start = ftrace_now(raw_smp_processor_id());
 	ftrace_update_cnt = 0;
 	/* No locks needed, the machine is stopped! */
@@ -550,7 +550,7 @@ static int notrace __ftrace_update_code(void *ignore)
 	}
-	stop = now(raw_smp_processor_id());
+	stop = ftrace_now(raw_smp_processor_id());
 	ftrace_update_time = stop - start;
 	ftrace_update_tot_cnt += ftrace_update_cnt;
...
@@ -42,6 +42,11 @@ ns2usecs(cycle_t nsec)
 	return nsec;
 }
+notrace cycle_t ftrace_now(int cpu)
+{
+	return cpu_clock(cpu);
+}
 static atomic_t tracer_counter;
 static struct trace_array global_trace;
@@ -607,7 +612,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 	entry->idx = atomic_inc_return(&tracer_counter);
 	entry->preempt_count = pc & 0xff;
 	entry->pid = tsk->pid;
-	entry->t = now(raw_smp_processor_id());
+	entry->t = ftrace_now(raw_smp_processor_id());
 	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
...
@@ -171,10 +171,7 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
-static inline notrace cycle_t now(int cpu)
-{
-	return cpu_clock(cpu);
-}
+extern notrace cycle_t ftrace_now(int cpu);
 #ifdef CONFIG_SCHED_TRACER
 extern void notrace
...
@@ -20,7 +20,7 @@ static notrace void function_reset(struct trace_array *tr)
 {
 	int cpu;
-	tr->time_start = now(tr->cpu);
+	tr->time_start = ftrace_now(tr->cpu);
 	for_each_online_cpu(cpu)
 		tracing_reset(tr->data[cpu]);
...
@@ -136,7 +136,7 @@ check_critical_timing(struct trace_array *tr,
 	 * as long as possible:
 	 */
 	T0 = data->preempt_timestamp;
-	T1 = now(cpu);
+	T1 = ftrace_now(cpu);
 	delta = T1-T0;
 	local_save_flags(flags);
@@ -186,7 +186,7 @@ check_critical_timing(struct trace_array *tr,
 out:
 	data->critical_sequence = max_sequence;
-	data->preempt_timestamp = now(cpu);
+	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_reset(data);
 	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
 }
@@ -215,7 +215,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 	atomic_inc(&data->disabled);
 	data->critical_sequence = max_sequence;
-	data->preempt_timestamp = now(cpu);
+	data->preempt_timestamp = ftrace_now(cpu);
 	data->critical_start = parent_ip ? : ip;
 	tracing_reset(data);
...
@@ -61,7 +61,7 @@ static notrace void sched_switch_reset(struct trace_array *tr)
 {
 	int cpu;
-	tr->time_start = now(tr->cpu);
+	tr->time_start = ftrace_now(tr->cpu);
 	for_each_online_cpu(cpu)
 		tracing_reset(tr->data[cpu]);
...
@@ -92,7 +92,7 @@ wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
 	 * as long as possible:
 	 */
 	T0 = data->preempt_timestamp;
-	T1 = now(cpu);
+	T1 = ftrace_now(cpu);
 	delta = T1-T0;
 	if (!report_latency(delta))
@@ -191,7 +191,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
 	local_save_flags(flags);
-	tr->data[wakeup_cpu]->preempt_timestamp = now(cpu);
+	tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
 	ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags);
 out_locked:
...