提交 a5e25883 编写于 作者: S Steven Rostedt 提交者: Ingo Molnar

ftrace: replace raw_local_irq_save with local_irq_save

Impact: fix for lockdep and ftrace

The raw_local_irq_save/restore calls confuse lockdep, because the raw
variants bypass its irq-state tracking. This patch
converts them to the local_irq_save/restore variants.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 dfdc5437
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
* Thanks to Arjan van de Ven for coming up with the initial idea of * Thanks to Arjan van de Ven for coming up with the initial idea of
* mapping lock dependencies runtime. * mapping lock dependencies runtime.
*/ */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/delay.h> #include <linux/delay.h>
......
...@@ -1209,7 +1209,7 @@ void trace_graph_entry(struct ftrace_graph_ent *trace) ...@@ -1209,7 +1209,7 @@ void trace_graph_entry(struct ftrace_graph_ent *trace)
int cpu; int cpu;
int pc; int pc;
raw_local_irq_save(flags); local_irq_save(flags);
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
data = tr->data[cpu]; data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled); disabled = atomic_inc_return(&data->disabled);
...@@ -1218,7 +1218,7 @@ void trace_graph_entry(struct ftrace_graph_ent *trace) ...@@ -1218,7 +1218,7 @@ void trace_graph_entry(struct ftrace_graph_ent *trace)
__trace_graph_entry(tr, data, trace, flags, pc); __trace_graph_entry(tr, data, trace, flags, pc);
} }
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
raw_local_irq_restore(flags); local_irq_restore(flags);
} }
void trace_graph_return(struct ftrace_graph_ret *trace) void trace_graph_return(struct ftrace_graph_ret *trace)
...@@ -1230,7 +1230,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace) ...@@ -1230,7 +1230,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
int cpu; int cpu;
int pc; int pc;
raw_local_irq_save(flags); local_irq_save(flags);
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
data = tr->data[cpu]; data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled); disabled = atomic_inc_return(&data->disabled);
...@@ -1239,7 +1239,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace) ...@@ -1239,7 +1239,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
__trace_graph_return(tr, data, trace, flags, pc); __trace_graph_return(tr, data, trace, flags, pc);
} }
atomic_dec(&data->disabled); atomic_dec(&data->disabled);
raw_local_irq_restore(flags); local_irq_restore(flags);
} }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
...@@ -2645,7 +2645,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, ...@@ -2645,7 +2645,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
if (err) if (err)
goto err_unlock; goto err_unlock;
raw_local_irq_disable(); local_irq_disable();
__raw_spin_lock(&ftrace_max_lock); __raw_spin_lock(&ftrace_max_lock);
for_each_tracing_cpu(cpu) { for_each_tracing_cpu(cpu) {
/* /*
...@@ -2662,7 +2662,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, ...@@ -2662,7 +2662,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
} }
} }
__raw_spin_unlock(&ftrace_max_lock); __raw_spin_unlock(&ftrace_max_lock);
raw_local_irq_enable(); local_irq_enable();
tracing_cpumask = tracing_cpumask_new; tracing_cpumask = tracing_cpumask_new;
......
...@@ -42,7 +42,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) ...@@ -42,7 +42,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
if (unlikely(!tr)) if (unlikely(!tr))
return; return;
raw_local_irq_save(flags); local_irq_save(flags);
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out; goto out;
...@@ -74,7 +74,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) ...@@ -74,7 +74,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
out: out:
atomic_dec(&tr->data[cpu]->disabled); atomic_dec(&tr->data[cpu]->disabled);
raw_local_irq_restore(flags); local_irq_restore(flags);
} }
static inline static inline
......
...@@ -48,7 +48,7 @@ static inline void check_stack(void) ...@@ -48,7 +48,7 @@ static inline void check_stack(void)
if (!object_is_on_stack(&this_size)) if (!object_is_on_stack(&this_size))
return; return;
raw_local_irq_save(flags); local_irq_save(flags);
__raw_spin_lock(&max_stack_lock); __raw_spin_lock(&max_stack_lock);
/* a race could have already updated it */ /* a race could have already updated it */
...@@ -96,7 +96,7 @@ static inline void check_stack(void) ...@@ -96,7 +96,7 @@ static inline void check_stack(void)
out: out:
__raw_spin_unlock(&max_stack_lock); __raw_spin_unlock(&max_stack_lock);
raw_local_irq_restore(flags); local_irq_restore(flags);
} }
static void static void
...@@ -162,11 +162,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, ...@@ -162,11 +162,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
if (ret < 0) if (ret < 0)
return ret; return ret;
raw_local_irq_save(flags); local_irq_save(flags);
__raw_spin_lock(&max_stack_lock); __raw_spin_lock(&max_stack_lock);
*ptr = val; *ptr = val;
__raw_spin_unlock(&max_stack_lock); __raw_spin_unlock(&max_stack_lock);
raw_local_irq_restore(flags); local_irq_restore(flags);
return count; return count;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册