提交 38f80112 编写于 作者: M Markus Metzger 提交者: Ingo Molnar

x86, bts: fix race between per-task and per-cpu branch tracing

Per-task branch tracing installs a debug store context with the traced
task. This immediately results in the branch trace control bits being
cleared for the next context switch of that task, if they were not set before.

Only one of per-cpu or per-task tracing may be active at any given time.

Active per-cpu tracing would be disabled even if the per-task tracing
request is rejected and the task's debug store context is removed.

Check the tracing type (per-cpu or per-task) before installing a task
debug store context.
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Cc: roland@redhat.com
Cc: eranian@googlemail.com
Cc: oleg@redhat.com
Cc: juan.villacis@intel.com
Cc: ak@linux.jf.intel.com
LKML-Reference: <20090403144552.856000000@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 8d99b3ac
......@@ -193,12 +193,28 @@ static DEFINE_SPINLOCK(ds_lock);
*/
static atomic_t tracers = ATOMIC_INIT(0);
static inline void get_tracer(struct task_struct *task)
static inline int get_tracer(struct task_struct *task)
{
if (task)
int error;
spin_lock_irq(&ds_lock);
if (task) {
error = -EPERM;
if (atomic_read(&tracers) < 0)
goto out;
atomic_inc(&tracers);
else
} else {
error = -EPERM;
if (atomic_read(&tracers) > 0)
goto out;
atomic_dec(&tracers);
}
error = 0;
out:
spin_unlock_irq(&ds_lock);
return error;
}
static inline void put_tracer(struct task_struct *task)
......@@ -209,14 +225,6 @@ static inline void put_tracer(struct task_struct *task)
atomic_inc(&tracers);
}
static inline int check_tracer(struct task_struct *task)
{
return task ?
(atomic_read(&tracers) >= 0) :
(atomic_read(&tracers) <= 0);
}
/*
* The DS context is either attached to a thread or to a cpu:
* - in the former case, the thread_struct contains a pointer to the
......@@ -677,6 +685,10 @@ struct bts_tracer *ds_request_bts(struct task_struct *task,
if (ovfl)
goto out;
error = get_tracer(task);
if (error < 0)
goto out;
/*
* Per-cpu tracing is typically requested using smp_call_function().
* We must not sleep.
......@@ -684,7 +696,7 @@ struct bts_tracer *ds_request_bts(struct task_struct *task,
error = -ENOMEM;
tracer = kzalloc(sizeof(*tracer), GFP_ATOMIC);
if (!tracer)
goto out;
goto out_put_tracer;
tracer->ovfl = ovfl;
error = ds_request(&tracer->ds, &tracer->trace.ds,
......@@ -695,14 +707,9 @@ struct bts_tracer *ds_request_bts(struct task_struct *task,
spin_lock_irqsave(&ds_lock, irq);
error = -EPERM;
if (!check_tracer(task))
goto out_unlock;
get_tracer(task);
error = -EPERM;
if (tracer->ds.context->bts_master)
goto out_put_tracer;
goto out_unlock;
tracer->ds.context->bts_master = tracer;
spin_unlock_irqrestore(&ds_lock, irq);
......@@ -716,13 +723,13 @@ struct bts_tracer *ds_request_bts(struct task_struct *task,
return tracer;
out_put_tracer:
put_tracer(task);
out_unlock:
spin_unlock_irqrestore(&ds_lock, irq);
ds_put_context(tracer->ds.context);
out_tracer:
kfree(tracer);
out_put_tracer:
put_tracer(task);
out:
return ERR_PTR(error);
}
......@@ -741,6 +748,10 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
if (ovfl)
goto out;
error = get_tracer(task);
if (error < 0)
goto out;
/*
* Per-cpu tracing is typically requested using smp_call_function().
* We must not sleep.
......@@ -748,7 +759,7 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
error = -ENOMEM;
tracer = kzalloc(sizeof(*tracer), GFP_ATOMIC);
if (!tracer)
goto out;
goto out_put_tracer;
tracer->ovfl = ovfl;
error = ds_request(&tracer->ds, &tracer->trace.ds,
......@@ -758,14 +769,9 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
spin_lock_irqsave(&ds_lock, irq);
error = -EPERM;
if (!check_tracer(task))
goto out_unlock;
get_tracer(task);
error = -EPERM;
if (tracer->ds.context->pebs_master)
goto out_put_tracer;
goto out_unlock;
tracer->ds.context->pebs_master = tracer;
spin_unlock_irqrestore(&ds_lock, irq);
......@@ -775,13 +781,13 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
return tracer;
out_put_tracer:
put_tracer(task);
out_unlock:
spin_unlock_irqrestore(&ds_lock, irq);
ds_put_context(tracer->ds.context);
out_tracer:
kfree(tracer);
out_put_tracer:
put_tracer(task);
out:
return ERR_PTR(error);
}
......@@ -804,8 +810,8 @@ void ds_release_bts(struct bts_tracer *tracer)
if (task && (task != current))
wait_task_context_switch(task);
put_tracer(task);
ds_put_context(tracer->ds.context);
put_tracer(task);
kfree(tracer);
}
......@@ -861,16 +867,20 @@ void ds_resume_bts(struct bts_tracer *tracer)
void ds_release_pebs(struct pebs_tracer *tracer)
{
struct task_struct *task;
if (!tracer)
return;
task = tracer->ds.context->task;
ds_suspend_pebs(tracer);
WARN_ON_ONCE(tracer->ds.context->pebs_master != tracer);
tracer->ds.context->pebs_master = NULL;
put_tracer(tracer->ds.context->task);
ds_put_context(tracer->ds.context);
put_tracer(task);
kfree(tracer);
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册