Commit 95cdd2e7 authored by Ingo Molnar

perfcounters: enable lowlevel pmc code to schedule counters

Allow the low-level ->enable() op to return an error if a counter cannot be
added. This can be used to handle counter constraints.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 78b6084c
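The patch changes the ->enable() callback in struct hw_perf_counter_ops from void to int, so a hardware backend can report that a counter could not be placed on the PMU (for example, when every generic PMC slot is already taken) and the generic scheduling code can back off instead of assuming success. Below is a minimal, self-contained user-space sketch of that contract; it is not kernel code, and all names in it (toy_counter, toy_ops, toy_enable, NR_SLOTS, ...) are invented for illustration.

/*
 * Sketch only: an ->enable() that may fail with -EAGAIN when no counter
 * slot is free, and a caller that checks the return value.
 */
#include <errno.h>
#include <stdio.h>

#define NR_SLOTS 4                      /* pretend the PMU has 4 generic counters */

struct toy_counter {
        int idx;                        /* slot assigned by enable(), -1 if none */
};

struct toy_ops {
        int  (*enable)(struct toy_counter *counter);    /* may fail: 0 or -EAGAIN */
        void (*disable)(struct toy_counter *counter);
};

static unsigned long used_slots;        /* bitmask of occupied PMC slots */

static int toy_enable(struct toy_counter *counter)
{
        for (int idx = 0; idx < NR_SLOTS; idx++) {
                if (!(used_slots & (1UL << idx))) {
                        used_slots |= 1UL << idx;
                        counter->idx = idx;
                        return 0;
                }
        }
        return -EAGAIN;                 /* no free slot: tell the caller */
}

static void toy_disable(struct toy_counter *counter)
{
        used_slots &= ~(1UL << counter->idx);
        counter->idx = -1;
}

static const struct toy_ops ops = { .enable = toy_enable, .disable = toy_disable };

int main(void)
{
        struct toy_counter counters[6];

        for (int i = 0; i < 6; i++) {
                counters[i].idx = -1;
                if (ops.enable(&counters[i]))   /* nonzero means "could not schedule" */
                        printf("counter %d: -EAGAIN, PMU is full\n", i);
                else
                        printf("counter %d: got slot %d\n", i, counters[i].idx);
        }
        return 0;
}

Built with a plain C compiler, the last two enables hit the -EAGAIN branch, which mirrors what pmc_generic_enable() now reports in the patch below when find_first_zero_bit() finds no free slot.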
@@ -244,7 +244,7 @@ static int fixed_mode_idx(struct hw_perf_counter *hwc)
 /*
  * Find a PMC slot for the freshly enabled / scheduled in counter:
  */
-static void pmc_generic_enable(struct perf_counter *counter)
+static int pmc_generic_enable(struct perf_counter *counter)
 {
         struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
         struct hw_perf_counter *hwc = &counter->hw;
@@ -253,6 +253,8 @@ static void pmc_generic_enable(struct perf_counter *counter)
         /* Try to get the previous counter again */
         if (test_and_set_bit(idx, cpuc->used)) {
                 idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
+                if (idx == nr_counters_generic)
+                        return -EAGAIN;
                 set_bit(idx, cpuc->used);
                 hwc->idx = idx;
         }
@@ -265,6 +267,8 @@ static void pmc_generic_enable(struct perf_counter *counter)
 
         __hw_perf_counter_set_period(counter, hwc, idx);
         __pmc_generic_enable(counter, hwc, idx);
+
+        return 0;
 }
 
 void perf_counter_print_debug(void)
...
@@ -128,7 +128,7 @@ struct perf_counter;
  * struct hw_perf_counter_ops - performance counter hw ops
  */
 struct hw_perf_counter_ops {
-        void (*enable)          (struct perf_counter *counter);
+        int (*enable)           (struct perf_counter *counter);
         void (*disable)         (struct perf_counter *counter);
         void (*read)            (struct perf_counter *counter);
 };
...
@@ -355,21 +355,25 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu)
         cpuctx->task_ctx = NULL;
 }
 
-static void
+static int
 counter_sched_in(struct perf_counter *counter,
                  struct perf_cpu_context *cpuctx,
                  struct perf_counter_context *ctx,
                  int cpu)
 {
         if (counter->state == PERF_COUNTER_STATE_OFF)
-                return;
+                return 0;
+
+        if (counter->hw_ops->enable(counter))
+                return -EAGAIN;
 
-        counter->hw_ops->enable(counter);
         counter->state = PERF_COUNTER_STATE_ACTIVE;
         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
 
         cpuctx->active_oncpu++;
         ctx->nr_active++;
+
+        return 0;
 }
 
 static int
@@ -378,20 +382,38 @@ group_sched_in(struct perf_counter *group_counter,
                struct perf_counter_context *ctx,
                int cpu)
 {
-        struct perf_counter *counter;
-        int was_group = 0;
+        struct perf_counter *counter, *partial_group;
+        int ret = 0;
 
-        counter_sched_in(group_counter, cpuctx, ctx, cpu);
+        if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+                return -EAGAIN;
 
         /*
          * Schedule in siblings as one group (if any):
          */
         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-                counter_sched_in(counter, cpuctx, ctx, cpu);
-                was_group = 1;
+                if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
+                        partial_group = counter;
+                        goto group_error;
+                }
+                ret = -EAGAIN;
         }
 
-        return was_group;
+        return ret;
+
+group_error:
+        /*
+         * Groups can be scheduled in as one unit only, so undo any
+         * partial group before returning:
+         */
+        list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+                if (counter == partial_group)
+                        break;
+                counter_sched_out(counter, cpuctx, ctx);
+        }
+        counter_sched_out(group_counter, cpuctx, ctx);
+
+        return -EAGAIN;
 }
 
 /*
@@ -416,9 +438,6 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu)
         spin_lock(&ctx->lock);
         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-                if (ctx->nr_active == cpuctx->max_pertask)
-                        break;
-
                 /*
                  * Listen to the 'cpu' scheduling filter constraint
                  * of counters:
@@ -856,8 +875,9 @@ static const struct file_operations perf_fops = {
         .poll                   = perf_poll,
 };
 
-static void cpu_clock_perf_counter_enable(struct perf_counter *counter)
+static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 {
+        return 0;
 }
 
 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
@@ -913,11 +933,13 @@ static void task_clock_perf_counter_read(struct perf_counter *counter)
         task_clock_perf_counter_update(counter, now);
 }
 
-static void task_clock_perf_counter_enable(struct perf_counter *counter)
+static int task_clock_perf_counter_enable(struct perf_counter *counter)
 {
         u64 now = task_clock_perf_counter_val(counter, 0);
 
         atomic64_set(&counter->hw.prev_count, now);
+
+        return 0;
 }
 
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
@@ -960,12 +982,14 @@ static void page_faults_perf_counter_read(struct perf_counter *counter)
         page_faults_perf_counter_update(counter);
 }
 
-static void page_faults_perf_counter_enable(struct perf_counter *counter)
+static int page_faults_perf_counter_enable(struct perf_counter *counter)
 {
         /*
          * page-faults is a per-task value already,
          * so we dont have to clear it on switch-in.
          */
+
+        return 0;
 }
 
 static void page_faults_perf_counter_disable(struct perf_counter *counter)
@@ -1006,12 +1030,14 @@ static void context_switches_perf_counter_read(struct perf_counter *counter)
         context_switches_perf_counter_update(counter);
 }
 
-static void context_switches_perf_counter_enable(struct perf_counter *counter)
+static int context_switches_perf_counter_enable(struct perf_counter *counter)
 {
         /*
          * ->nvcsw + curr->nivcsw is a per-task value already,
          * so we dont have to clear it on switch-in.
          */
+
+        return 0;
 }
 
 static void context_switches_perf_counter_disable(struct perf_counter *counter)
@@ -1050,12 +1076,14 @@ static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
         cpu_migrations_perf_counter_update(counter);
 }
 
-static void cpu_migrations_perf_counter_enable(struct perf_counter *counter)
+static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
 {
         /*
         * se.nr_migrations is a per-task value already,
         * so we dont have to clear it on switch-in.
         */
+
+        return 0;
 }
 
 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
...
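The group_error path added to group_sched_in() above makes group scheduling all-or-nothing: if any member's ->enable() fails, every counter that did make it onto the PMU is scheduled back out before -EAGAIN is returned, so a group is never left half-scheduled. A rough sketch of that rollback pattern, reusing the hypothetical toy types from the earlier sketch (not the kernel helpers):

/*
 * Sketch only: enable a whole group of counters, or none of them.
 * Uses the toy_counter/ops definitions from the sketch above.
 */
static int toy_group_enable(struct toy_counter *members, int nr)
{
        int i;

        for (i = 0; i < nr; i++) {
                if (ops.enable(&members[i]))
                        goto group_error;
        }
        return 0;

group_error:
        /* Undo the partial group before reporting failure. */
        while (--i >= 0)
                ops.disable(&members[i]);
        return -EAGAIN;
}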