Commit a78ac325 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Generic per counter interrupt throttle

Introduce a generic per counter interrupt throttle.

This uses the perf_counter_overflow() quick disable to throttle a specific
counter when it's going too fast, provided a pmu->unthrottle() method is
implemented which can undo the quick disable.

Power needs to implement both the quick disable and the unthrottle method.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090525153931.703093461@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: 48e22d56
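Before the diff itself, the new control flow can be summarised in a few lines. Below is a minimal userspace sketch of that logic, not kernel code: the HZ value, struct hw_counter and the helpers overflow_throttle()/tick_unthrottle() are illustrative stand-ins for what perf_counter_overflow() and perf_adjust_freq() do in the patch.

#include <stdio.h>
#include <stdint.h>

#define HZ             1000                     /* assumed tick rate */
#define MAX_INTERRUPTS (~0ULL)                  /* marks a throttled counter */

static uint64_t sysctl_perf_counter_limit = 100000;    /* max interrupts/sec */

struct hw_counter {
        uint64_t interrupts;                    /* overflows seen this tick */
};

/* Overflow (NMI) path: returns 1 when the counter should be quick-disabled. */
static int overflow_throttle(struct hw_counter *hwc)
{
        if (hwc->interrupts == MAX_INTERRUPTS)
                return 1;                       /* already throttled */

        hwc->interrupts++;
        if (HZ * hwc->interrupts > sysctl_perf_counter_limit) {
                hwc->interrupts = MAX_INTERRUPTS;
                return 1;                       /* tell the caller to disable it */
        }
        return 0;
}

/* Timer-tick path: reset the count and undo the throttle. */
static void tick_unthrottle(struct hw_counter *hwc)
{
        if (hwc->interrupts == MAX_INTERRUPTS)
                printf("tick: re-enable counter (pmu->unthrottle() in the kernel)\n");
        hwc->interrupts = 0;
}

int main(void)
{
        struct hw_counter hwc = { 0 };
        int overflows = 0;

        while (!overflow_throttle(&hwc))
                overflows++;
        printf("throttled after %d overflows within one tick\n", overflows);

        tick_unthrottle(&hwc);
        return 0;
}

In words: every overflow bumps a per-counter interrupt count; once that count, extrapolated to a full second, exceeds sysctl_perf_counter_limit the counter is marked MAX_INTERRUPTS and quick-disabled, and the next timer tick resets the count and re-enables the counter through pmu->unthrottle().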
@@ -623,6 +623,18 @@ static int x86_pmu_enable(struct perf_counter *counter)
         return 0;
 }

+static void x86_pmu_unthrottle(struct perf_counter *counter)
+{
+        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+        struct hw_perf_counter *hwc = &counter->hw;
+
+        if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
+                         cpuc->counters[hwc->idx] != counter))
+                return;
+
+        x86_pmu.enable(hwc, hwc->idx);
+}
+
 void perf_counter_print_debug(void)
 {
         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
@@ -1038,6 +1050,7 @@ static const struct pmu pmu = {
         .enable         = x86_pmu_enable,
         .disable        = x86_pmu_disable,
         .read           = x86_pmu_read,
+        .unthrottle     = x86_pmu_unthrottle,
 };

 const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
......
@@ -266,6 +266,15 @@ enum perf_event_type {
          */
         PERF_EVENT_PERIOD               = 4,

+        /*
+         * struct {
+         *      struct perf_event_header        header;
+         *      u64                             time;
+         * };
+         */
+        PERF_EVENT_THROTTLE             = 5,
+        PERF_EVENT_UNTHROTTLE           = 6,
+
         /*
          * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
          * will be PERF_RECORD_*
@@ -367,6 +376,7 @@ struct pmu {
         int (*enable)                   (struct perf_counter *counter);
         void (*disable)                 (struct perf_counter *counter);
         void (*read)                    (struct perf_counter *counter);
+        void (*unthrottle)              (struct perf_counter *counter);
 };

 /**
@@ -613,6 +623,7 @@ extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

 extern int sysctl_perf_counter_priv;
 extern int sysctl_perf_counter_mlock;
+extern int sysctl_perf_counter_limit;

 extern void perf_counter_init(void);
......
@@ -46,6 +46,7 @@ static atomic_t nr_comm_tracking __read_mostly;

 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
+int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */

 /*
  * Lock for (sysadmin-configurable) counter reservations:
@@ -1066,12 +1067,15 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
         __perf_counter_sched_in(ctx, cpuctx, cpu);
 }

+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_counter *counter, int enable);
+
 static void perf_log_period(struct perf_counter *counter, u64 period);

 static void perf_adjust_freq(struct perf_counter_context *ctx)
 {
         struct perf_counter *counter;
-        u64 irq_period;
+        u64 interrupts, irq_period;
         u64 events, period;
         s64 delta;
@@ -1080,10 +1084,19 @@ static void perf_adjust_freq(struct perf_counter_context *ctx)
                 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
                         continue;

+                interrupts = counter->hw.interrupts;
+                counter->hw.interrupts = 0;
+
+                if (interrupts == MAX_INTERRUPTS) {
+                        perf_log_throttle(counter, 1);
+                        counter->pmu->unthrottle(counter);
+                        interrupts = 2*sysctl_perf_counter_limit/HZ;
+                }
+
                 if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
                         continue;

-                events = HZ * counter->hw.interrupts * counter->hw.irq_period;
+                events = HZ * interrupts * counter->hw.irq_period;
                 period = div64_u64(events, counter->hw_event.irq_freq);

                 delta = (s64)(1 + period - counter->hw.irq_period);
@@ -1097,7 +1110,6 @@ static void perf_adjust_freq(struct perf_counter_context *ctx)
                 perf_log_period(counter, irq_period);

                 counter->hw.irq_period = irq_period;
-                counter->hw.interrupts = 0;
         }
         spin_unlock(&ctx->lock);
 }
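To put numbers on the arithmetic above (assuming HZ = 1000): a counter that took interrupts = 4 overflows during the last tick with irq_period = 10000 is observed at roughly 1000 * 4 * 10000 = 40,000,000 events per second; for a requested irq_freq of 1000 samples per second the ideal period is about 40,000,000 / 1000 = 40,000, and the delta term moves hw.irq_period toward that value. A counter that was throttled is instead credited with 2*sysctl_perf_counter_limit/HZ interrupts, i.e. treated as if it fired at twice the permitted rate, so its sampling period gets stretched aggressively.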
@@ -2543,6 +2555,35 @@ static void perf_log_period(struct perf_counter *counter, u64 period)
         perf_output_end(&handle);
 }

+/*
+ * IRQ throttle logging
+ */
+
+static void perf_log_throttle(struct perf_counter *counter, int enable)
+{
+        struct perf_output_handle handle;
+        int ret;
+
+        struct {
+                struct perf_event_header        header;
+                u64                             time;
+        } throttle_event = {
+                .header = {
+                        .type = PERF_EVENT_THROTTLE + 1,
+                        .misc = 0,
+                        .size = sizeof(throttle_event),
+                },
+                .time = sched_clock(),
+        };
+
+        ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 0, 0);
+        if (ret)
+                return;
+
+        perf_output_put(&handle, throttle_event);
+        perf_output_end(&handle);
+}
+
 /*
  * Generic counter overflow handling.
  */
@@ -2551,9 +2592,19 @@ int perf_counter_overflow(struct perf_counter *counter,
                           int nmi, struct pt_regs *regs, u64 addr)
 {
         int events = atomic_read(&counter->event_limit);
+        int throttle = counter->pmu->unthrottle != NULL;
         int ret = 0;

-        counter->hw.interrupts++;
+        if (!throttle) {
+                counter->hw.interrupts++;
+        } else if (counter->hw.interrupts != MAX_INTERRUPTS) {
+                counter->hw.interrupts++;
+                if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
+                        counter->hw.interrupts = MAX_INTERRUPTS;
+                        perf_log_throttle(counter, 0);
+                        ret = 1;
+                }
+        }

         /*
          * XXX event_limit might not quite work as expected on inherited
......
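Putting the overflow-path hunk together with perf_adjust_freq() above: when the PMU provides an unthrottle() method, each overflow bumps hw.interrupts; once HZ * interrupts exceeds sysctl_perf_counter_limit the counter is marked MAX_INTERRUPTS, a throttle record is logged and the non-zero return value asks the architecture's overflow handler to quick-disable the counter. On the next timer tick perf_adjust_freq() logs the matching unthrottle record, calls pmu->unthrottle() to re-enable the counter and resumes normal frequency adjustment. PMUs without an unthrottle() method keep the old behaviour and are never throttled.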
@@ -930,6 +930,14 @@ static struct ctl_table kern_table[] = {
                 .mode           = 0644,
                 .proc_handler   = &proc_dointvec,
         },
+        {
+                .ctl_name       = CTL_UNNUMBERED,
+                .procname       = "perf_counter_int_limit",
+                .data           = &sysctl_perf_counter_limit,
+                .maxlen         = sizeof(sysctl_perf_counter_limit),
+                .mode           = 0644,
+                .proc_handler   = &proc_dointvec,
+        },
 #endif
         /*
          * NOTE: do not add new entries to this table unless you have read
......
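The limit is runtime tunable: the new kern_table entry exposes sysctl_perf_counter_limit as kernel.perf_counter_int_limit (i.e. /proc/sys/kernel/perf_counter_int_limit, mode 0644, handled by proc_dointvec), with the default of 100000 interrupts per second set where the variable is defined above.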