Commit 15ac9a39 authored by Peter Zijlstra, committed by Ingo Molnar

perf: Remove the sysfs bits

Neither the overcommit nor the reservation sysfs parameter was actually
working; remove them, as they will only get in the way.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: a4eaf7f1
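
The hunks below all apply the same pattern: with the global perf_max_events gone, each architecture checks event capacity against its own PMU description instead (alpha_pmu->num_pmcs, armpmu->num_events, MAX_HWEVENTS, x86_pmu.num_counters). The following is a minimal, self-contained sketch of that per-PMU capacity check; struct fake_pmu and fake_pmu_can_add() are hypothetical names used only for illustration and are not kernel code.

```c
/*
 * Illustrative sketch only -- not kernel code. Models the idea of this
 * commit: the "is there a free counter?" check lives with each PMU's own
 * limit rather than with a single global perf_max_events.
 */
#include <stdio.h>

struct fake_pmu {
	const char *name;
	int num_events;		/* per-PMU counter limit, set by the driver */
	int n_active;		/* events currently scheduled on this PMU */
};

/* Capacity check against the PMU's own limit, not a global. */
static int fake_pmu_can_add(const struct fake_pmu *pmu)
{
	return pmu->n_active < pmu->num_events;
}

int main(void)
{
	struct fake_pmu pmu = { .name = "example", .num_events = 2, .n_active = 0 };

	for (int i = 0; i < 3; i++) {
		if (fake_pmu_can_add(&pmu)) {
			pmu.n_active++;
			printf("%s: event %d accepted (%d/%d)\n",
			       pmu.name, i, pmu.n_active, pmu.num_events);
		} else {
			/* real drivers return -ENOSPC at this point */
			printf("%s: event %d rejected, no free counters\n",
			       pmu.name, i);
		}
	}
	return 0;
}
```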
@@ -808,7 +808,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
         wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
         /* la_ptr is the counter that overflowed. */
-        if (unlikely(la_ptr >= perf_max_events)) {
+        if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
                 /* This should never occur! */
                 irq_err_count++;
                 pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -879,7 +879,6 @@ void __init init_hw_perf_events(void)
         /* And set up PMU specification */
         alpha_pmu = &ev67_pmu;
-        perf_max_events = alpha_pmu->num_pmcs;
         perf_pmu_register(&pmu);
 }
...
@@ -534,7 +534,7 @@ static int armpmu_event_init(struct perf_event *event)
         event->destroy = hw_perf_event_destroy;
         if (!atomic_inc_not_zero(&active_events)) {
-                if (atomic_read(&active_events) > perf_max_events) {
+                if (atomic_read(&active_events) > armpmu->num_events) {
                         atomic_dec(&active_events);
                         return -ENOSPC;
                 }
@@ -2974,14 +2974,12 @@ init_hw_perf_events(void)
                 armpmu = &armv6pmu;
                 memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
                         sizeof(armv6_perf_cache_map));
-                perf_max_events = armv6pmu.num_events;
                 break;
         case 0xB020:    /* ARM11mpcore */
                 armpmu = &armv6mpcore_pmu;
                 memcpy(armpmu_perf_cache_map,
                         armv6mpcore_perf_cache_map,
                         sizeof(armv6mpcore_perf_cache_map));
-                perf_max_events = armv6mpcore_pmu.num_events;
                 break;
         case 0xC080:    /* Cortex-A8 */
                 armv7pmu.id = ARM_PERF_PMU_ID_CA8;
@@ -2993,7 +2991,6 @@ init_hw_perf_events(void)
                 /* Reset PMNC and read the nb of CNTx counters
                     supported */
                 armv7pmu.num_events = armv7_reset_read_pmnc();
-                perf_max_events = armv7pmu.num_events;
                 break;
         case 0xC090:    /* Cortex-A9 */
                 armv7pmu.id = ARM_PERF_PMU_ID_CA9;
@@ -3005,7 +3002,6 @@ init_hw_perf_events(void)
                 /* Reset PMNC and read the nb of CNTx counters
                     supported */
                 armv7pmu.num_events = armv7_reset_read_pmnc();
-                perf_max_events = armv7pmu.num_events;
                 break;
         }
         /* Intel CPUs [xscale]. */
@@ -3016,13 +3012,11 @@ init_hw_perf_events(void)
                         armpmu = &xscale1pmu;
                         memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                                 sizeof(xscale_perf_cache_map));
-                        perf_max_events = xscale1pmu.num_events;
                         break;
                 case 2:
                         armpmu = &xscale2pmu;
                         memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                                 sizeof(xscale_perf_cache_map));
-                        perf_max_events = xscale2pmu.num_events;
                         break;
                 }
         }
@@ -3032,7 +3026,6 @@ init_hw_perf_events(void)
                         arm_pmu_names[armpmu->id], armpmu->num_events);
         } else {
                 pr_info("no hardware support available\n");
-                perf_max_events = -1;
         }
         perf_pmu_register(&pmu);
...
@@ -897,7 +897,7 @@ static int sparc_check_constraints(struct perf_event **evts,
         if (!n_ev)
                 return 0;
-        if (n_ev > perf_max_events)
+        if (n_ev > MAX_HWEVENTS)
                 return -1;
         msk0 = perf_event_get_msk(events[0]);
@@ -1014,7 +1014,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
         perf_pmu_disable(event->pmu);
         n0 = cpuc->n_events;
-        if (n0 >= perf_max_events)
+        if (n0 >= MAX_HWEVENTS)
                 goto out;
         cpuc->event[n0] = event;
@@ -1097,7 +1097,7 @@ static int sparc_pmu_event_init(struct perf_event *event)
         n = 0;
         if (event->group_leader != event) {
                 n = collect_events(event->group_leader,
-                                   perf_max_events - 1,
+                                   MAX_HWEVENTS - 1,
                                    evts, events, current_idx_dmy);
                 if (n < 0)
                         return -EINVAL;
@@ -1309,9 +1309,6 @@ void __init init_hw_perf_events(void)
         pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
-        /* All sparc64 PMUs currently have 2 events. */
-        perf_max_events = 2;
         perf_pmu_register(&pmu);
         register_die_notifier(&perf_event_nmi_notifier);
 }
...
@@ -1396,7 +1396,6 @@ void __init init_hw_perf_events(void)
                 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
         }
         x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-        perf_max_events = x86_pmu.num_counters;
         if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
...
@@ -860,7 +860,6 @@ struct perf_cpu_context {
         struct perf_event_context       ctx;
         struct perf_event_context       *task_ctx;
         int                             active_oncpu;
-        int                             max_pertask;
         int                             exclusive;
         struct swevent_hlist            *swevent_hlist;
         struct mutex                    hlist_mutex;
@@ -883,11 +882,6 @@ struct perf_output_handle {
 #ifdef CONFIG_PERF_EVENTS
-/*
- * Set by architecture code:
- */
-extern int perf_max_events;
 extern int perf_pmu_register(struct pmu *pmu);
 extern void perf_pmu_unregister(struct pmu *pmu);
...
@@ -39,10 +39,6 @@
  */
 static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
-int perf_max_events __read_mostly = 1;
-static int perf_reserved_percpu __read_mostly;
-static int perf_overcommit __read_mostly = 1;
 static atomic_t nr_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -66,11 +62,6 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
 static atomic64_t perf_event_id;
-/*
- * Lock for (sysadmin-configurable) event reservations:
- */
-static DEFINE_SPINLOCK(perf_resource_lock);
 void __weak perf_event_print_debug(void)        { }
 void perf_pmu_disable(struct pmu *pmu)
@@ -480,16 +471,6 @@ static void __perf_event_remove_from_context(void *info)
         list_del_event(event, ctx);
-        if (!ctx->task) {
-                /*
-                 * Allow more per task events with respect to the
-                 * reservation:
-                 */
-                cpuctx->max_pertask =
-                        min(perf_max_events - ctx->nr_events,
-                            perf_max_events - perf_reserved_percpu);
-        }
         raw_spin_unlock(&ctx->lock);
 }
@@ -823,9 +804,6 @@ static void __perf_install_in_context(void *info)
                 }
         }
-        if (!err && !ctx->task && cpuctx->max_pertask)
-                cpuctx->max_pertask--;
 unlock:
         raw_spin_unlock(&ctx->lock);
 }
@@ -5930,10 +5908,6 @@ static void __cpuinit perf_event_init_cpu(int cpu)
         cpuctx = &per_cpu(perf_cpu_context, cpu);
-        spin_lock(&perf_resource_lock);
-        cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
-        spin_unlock(&perf_resource_lock);
         mutex_lock(&cpuctx->hlist_mutex);
         if (cpuctx->hlist_refcount > 0) {
                 struct swevent_hlist *hlist;
@@ -6008,101 +5982,3 @@ void __init perf_event_init(void)
         perf_tp_register();
         perf_cpu_notifier(perf_cpu_notify);
 }
-
-static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
-                                        struct sysdev_class_attribute *attr,
-                                        char *buf)
-{
-        return sprintf(buf, "%d\n", perf_reserved_percpu);
-}
-
-static ssize_t
-perf_set_reserve_percpu(struct sysdev_class *class,
-                        struct sysdev_class_attribute *attr,
-                        const char *buf,
-                        size_t count)
-{
-        struct perf_cpu_context *cpuctx;
-        unsigned long val;
-        int err, cpu, mpt;
-
-        err = strict_strtoul(buf, 10, &val);
-        if (err)
-                return err;
-        if (val > perf_max_events)
-                return -EINVAL;
-
-        spin_lock(&perf_resource_lock);
-        perf_reserved_percpu = val;
-        for_each_online_cpu(cpu) {
-                cpuctx = &per_cpu(perf_cpu_context, cpu);
-                raw_spin_lock_irq(&cpuctx->ctx.lock);
-                mpt = min(perf_max_events - cpuctx->ctx.nr_events,
-                          perf_max_events - perf_reserved_percpu);
-                cpuctx->max_pertask = mpt;
-                raw_spin_unlock_irq(&cpuctx->ctx.lock);
-        }
-        spin_unlock(&perf_resource_lock);
-
-        return count;
-}
-
-static ssize_t perf_show_overcommit(struct sysdev_class *class,
-                                    struct sysdev_class_attribute *attr,
-                                    char *buf)
-{
-        return sprintf(buf, "%d\n", perf_overcommit);
-}
-
-static ssize_t
-perf_set_overcommit(struct sysdev_class *class,
-                    struct sysdev_class_attribute *attr,
-                    const char *buf, size_t count)
-{
-        unsigned long val;
-        int err;
-
-        err = strict_strtoul(buf, 10, &val);
-        if (err)
-                return err;
-        if (val > 1)
-                return -EINVAL;
-
-        spin_lock(&perf_resource_lock);
-        perf_overcommit = val;
-        spin_unlock(&perf_resource_lock);
-
-        return count;
-}
-
-static SYSDEV_CLASS_ATTR(
-                                reserve_percpu,
-                                0644,
-                                perf_show_reserve_percpu,
-                                perf_set_reserve_percpu
-                        );
-
-static SYSDEV_CLASS_ATTR(
-                                overcommit,
-                                0644,
-                                perf_show_overcommit,
-                                perf_set_overcommit
-                        );
-
-static struct attribute *perfclass_attrs[] = {
-        &attr_reserve_percpu.attr,
-        &attr_overcommit.attr,
-        NULL
-};
-
-static struct attribute_group perfclass_attr_group = {
-        .attrs = perfclass_attrs,
-        .name = "perf_events",
-};
-
-static int __init perf_event_sysfs_init(void)
-{
-        return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
-                                  &perfclass_attr_group);
-}
-device_initcall(perf_event_sysfs_init);