Commit eb487ab4 authored by: Linus Torvalds

Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Fix reading in perf_event_read()
  watchdog: Don't change watchdog state on read of sysctl
  watchdog: Fix sysctl consistency
  watchdog: Fix broken nowatchdog logic
  perf: Fix Pentium4 raw event validation
  perf: Fix alloc_callchain_buffers()
@@ -682,7 +682,7 @@ static int p4_validate_raw_event(struct perf_event *event)
	 * if an event is shared across the logical threads
	 * the user needs special permissions to be able to use it
	 */
-	if (p4_event_bind_map[v].shared) {
+	if (p4_ht_active() && p4_event_bind_map[v].shared) {
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}
@@ -727,7 +727,8 @@ static int p4_hw_config(struct perf_event *event)
		event->hw.config = p4_set_ht_bit(event->hw.config);

	if (event->attr.type == PERF_TYPE_RAW) {
-
+		struct p4_event_bind *bind;
+		unsigned int esel;
		/*
		 * Clear bits we reserve to be managed by kernel itself
		 * and never allowed from a user space
@@ -743,6 +744,13 @@ static int p4_hw_config(struct perf_event *event)
		 * bits since we keep additional info here (for cache events and etc)
		 */
		event->hw.config |= event->attr.config;
+		bind = p4_config_get_bind(event->attr.config);
+		if (!bind) {
+			rc = -EINVAL;
+			goto out;
+		}
+		esel = P4_OPCODE_ESEL(bind->opcode);
+		event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
	}

	rc = x86_setup_perfctr(event);
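
The two hunks above fix raw event validation on Pentium 4: the CAP_SYS_ADMIN check for HT-shared events now applies only when hyper-threading is actually active, and a raw config that does not map to a known event is rejected with -EINVAL before the event-select (ESEL) bits are packed into the CCCR. A minimal user-space sketch of that look-up-then-pack pattern follows; the struct layout, bind_map contents, and both macros are simplified stand-ins, not the kernel's definitions from perf_event_p4.h:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins: layout and values are illustrative, not the
 * kernel's definitions from perf_event_p4.h. */
struct event_bind {
	uint32_t opcode;	/* assumed: event select in bits 8-15 */
	int shared;		/* visible to both HT siblings */
};

#define OPCODE_ESEL(op)		(((op) >> 8) & 0xff)
#define CCCR_ESEL(esel)		((uint64_t)(esel) << 13)

static const struct event_bind bind_map[] = {
	{ .opcode = 0x0106, .shared = 0 },
	{ .opcode = 0x0203, .shared = 1 },
};

/* Look up the bind entry for a raw config; NULL means invalid,
 * mirroring the -EINVAL path added to p4_hw_config(). */
static const struct event_bind *config_get_bind(unsigned int v)
{
	if (v >= sizeof(bind_map) / sizeof(bind_map[0]))
		return NULL;
	return &bind_map[v];
}

int main(void)
{
	const struct event_bind *bind = config_get_bind(1);
	uint64_t hw_config = 0;
	int ht_active = 1;	/* pretend hyper-threading is on */

	if (!bind)
		return 1;	/* kernel: rc = -EINVAL; goto out; */

	/* Only HT-shared events need extra permissions, and only
	 * when HT is actually active (the first hunk's fix). */
	if (ht_active && bind->shared)
		printf("shared event: admin capability required\n");

	/* Pack the event-select bits the way the second fix does. */
	hw_config |= CCCR_ESEL(OPCODE_ESEL(bind->opcode));
	printf("config = %#llx\n", (unsigned long long)hw_config);
	return 0;
}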
......
@@ -1901,11 +1901,12 @@ static void __perf_event_read(void *info)
		return;

	raw_spin_lock(&ctx->lock);
-	update_context_time(ctx);
+	if (ctx->is_active)
+		update_context_time(ctx);
	update_event_times(event);
+	if (event->state == PERF_EVENT_STATE_ACTIVE)
+		event->pmu->read(event);
	raw_spin_unlock(&ctx->lock);
-
-	event->pmu->read(event);
}

static inline u64 perf_event_count(struct perf_event *event)
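
The hunk above closes a window in perf_event_read(): pmu->read() used to run after ctx->lock was dropped, so the event could be scheduled out between the state check and the read. Moving the read under the lock, guarded by PERF_EVENT_STATE_ACTIVE, makes the check and the read atomic with respect to the context. A minimal sketch of that check-then-read-under-one-lock pattern, using a pthread mutex in place of the kernel's raw spinlock:

#include <pthread.h>
#include <stdio.h>

enum state { STATE_INACTIVE, STATE_ACTIVE };

struct event {
	pthread_mutex_t lock;
	enum state state;
	long count;
};

/* Both the state check and the read happen inside one critical
 * section, mirroring how the fix moves pmu->read() under ctx->lock:
 * the event cannot change state between the check and the read. */
static long event_read(struct event *ev)
{
	long val = -1;

	pthread_mutex_lock(&ev->lock);
	if (ev->state == STATE_ACTIVE)
		val = ev->count;	/* stand-in for pmu->read() */
	pthread_mutex_unlock(&ev->lock);
	return val;
}

int main(void)
{
	struct event ev = { PTHREAD_MUTEX_INITIALIZER, STATE_ACTIVE, 42 };

	printf("read: %ld\n", event_read(&ev));
	return 0;
}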
@@ -1999,8 +2000,7 @@ static int alloc_callchain_buffers(void)
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
-	size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
-		num_possible_cpus();
+	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
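
The sizing fix matters because cpu_entries[] is indexed by CPU id, and with a sparse CPU id space the largest id can exceed num_possible_cpus(); sizing by nr_cpu_ids (the highest possible id plus one) via offsetof() over the flexible array member allocates exactly the header plus one pointer slot per possible id. A small stand-alone sketch of the idiom, with a simplified stand-in struct:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for callchain_cpus_entries: a header plus a
 * flexible pointer array indexed by cpu id. */
struct cpus_entries {
	int refcount;
	void *cpu_entries[];
};

int main(void)
{
	int nr_cpu_ids = 8;	/* assumed: highest possible cpu id + 1 */

	/* offsetof over a flexible array member sizes the allocation as
	 * header + nr_cpu_ids pointer slots.  A variable index inside
	 * offsetof() is a GCC/Clang extension the kernel relies on. */
	size_t size = offsetof(struct cpus_entries, cpu_entries[nr_cpu_ids]);

	struct cpus_entries *entries = calloc(1, size);	/* ~ kzalloc() */
	if (!entries)
		return 1;

	printf("allocated %zu bytes for %d cpu slots\n", size, nr_cpu_ids);
	free(entries);
	return 0;
}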
......
@@ -27,7 +27,7 @@
#include <asm/irq_regs.h>
#include <linux/perf_event.h>

-int watchdog_enabled;
+int watchdog_enabled = 1;
int __read_mostly softlockup_thresh = 60;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -43,9 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

-static int no_watchdog;
-
-
/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
@@ -58,7 +55,7 @@ static int __init hardlockup_panic_setup(char *str)
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "0", 1))
-		no_watchdog = 1;
+		watchdog_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -77,7 +74,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
-	no_watchdog = 1;
+	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);
@@ -85,7 +82,7 @@ __setup("nowatchdog", nowatchdog_setup);
/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
-	no_watchdog = 1;
+	watchdog_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
@@ -432,9 +429,6 @@ static int watchdog_enable(int cpu)
		wake_up_process(p);
	}

-	/* if any cpu succeeds, watchdog is considered enabled for the system */
-	watchdog_enabled = 1;
-
	return 0;
}

@@ -462,12 +456,16 @@ static void watchdog_disable(int cpu)
static void watchdog_enable_all_cpus(void)
{
	int cpu;
-	int result = 0;
+
+	watchdog_enabled = 0;

	for_each_online_cpu(cpu)
-		result += watchdog_enable(cpu);
+		if (!watchdog_enable(cpu))
+			/* if any cpu succeeds, watchdog is considered
+			   enabled for the system */
+			watchdog_enabled = 1;

-	if (result)
+	if (!watchdog_enabled)
		printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
}
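
After this change the system-wide flag is computed in one place: watchdog_enabled starts at 0 and is raised as soon as any CPU's watchdog comes up, so a partial failure no longer leaves the flag claiming success. A minimal sketch of that any-success aggregation, with a fake per-CPU enable that fails on one CPU:

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for watchdog_enable(): pretend cpu 2 fails to come up. */
static int fake_enable(int cpu)
{
	return cpu == 2 ? -1 : 0;
}

int main(void)
{
	int watchdog_enabled = 0;
	int cpu;

	/* if any cpu succeeds, the watchdog is considered enabled for
	 * the system -- the aggregation watchdog_enable_all_cpus() does */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (!fake_enable(cpu))
			watchdog_enabled = 1;

	if (!watchdog_enabled)
		printf("watchdog: failed to be enabled on some cpus\n");
	else
		printf("watchdog enabled system-wide\n");
	return 0;
}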
@@ -476,9 +474,6 @@ static void watchdog_disable_all_cpus(void)
{
	int cpu;

-	if (no_watchdog)
-		return;
-
	for_each_online_cpu(cpu)
		watchdog_disable(cpu);

@@ -498,10 +493,12 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write,
{
	proc_dointvec(table, write, buffer, length, ppos);

-	if (watchdog_enabled)
-		watchdog_enable_all_cpus();
-	else
-		watchdog_disable_all_cpus();
+	if (write) {
+		if (watchdog_enabled)
+			watchdog_enable_all_cpus();
+		else
+			watchdog_disable_all_cpus();
+	}
	return 0;
}

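
The sysctl fix keys everything off the write flag: previously a mere read of the sysctl re-ran the enable/disable path and could flip watchdog state as a side effect. A simplified sketch of the read/write split; the handler below is a hypothetical stand-in for proc_dowatchdog_enabled() with proc_dointvec()'s value copy inlined:

#include <stdio.h>

static int watchdog_enabled = 1;

static void enable_all(void)  { printf("enable watchdog on all cpus\n"); }
static void disable_all(void) { printf("disable watchdog on all cpus\n"); }

/* Hypothetical stand-in for proc_dowatchdog_enabled(): side effects
 * run only on a write -- a read leaves the watchdog state untouched. */
static int dowatchdog_enabled(int write, int value)
{
	if (!write)
		return 0;	/* read: report state, change nothing */

	watchdog_enabled = value;	/* what proc_dointvec() does on write */
	if (watchdog_enabled)
		enable_all();
	else
		disable_all();
	return 0;
}

int main(void)
{
	dowatchdog_enabled(0, 0);	/* read: no side effects */
	dowatchdog_enabled(1, 0);	/* write 0: disables */
	dowatchdog_enabled(1, 1);	/* write 1: re-enables */
	return 0;
}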
@@ -530,7 +527,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
-		err = watchdog_enable(hotcpu);
+		if (watchdog_enabled)
+			err = watchdog_enable(hotcpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
@@ -555,9 +553,6 @@ void __init lockup_detector_init(void)
	void *cpu = (void *)(long)smp_processor_id();
	int err;

-	if (no_watchdog)
-		return;
-
	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	WARN_ON(notifier_to_errno(err));
......