/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>

/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * Different performance counters/event selection registers may be reserved
 * by different subsystems; this reservation system just tries to
 * coordinate things a little.
 */
static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);

/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
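/* (Illustrative arithmetic, assuming the msr.h values of this era:
 * MSR_P4_BSU_ESCR0 == 0x3a0 and MSR_P4_CRU_ESCR5 == 0x3e1, so the largest
 * bit offset handed out by nmi_evntsel_msr_to_bit() below is
 * 0x3e1 - 0x3a0 == 65, which needs a 66-bit-wide reservation bitmap.)
 */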

/*
 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
 * - it may be reserved by some other driver, or not
 * - when not reserved by some other driver, it may be used for
 *   the NMI watchdog, or not
 *
 * This is maintained separately from nmi_active because the NMI
 * watchdog may also be driven from the I/O APIC timer.
 */
static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG	(1<<0)
#define LAPIC_NMI_RESERVED	(1<<1)

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;

struct nmi_watchdog_ctlblk {
	int enabled;
	u64 check_bit;
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);

/* local prototypes */
static void stop_apic_nmi_watchdog(void *unused);
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		return (msr - MSR_P4_BPU_PERFCTR0);
	}
	return 0;
}

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		return (msr - MSR_P4_BSU_ESCR0);
	}
	return 0;
}
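
/* Worked example for the two helpers above (a sketch, assuming the usual
 * msr.h values): on AMD the four counters MSR_K7_PERFCTR0..3 sit at
 * consecutive addresses 0xc0010004..0xc0010007 and thus map to
 * reservation bits 0..3 of perfctr_nmi_owner, while MSR_K7_EVNTSEL0..3
 * (0xc0010000..0xc0010003) map to bits 0..3 of evntsel_nmi_owner.
 */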

/* checks whether a counter bit is available (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

/* checks an MSR for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
		return 1;
	return 0;
}

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
		return 1;
	return 0;
}

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
}
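
/* Minimal usage sketch for the reservation API above; this is roughly
 * what setup_k7_watchdog()/setup_p4_watchdog() below and external users
 * such as oprofile do (error paths shortened):
 *
 *	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
 *		goto fail;
 *	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
 *		goto fail_perfctr;
 *	... program and use the counter ...
 *	release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *	release_perfctr_nmi(MSR_K7_PERFCTR0);
 */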

static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		return boot_cpu_data.x86 == 15;
	}
	return 0;
}
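
/* (Family 15 means K8/Opteron on AMD and Pentium 4/NetBurst on Intel,
 * the only CPUs the perfctr-programming code below knows about.)
 */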

/* Run after command line and cpu_init init, but before all other checks */
void __cpuinit nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	if (nmi_known_cpu())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
}

#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (*endflag == 0)
		barrier();
}
#endif

int __init check_nmi_watchdog(void)
{
	volatile int endflag = 0;
	int *counts;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	local_irq_enable();
	mdelay((10*1000)/nmi_hz); /* wait 10 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu,
			       counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		kfree(counts);
		atomic_set(&nmi_active, -1);
		return -1;
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(counts);
	return 0;
}

int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str,"panic",5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
		return 0;  /* no lapic support */
	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
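
/* Boot-parameter examples (a sketch; the numeric values assume the NMI_*
 * constants from asm/nmi.h: 0 == NMI_NONE, 1 == NMI_IO_APIC,
 * 2 == NMI_LOCAL_APIC):
 *
 *	nmi_watchdog=2		lapic-driven watchdog
 *	nmi_watchdog=panic,1	io-apic watchdog, panic on lockup
 */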

static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}

int reserve_lapic_nmi(void)
{
	unsigned int old_owner;

	spin_lock(&lapic_nmi_owner_lock);
	old_owner = lapic_nmi_owner;
	lapic_nmi_owner |= LAPIC_NMI_RESERVED;
	spin_unlock(&lapic_nmi_owner_lock);
	if (old_owner & LAPIC_NMI_RESERVED)
		return -EBUSY;
	if (old_owner & LAPIC_NMI_WATCHDOG)
		disable_lapic_nmi_watchdog();
	return 0;
}

void release_lapic_nmi(void)
{
	unsigned int new_owner;

	spin_lock(&lapic_nmi_owner_lock);
	new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
	lapic_nmi_owner = new_owner;
	spin_unlock(&lapic_nmi_owner_lock);
	if (new_owner & LAPIC_NMI_WATCHDOG)
		enable_lapic_nmi_watchdog();
}
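
/* Handshake sketch for an external lapic NMI user such as oprofile:
 *
 *	if (reserve_lapic_nmi() < 0)
 *		return -EBUSY;		(watchdog or another user owns it)
 *	... use the lapic NMI ...
 *	release_lapic_nmi();		(watchdog restarts if it was active)
 */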

void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	disable_irq(0);
	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
		enable_irq(0);
	}
}

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	nmi_pm_active = atomic_read(&nmi_active);
	disable_lapic_nmi_watchdog();
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	if (nmi_pm_active > 0)
		enable_lapic_nmi_watchdog();
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id		= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but because this is an
	 * init call, it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
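
/* The counter is preloaded with -(cycles per watchdog period) so that it
 * counts up and overflows, raising an NMI through the APIC LVTPC entry,
 * once per period.  For example, on an (assumed) 2 GHz CPU with
 * nmi_hz == 1 the preload below is -((u64)2000000 * 1000 / 1), i.e.
 * minus two billion cycles.
 */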

static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	/* Simulator may not support it */
	if (checking_wrmsrl(evntsel_msr, 0UL))
		goto fail2;
	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL<<63;
	return 1;
fail2:
	release_evntsel_nmi(evntsel_msr);
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
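/* (With COMPARE and COMPLEMENT set, the counter increments in every cycle
 * in which the event count is <= THRESHOLD; threshold 15 is the maximum,
 * so the condition holds on every cycle and the counter ticks once per
 * cycle no matter which ESCR event is selected.)
 */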

static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/* performance counters are shared resources;
	 * assign each hyperthread its own set.
	 * (Re-using the ESCR0 register seems safe
	 * and keeps the cccr_val the same.)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
	 	| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL<<39;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

void setup_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			if (!setup_p4_watchdog())
				return;
			break;
		default:
			return;
		}
	}
	__get_cpu_var(nmi_watchdog_ctlblk.enabled) = 1;
	atomic_inc(&nmi_active);
}

static void stop_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			stop_p4_watchdog();
			break;
		default:
			return;
		}
	}
	__get_cpu_var(nmi_watchdog_ctlblk.enabled) = 0;
	atomic_dec(&nmi_active);
}

/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog(void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
 		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu (cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}
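
/* Usage sketch: code that legitimately spins with interrupts off for a
 * long time (console writes, debugger loops, ...) is expected to call
 * touch_nmi_watchdog() periodically so that no false LOCKUP is reported:
 *
 *	while (!done) {			(done and poll_hw() are hypothetical)
 *		touch_nmi_watchdog();
 *		done = poll_hw();
 *	}
 */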

int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit) {
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/*
				 * P4 quirks:
				 * - An overflown perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			}
			/* start the cycle over again */
			wrmsrl(wd->perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
			rc = 1;
		} else if (nmi_watchdog == NMI_IO_APIC) {
			/* don't know how to accurately check for this.
			 * just assume it was a watchdog timer interrupt;
			 * this matches the old behaviour.
			 */
			rc = 1;
		} else
			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
	}
done:
	return rc;
}

static __kprobes int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}
 
static nmi_callback_t nmi_callback = dummy_nmi_callback;
 
asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	nmi_enter();
	add_pda(__nmi_count, 1);
	default_do_nmi(regs);
	nmi_exit();
}

int do_nmi_callback(struct pt_regs * regs, int cpu)
{
	return rcu_dereference(nmi_callback)(regs, cpu);
}

void set_nmi_callback(nmi_callback_t callback)
{
	vmalloc_sync_all();
	rcu_assign_pointer(nmi_callback, callback);
}
EXPORT_SYMBOL_GPL(set_nmi_callback);
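
/* The vmalloc_sync_all() above is there because the callback may live in
 * module (vmalloc) space; syncing the vmalloc page tables into every pgd
 * first avoids taking a page fault, which would be fatal, inside NMI
 * context.  The rcu_assign_pointer()/rcu_dereference() pair makes the
 * callback visible to do_nmi_callback() without extra locking.
 */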

void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}
EXPORT_SYMBOL_GPL(unset_nmi_callback);

#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	if (!(reason & 0xc0)) {
		sprintf(buf, "NMI received for unknown reason %02x\n", reason);
		die_nmi(buf, regs);
	}
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/unknown_nmi_panic
 */
int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	old_state = unknown_nmi_panic;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!unknown_nmi_panic)
		return 0;

	if (unknown_nmi_panic) {
		if (reserve_lapic_nmi() < 0) {
			unknown_nmi_panic = 0;
			return -EBUSY;
		} else {
			set_nmi_callback(unknown_nmi_panic_callback);
		}
	} else {
		release_lapic_nmi();
		unset_nmi_callback();
	}
	return 0;
}
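
/* Run-time usage sketch:
 *
 *	echo 1 > /proc/sys/kernel/unknown_nmi_panic
 *
 * reserves the lapic NMI (stopping the watchdog if it owns it) and makes
 * NMIs with no known reason bits in get_nmi_reason() hit die_nmi()
 * instead of being silently ignored; echoing 0 reverses this.
 */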

#endif

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);