crash.c
/*
 * Architecture specific (x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kdebug.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/mach_apic.h>

/* Keeps track of which CPU is the crashing one. */
static int crashing_cpu;

#ifdef CONFIG_SMP
static atomic_t waiting_for_crash_ipi;

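/*
 * NMI handler run on each non-crashing CPU: it saves that CPU's
 * registers, disables its local APIC and parks it in a halt loop,
 * so only the crashing CPU carries on with the crash kexec.
 */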
static int crash_nmi_callback(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct pt_regs *regs;
	int cpu;

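	/* Only act on the NMI IPI; ignore other die notifications. */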
	if (val != DIE_NMI_IPI)
		return NOTIFY_OK;

	regs = ((struct die_args *)data)->regs;
	cpu = raw_smp_processor_id();

	/*
	 * Don't do anything if this handler is invoked on the crashing CPU.
	 * Otherwise the whole system would hang. The crashing CPU can get
	 * an NMI of its own if the system was booted with the nmi_watchdog
	 * parameter enabled.
	 */
	if (cpu == crashing_cpu)
		return NOTIFY_STOP;
	local_irq_disable();

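	/* Save this CPU's register state into its crash ELF note. */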
	crash_save_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	for (;;)
		halt();

	return NOTIFY_STOP;
}

static void smp_send_nmi_allbutself(void)
{
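	/* Deliver an NMI to every online CPU except this one. */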
	send_IPI_allbutself(NMI_VECTOR);
}

/*
 * This code is a best effort heuristic to get the
 * other cpus to stop executing. So races with
 * cpu hotplug shouldn't matter.
 */

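/* Die notifier that routes the shootdown NMIs to crash_nmi_callback(). */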
static struct notifier_block crash_nmi_nb = {
	.notifier_call = crash_nmi_callback,
};

static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
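	/* Hook the callback into the die notifier chain before raising any NMIs. */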
	if (register_die_notifier(&crash_nmi_nb))
		return;         /* return what? */

	/*
	 * Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}
	/* Leave the nmi callback set */
	disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
{
	/* There are no CPUs to shoot down */
}
#endif

void machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	/* Make a note of the crashing CPU. Will be used in the NMI callback. */
	crashing_cpu = smp_processor_id();
	nmi_shootdown_cpus();

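	/* Quiesce the APICs so the kexec'd kernel starts with interrupts off. */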
	if (cpu_has_apic)
		disable_local_APIC();

	disable_IO_APIC();

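	/* Finally, record the crashing CPU's own registers for the dump. */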
	crash_save_cpu(regs, smp_processor_id());
}