/*
 * Architecture specific (PPC64) functions for kexec based crash dumps.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Haren Myneni
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/export.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/setjmp.h>
#include <asm/debug.h>

/*
 * The primary CPU waits a while for all secondary CPUs to enter. This is to
 * avoid sending an IPI if the secondary CPUs are entering
 * crash_kexec_secondary on their own (eg via a system reset).
 *
 * The secondary timeout has to be longer than the primary. Both timeouts are
 * in milliseconds.
 */
#define PRIMARY_TIMEOUT		500
#define SECONDARY_TIMEOUT	1000

#define IPI_TIMEOUT		10000
#define REAL_MODE_TIMEOUT	10000

46
/* This keeps a track of which one is the crashing cpu. */
47
int crashing_cpu = -1;
48
static int time_to_dump;
49

50
#define CRASH_HANDLER_MAX 3
51 52 53 54
/* NULL terminated list of shutdown handles */
static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
static DEFINE_SPINLOCK(crash_handlers_lock);

55 56 57 58 59 60 61 62 63 64
static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
static int crash_shutdown_cpu = -1;

/*
 * Debugger fault hook installed around the crash shutdown handlers.
 * If the fault occurred on the CPU driving the shutdown, unwind via
 * the saved jump buffer; otherwise report the fault as unhandled.
 */
static int handle_fault(struct pt_regs *regs)
{
	if (smp_processor_id() == crash_shutdown_cpu)
		longjmp(crash_shutdown_buf, 1);

	return 0;
}

#ifdef CONFIG_SMP

static atomic_t cpus_in_crash;

/*
 * Entered by every secondary CPU, either via the crash IPI or via a
 * system reset through crash_kexec_secondary().  Saves the CPU's
 * register state (once), then parks the CPU until the primary signals
 * that the kdump kernel is about to boot.
 */
void crash_ipi_callback(struct pt_regs *regs)
{
	/* Tracks CPUs whose state is already saved; a CPU may re-enter
	 * here after a system reset and must not save its state twice. */
	static cpumask_t cpus_state_saved = CPU_MASK_NONE;

	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	hard_irq_disable();
	if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
		crash_save_cpu(regs, cpu);
		cpumask_set_cpu(cpu, &cpus_state_saved);
	}

	atomic_inc(&cpus_in_crash);
	smp_mb__after_atomic();

	/*
	 * Starting the kdump boot.
	 * This barrier is needed to make sure that all CPUs are stopped.
	 */
	while (!time_to_dump)
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}

105
static void crash_kexec_prepare_cpus(int cpu)
106 107
{
	unsigned int msecs;
108
	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
109 110
	int tries = 0;
	int (*old_handler)(struct pt_regs *regs);
111

112 113
	printk(KERN_EMERG "Sending IPI to other CPUs\n");

114 115 116
	crash_send_ipi(crash_ipi_callback);
	smp_wmb();

117
again:
118
	/*
119
	 * FIXME: Until we will have the way to stop other CPUs reliably,
120
	 * the crash CPU will send an IPI and wait for other CPUs to
121
	 * respond.
122
	 */
123
	msecs = IPI_TIMEOUT;
124
	while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0))
125 126 127 128
		mdelay(1);

	/* Would it be better to replace the trap vector here? */

129
	if (atomic_read(&cpus_in_crash) >= ncpus) {
130 131
		printk(KERN_EMERG "IPI complete\n");
		return;
132
	}
133

134
	printk(KERN_EMERG "ERROR: %d cpu(s) not responding\n",
135
		ncpus - atomic_read(&cpus_in_crash));
136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162

	/*
	 * If we have a panic timeout set then we can't wait indefinitely
	 * for someone to activate system reset. We also give up on the
	 * second time through if system reset fail to work.
	 */
	if ((panic_timeout > 0) || (tries > 0))
		return;

	/*
	 * A system reset will cause all CPUs to take an 0x100 exception.
	 * The primary CPU returns here via setjmp, and the secondary
	 * CPUs reexecute the crash_kexec_secondary path.
	 */
	old_handler = __debugger;
	__debugger = handle_fault;
	crash_shutdown_cpu = smp_processor_id();

	if (setjmp(crash_shutdown_buf) == 0) {
		printk(KERN_EMERG "Activate system reset (dumprestart) "
				  "to stop other cpu(s)\n");

		/*
		 * A system reset will force all CPUs to execute the
		 * crash code again. We need to reset cpus_in_crash so we
		 * wait for everyone to do this.
		 */
163
		atomic_set(&cpus_in_crash, 0);
164 165
		smp_mb();

166
		while (atomic_read(&cpus_in_crash) < ncpus)
167 168 169 170 171 172 173 174
			cpu_relax();
	}

	crash_shutdown_cpu = -1;
	__debugger = old_handler;

	tries++;
	goto again;
175
}
176 177

/*
178
 * This function will be called by secondary cpus.
179 180 181 182
 */
void crash_kexec_secondary(struct pt_regs *regs)
{
	unsigned long flags;
183
	int msecs = SECONDARY_TIMEOUT;
184 185

	local_irq_save(flags);
186

187
	/* Wait for the primary crash CPU to signal its progress */
188 189
	while (crashing_cpu < 0) {
		if (--msecs < 0) {
190
			/* No response, kdump image may not have been loaded */
191 192 193
			local_irq_restore(flags);
			return;
		}
194

195 196
		mdelay(1);
	}
197

198 199 200
	crash_ipi_callback(regs);
}

#else	/* ! CONFIG_SMP */

/* UP build: only spinning secondaries (if any) need to be released. */
static void crash_kexec_prepare_cpus(int cpu)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c ?
	 */
#ifdef CONFIG_PPC64
	smp_release_cpus();
#else
	/* FIXME */
#endif
}

/* UP build: there are no secondary CPUs, so nothing to do. */
void crash_kexec_secondary(struct pt_regs *regs)
{
}
#endif	/* CONFIG_SMP */
/* wait for all the CPUs to hit real mode but timeout if they don't come in */
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
static void __maybe_unused crash_kexec_wait_realmode(int cpu)
{
	unsigned int msecs;
	int i;

	/* The REAL_MODE_TIMEOUT ms budget is shared across all CPUs. */
	msecs = REAL_MODE_TIMEOUT;
	for (i = 0; i < nr_cpu_ids && msecs > 0; i++) {
		if (i == cpu)
			continue;

		while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
				break;
			msecs--;
			mdelay(1);
		}
	}
	mb();
}
#else
static inline void crash_kexec_wait_realmode(int cpu) {}
#endif	/* CONFIG_SMP && CONFIG_PPC64 */

249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301
/*
 * Register a function to be called on shutdown.  Only use this if you
 * can't reset your device in the second kernel.
 */
int crash_shutdown_register(crash_shutdown_t handler)
{
	unsigned int i, rc;

	spin_lock(&crash_handlers_lock);
	for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
		if (!crash_shutdown_handles[i]) {
			/* Insert handle at first empty entry */
			crash_shutdown_handles[i] = handler;
			rc = 0;
			break;
		}

	if (i == CRASH_HANDLER_MAX) {
		printk(KERN_ERR "Crash shutdown handles full, "
		       "not registered.\n");
		rc = 1;
	}

	spin_unlock(&crash_handlers_lock);
	return rc;
}
EXPORT_SYMBOL(crash_shutdown_register);

/*
 * Remove a previously registered shutdown handler.
 * Returns 0 on success, 1 if the handler was never registered.
 */
int crash_shutdown_unregister(crash_shutdown_t handler)
{
	unsigned int slot, ret;

	spin_lock(&crash_handlers_lock);

	/* Locate the handler in the table. */
	for (slot = 0 ; slot < CRASH_HANDLER_MAX; slot++)
		if (crash_shutdown_handles[slot] == handler)
			break;

	if (slot == CRASH_HANDLER_MAX) {
		printk(KERN_ERR "Crash shutdown handle not found\n");
		ret = 1;
	} else {
		/* Close the gap, keeping the list NULL terminated. */
		for (; crash_shutdown_handles[slot]; slot++)
			crash_shutdown_handles[slot] =
				crash_shutdown_handles[slot + 1];
		ret = 0;
	}

	spin_unlock(&crash_handlers_lock);
	return ret;
}
EXPORT_SYMBOL(crash_shutdown_unregister);

302 303
void default_machine_crash_shutdown(struct pt_regs *regs)
{
304 305 306
	unsigned int i;
	int (*old_handler)(struct pt_regs *regs);

307 308
	/*
	 * This function is only called after the system
L
Lee Revell 已提交
309
	 * has panicked or is otherwise in a critical state.
310 311 312 313 314 315 316
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
317
	hard_irq_disable();
318

319 320 321 322 323
	/*
	 * Make a note of crashing cpu. Will be used in machine_kexec
	 * such that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();
324 325 326 327 328 329 330 331

	/*
	 * If we came in via system reset, wait a while for the secondary
	 * CPUs to enter.
	 */
	if (TRAP(regs) == 0x100)
		mdelay(PRIMARY_TIMEOUT);

332
	crash_kexec_prepare_cpus(crashing_cpu);
333 334 335 336 337

	crash_save_cpu(regs, crashing_cpu);

	time_to_dump = 1;

338 339
	crash_kexec_wait_realmode(crashing_cpu);

340
	machine_kexec_mask_interrupts();
341 342

	/*
343
	 * Call registered shutdown routines safely.  Swap out
344 345 346 347
	 * __debugger_fault_handler, and replace on exit.
	 */
	old_handler = __debugger_fault_handler;
	__debugger_fault_handler = handle_fault;
348
	crash_shutdown_cpu = smp_processor_id();
349 350 351 352 353 354 355 356 357 358 359 360
	for (i = 0; crash_shutdown_handles[i]; i++) {
		if (setjmp(crash_shutdown_buf) == 0) {
			/*
			 * Insert syncs and delay to ensure
			 * instructions in the dangerous region don't
			 * leak away from this protected region.
			 */
			asm volatile("sync; isync");
			/* dangerous region */
			crash_shutdown_handles[i]();
			asm volatile("sync; isync");
		}
361
	}
362
	crash_shutdown_cpu = -1;
363
	__debugger_fault_handler = old_handler;
364

365 366
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
367
}