/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/sections.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t *crash_notes;

/* vmcoreinfo stuff */
unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
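
/*
 * An illustrative sketch (made-up addresses, not a dump of a real
 * image) of the entry list the assembly stub walks; the IND_* flags
 * are the ones from <linux/kexec.h>:
 *
 *	0x20000 | IND_DESTINATION    start copying to physical 0x20000
 *	0x31000 | IND_SOURCE         copy one page from physical 0x31000
 *	0x32000 | IND_SOURCE         the destination advances a page each time
 *	0x40000 | IND_INDIRECTION    continue with the entries at 0x40000
 *	IND_DONE                     end of the list
 */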

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
			   unsigned long nr_segments,
			   struct kexec_segment __user *segments)
{
	size_t segment_bytes;
	struct kimage *image;
	unsigned long i;
	int result;

	/* Allocate a controlling structure */
	result = -ENOMEM;
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		goto out;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->start = entry;
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unuseable_pages);

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	result = copy_from_user(image->segment, segments, segment_bytes);
	if (result)
		goto out;

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			goto out;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			goto out;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stomps on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				goto out;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
				unsigned long nr_segments,
				struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Allocate and initialize a controlling structure */
	image = NULL;
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	*rimage = image;

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	image->swap_page = kimage_alloc_control_pages(image, 0);
	if (!image->swap_page) {
		printk(KERN_ERR "Could not allocate swap buffer\n");
		goto out;
	}

	result = 0;
 out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
				unsigned long nr_segments,
				struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;
	unsigned long i;

	image = NULL;
	/* Verify we have a valid entry point */
	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
		result = -EADDRNOTAVAIL;
		goto out;
	}

	/* Allocate and initialize a controlling structure */
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/* Enable the special crash kernel control page
	 * allocation policy.
	 */
	image->control_page = crashk_res.start;
	image->type = KEXEC_TYPE_CRASH;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		/* Ensure we are within the crash kernel limits */
		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
			goto out;
	}

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;
		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special: they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special: they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
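	/* Round the candidate hole up to the allocation size:
	 * (x + (size - 1)) & ~(size - 1) is the usual power-of-two
	 * round-up idiom, keeping control allocations naturally aligned.
	 */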
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
			break;
		if (hole_end > crashk_res.end)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unuseable_pages);
}
static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}
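
/*
 * for_each_kimage_entry() walks the in-kernel view of the entry list,
 * following IND_INDIRECTION links into the next indirection page and
 * stopping at IND_DONE.
 */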

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION)? \
			phys_to_virt((entry & PAGE_MASK)): ptr +1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		}
		else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);
	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.   If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used, file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unuseable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		} else {
			/* Place the page on the destination list I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		memset(ptr, 0, PAGE_SIZE);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes)
			uchunk = ubytes;

		result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = (result < 0) ? result : -EIO;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes) {
			uchunk = ubytes;
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}
		result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = (result < 0) ? result : -EIO;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and copies the image to its final destination, and then
 *   jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you must do it yourself.
 */
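
/*
 * A minimal user-space sketch of loading one segment (kernel_buf,
 * kernel_len, entry and the 0x100000 destination are illustrative
 * assumptions, not values defined in this file):
 *
 *	struct kexec_segment seg = {
 *		.buf   = kernel_buf,               user-space source
 *		.bufsz = kernel_len,
 *		.mem   = 0x100000,                 page-aligned destination
 *		.memsz = (kernel_len + 4095) & ~4095UL,
 *	};
 *	syscall(__NR_kexec_load, entry, 1UL, &seg, KEXEC_ARCH_DEFAULT);
 */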
struct kimage *kexec_image;
struct kimage *kexec_crash_image;

static DEFINE_MUTEX(kexec_mutex);

asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
				struct kexec_segment __user *segments,
				unsigned long flags)
{
	struct kimage **dest_image, *image;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/*
	 * Verify we have a legal set of flags.
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_normal_alloc(&image, entry,
							nr_segments, segments);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_crash_alloc(&image, entry,
						     nr_segments, segments);
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
	}
	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);

out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return result;
}

#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
				unsigned long nr_segments,
				struct compat_kexec_segment __user *segments,
				unsigned long flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i=0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;
			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}
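
/*
 * The record built above follows the standard ELF note layout, each
 * field padded to 4-byte alignment; e.g. for name "CORE" and type
 * NT_PRSTATUS:
 *
 *	n_namesz | n_descsz | n_type | "CORE\0" + pad | desc bytes + pad
 */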

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= NR_CPUS))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		printk(KERN_ERR "Kexec: Memory allocation for saving cpu register"
		       " states failed\n");
		return -ENOMEM;
	}
	return 0;
}
module_init(crash_notes_memory_init)


/*
 * Parsing of the "crashkernel" command line.
 *
 * This code is intended to be called from architecture-specific code.
 */


/*
 * This function parses command lines in the format
 *
 *   crashkernel=ramsize-range:size[,...][@offset]
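 *
 * For example, "crashkernel=512M-2G:64M,2G-:128M" reserves 64M if the
 * system has between 512M and 2G of RAM, and 128M if it has 2G or more.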
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_mem(char 			*cmdline,
					unsigned long long	system_ram,
					unsigned long long	*crash_size,
					unsigned long long	*crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warning("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("crashkernel: Memory "
						"value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warning("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warning("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warning("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur != ' ' && *cur != '@')
			cur++;
		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("Memory value expected "
						"after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 * 	crashkernel=size[@offset]
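 *
 * For example, "crashkernel=64M@16M" reserves 64M of RAM starting at
 * physical address 16M.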
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_simple(char 		*cmdline,
					   unsigned long long 	*crash_size,
					   unsigned long long 	*crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warning("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);

	return 0;
}

/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
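 *
 * A typical arch-side call looks like the sketch below, where
 * total_ram_bytes is a placeholder for however the architecture
 * computes its amount of system RAM:
 *
 *	ret = parse_crashkernel(boot_command_line, total_ram_bytes,
 *				&crash_size, &crash_base);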
 */
int __init parse_crashkernel(char 		 *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	char 	*p = cmdline, *ck_cmdline = NULL;
	char	*first_colon, *first_space;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, "crashkernel=");
	while (p) {
		ck_cmdline = p;
		p = strstr(p+1, "crashkernel=");
	}

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += 12; /* strlen("crashkernel=") */

	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
				crash_size, crash_base);
	else
		return parse_crashkernel_simple(ck_cmdline, crash_size,
				crash_base);

	return 0;
}



void crash_save_vmcoreinfo(void)
{
	u32 *buf;

	if (!vmcoreinfo_size)
		return;

	vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());

	buf = (u32 *)vmcoreinfo_note;

	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);

	final_note(buf);
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	int r;

	va_start(args, fmt);
	r = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (r + vmcoreinfo_size > vmcoreinfo_max_size)
		r = vmcoreinfo_max_size - vmcoreinfo_size;

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}

/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
{}

unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}

static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
	VMCOREINFO_SYMBOL(swapper_pg_dir);
	VMCOREINFO_SYMBOL(_stext);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);

	arch_crash_save_vmcoreinfo();

	return 0;
}

module_init(crash_save_vmcoreinfo_init)

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		mutex_lock(&pm_mutex);
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = device_suspend(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		error = disable_nonboot_cpus();
		if (error)
			goto Resume_devices;
		device_pm_lock();
		local_irq_disable();
		/* At this point, device_suspend() has been called,
		 * but *not* device_power_down(). We *must*
		 * device_power_down() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = device_power_down(PMSG_FREEZE);
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kernel_restart_prepare(NULL);
		printk(KERN_EMERG "Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		device_power_up(PMSG_RESTORE);
 Enable_irqs:
		local_irq_enable();
		device_pm_unlock();
		enable_nonboot_cpus();
 Resume_devices:
		device_resume(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		mutex_unlock(&pm_mutex);
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}