/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/semaphore.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t *crash_notes;

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
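/* (The .start/.end values above are filled in by architecture setup
 * code when it parses and reserves the crashkernel=size@offset boot
 * parameter, e.g. crashkernel=64M@16M.) */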

int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_init(p) || panic_on_oops)
		return 1;
	return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_CODE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
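
/*
 * Concretely, each kimage_entry_t in the descriptor list described
 * above is a physical address ORed with one of the IND_* type flags
 * from <linux/kexec.h>:
 *
 *	dest | IND_DESTINATION	set the current copy destination
 *	addr | IND_INDIRECTION	continue reading entries from this page
 *	page | IND_SOURCE	copy this page to the destination, then
 *				advance the destination by PAGE_SIZE
 *	IND_DONE		end of the list
 */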

/*
 * KIMAGE_NO_DEST is an impossible destination address, used when
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
			   unsigned long nr_segments,
			   struct kexec_segment __user *segments)
{
	size_t segment_bytes;
	struct kimage *image;
	unsigned long i;
	int result;

	/* Allocate a controlling structure */
	result = -ENOMEM;
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		goto out;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->start = entry;
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unuseable pages */
	INIT_LIST_HEAD(&image->unuseable_pages);

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	result = copy_from_user(image->segment, segments, segment_bytes);
	if (result)
		goto out;

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			goto out;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			goto out;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things could happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				goto out;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;

}

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
				unsigned long nr_segments,
				struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Allocate and initialize a controlling structure */
	image = NULL;
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	*rimage = image;

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_CODE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	result = 0;
 out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
				unsigned long nr_segments,
				struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;
	unsigned long i;

	image = NULL;
	/* Verify we have a valid entry point */
	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
		result = -EADDRNOTAVAIL;
		goto out;
	}

	/* Allocate and initialize a controlling structure */
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/* Enable the special crash kernel control page
	 * allocation policy.
	 */
	image->control_page = crashk_res.start;
	image->type = KEXEC_TYPE_CRASH;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of ram.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		/* Ensure we are within the crash kernel limits */
		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
			goto out;
	}

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_CODE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;
		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special: they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special: they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
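	/* For example, with order 1 (size 0x2000) and control_page at
	 * 0x1000000, this yields hole_start 0x1000000 and hole_end
	 * 0x1001fff. */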
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
			break;
		if (hole_end > crashk_res.end)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}
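
/*
 * A sketch of the resulting layout: when an entry page fills up, the
 * code above turns its final slot into a link and continues on a
 * fresh page, so the list reads as
 *
 *	[entry][entry]...[phys(next page) | IND_INDIRECTION]
 *	                                        `-> [entry][entry]...
 *
 * Readers such as for_each_kimage_entry() below follow these links
 * transparently.
 */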

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unuseable pages I have cached */
	kimage_free_page_list(&image->unuseable_pages);
}

static int kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;

	return 0;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION)? \
			phys_to_virt((entry & PAGE_MASK)): ptr +1)
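
/*
 * Example use of the iterator above (an illustrative snippet only,
 * not a helper defined in this file):
 *
 *	kimage_entry_t *ptr, entry;
 *	unsigned long nr_source = 0;
 *
 *	for_each_kimage_entry(image, ptr, entry)
 *		if (entry & IND_SOURCE)
 *			nr_source++;
 */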

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);
	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.   If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used, file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unuseable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it.
			 */
			addr = old_addr;
			page = old_page;
			break;
		} else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		memset(ptr, 0, PAGE_SIZE);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes)
			uchunk = ubytes;

		result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = (result < 0) ? result : -EIO;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes) {
			uchunk = ubytes;
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}
		result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = (result < 0) ? result : -EIO;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and copies the image to its final destination, then
 *   jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you must do it yourself.
 */
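
/*
 * For illustration, a minimal sketch of a user space caller
 * (hypothetical buffer, sizes and load address; in practice
 * kexec-tools builds the segment list):
 *
 *	struct kexec_segment seg = {
 *		.buf   = image_buf,
 *		.bufsz = image_size,
 *		.mem   = 0x100000,
 *		.memsz = (image_size + 4095) & ~4095UL,
 *	};
 *	syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);
 */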
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
/*
 * A home grown binary mutex.
 * Nothing can wait so this mutex is safe to use
 * in interrupt context :)
 */
static int kexec_lock;

asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
				struct kexec_segment __user *segments,
				unsigned long flags)
{
	struct kimage **dest_image, *image;
	int locked;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;
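	/* i.e. once the architecture bits are masked off, only bits
	 * that are part of KEXEC_FLAGS (currently just KEXEC_ON_CRASH)
	 * may remain set. */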

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	locked = xchg(&kexec_lock, 1);
	if (locked)
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_normal_alloc(&image, entry,
							nr_segments, segments);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_crash_alloc(&image, entry,
						     nr_segments, segments);
		}
		if (result)
			goto out;

		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		result = kimage_terminate(image);
		if (result)
			goto out;
	}
	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);

out:
	locked = xchg(&kexec_lock, 0); /* Release the mutex */
	BUG_ON(!locked);
	kimage_free(image);

	return result;
}

#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
				unsigned long nr_segments,
				struct compat_kexec_segment __user *segments,
				unsigned long flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
	int locked;

	/* Take the kexec_lock here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	locked = xchg(&kexec_lock, 1);
	if (!locked) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;
			crash_setup_regs(&fixed_regs, regs);
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		locked = xchg(&kexec_lock, 0);
		BUG_ON(!locked);
	}
}

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}
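
/*
 * For the NT_PRSTATUS note written below, the helpers above produce
 * the standard ELF note layout, each piece padded to a 4-byte
 * boundary:
 *
 *	n_namesz=5 | n_descsz | n_type | "CORE\0" + pad | prstatus data
 *
 * with the buffer terminated by final_note()'s all-zero note.
 */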

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= NR_CPUS))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well-defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
				sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		printk("Kexec: Memory allocation for saving cpu register"
		" states failed\n");
		return -ENOMEM;
	}
	return 0;
}
module_init(crash_notes_memory_init)