/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 *
 */


#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

/* List of PBEs used for creating and restoring the suspend image */
struct pbe *restore_pblist;

static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;

#ifdef CONFIG_HIGHMEM
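/**
 *	count_highmem_pages - Count the highmem pages that need to be saved.
 *
 *	A highmem page is counted if it is valid, not reserved, and not
 *	marked free by mark_free_pages() (ie. not PageNosaveFree).
 */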
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}

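/*
 * Highmem pages are not copied in place.  Each saved page is buffered
 * in a lowmem page linked into the highmem_copy list below.
 */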
struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;

static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;
	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!(pfn%10000))
			printk(".");
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}

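/**
 *	save_highmem - Buffer every saveable highmem page in lowmem.
 *
 *	Returns 0 on success or -ENOMEM if a buffer page cannot be
 *	allocated.
 */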
int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem");
	drain_local_pages();
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	printk("\n");
	return 0;
}

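/**
 *	restore_highmem - Copy the buffered highmem pages back to their
 *	original page frames and free the lowmem buffers.
 */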
int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;
		highmem_copy = save->next;

		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}
#else
static inline unsigned int count_highmem_pages(void) {return 0;}
static inline int save_highmem(void) {return 0;}
static inline int restore_highmem(void) {return 0;}
#endif

/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag
 *	and we count them using unsafe_pages
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int unsafe_pages;

static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && PageNosaveFree(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			SetPageNosave(virt_to_page(res));
			unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, PG_SAFE);
}

/**
 *	free_image_page - free page represented by @addr, allocated with
 *	alloc_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	ClearPageNosave(virt_to_page(addr));
	if (clear_nosave_free)
		ClearPageNosaveFree(virt_to_page(addr));
	free_page((unsigned long)addr);
}

/**
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

static inline int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

/**
 *	saveable_page - Determine whether a page should be cloned or not.
 *	@pfn:	The page frame number of the page in question.
 *
 *	We save a page if it isn't Nosave, is not within the range of pages
 *	statically defined as 'unsaveable', and is not part of a free chunk
 *	of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	if (PageNosave(page))
		return NULL;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;
	if (PageNosaveFree(page))
		return NULL;

	return page;
}

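/**
 *	count_data_pages - Count the saveable pages in all lowmem zones.
 */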
unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			n += !!saveable_page(pfn);
	}
	return n;
}

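/**
 *	copy_data_page - Copy one page of data from @src to @dst.
 */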
static inline void copy_data_page(long *dst, long *src)
{
	int n;

	/* copy_page and memcpy are not usable for copying task structs. */
	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

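/**
 *	copy_data_pages - Copy every saveable lowmem page into the PBEs of
 *	@pblist.  The list is expected to provide exactly one PBE per
 *	saveable page (we BUG() otherwise).
 */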
static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	struct pbe *pbe;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = saveable_page(pfn);

			if (page) {
				void *ptr = page_address(page);

				BUG_ON(!pbe);
				copy_data_page((void *)pbe->address, ptr);
				pbe->orig_address = (unsigned long)ptr;
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}

/**
 *	free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		free_image_page(pblist, clear_nosave_free);
		pblist = pbe;
	}
}

/**
 *	fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage, unsigned int n)
{
	struct pbe *p;

	p = pbpage;
	pbpage += n - 1;
	do
		p->next = p + 1;
	while (++p < pbpage);
}

/**
 *	create_pbe_list - Create a list of PBEs on top of a given chain
 *	of memory pages allocated with alloc_pagedir()
 *
 *	This function assumes that pages allocated by alloc_image_page() will
 *	always be zeroed.
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage, PBES_PER_PAGE);
		num += PBES_PER_PAGE;
	}
	if (pbpage) {
		num -= PBES_PER_PAGE;
		fill_pb_page(pbpage, nr_pages - num);
	}
}

/**
 *	alloc_pagedir - Allocate the page directory.
 *
 *	First, determine exactly how many pages we need and
 *	allocate them.
 *
 *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 *	struct pbe elements (pbes) and the last element in the page points
 *	to the next page.
 *
 *	On each page we set up a list of struct pbe elements.
 */

static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask,
				 int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pblist = alloc_image_page(gfp_mask, safe_needed);
	pbe = pblist;
	for (num = PBES_PER_PAGE; num < nr_pages; num += PBES_PER_PAGE) {
		if (!pbe) {
			free_pagedir(pblist, PG_UNSAFE_CLEAR);
			return NULL;
		}
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
		pbe = pbe->next;
	}
	create_pbe_list(pblist, nr_pages);
	return pblist;
}

/**
 * Free pages we allocated for suspend. Suspend pages are allocated
 * before the atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
}


/**
 *	enough_free_mem - Make sure we have enough free memory for the snapshot.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}

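/**
 *	alloc_data_pages - Allocate an image page for each PBE in @pblist.
 */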
static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}

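/**
 *	swsusp_alloc - Allocate the page directory and the data pages for
 *	the snapshot image.
 */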
static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (!pblist) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, PG_ANY)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}

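/**
 *	swsusp_save - The core of the suspend critical section: count the
 *	saveable pages, allocate the page directory and the image pages,
 *	and perform the atomic copy of memory.
 */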
asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section:\n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	restore_pblist = swsusp_alloc(nr_pages);
	if (!restore_pblist)
		return -ENOMEM;

	/* While allocating the suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(restore_pblist);

	/*
	 * End of critical section.  From now on, we can write to memory,
	 * but we must not touch disk.  In particular, we must _not_ touch
	 * swap space!  Except we must write out our image of course.
	 */

	nr_copy_pages = nr_pages;
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
	return 0;
}

static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}

/**
 *	pack_orig_addresses - the .orig_address fields of the PBEs from the
 *	list starting at @pbe are stored in the array @buf[] (1 page)
 */

static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	if (!pbe)
		for (; j < PAGE_SIZE / sizeof(long); j++)
			buf[j] = 0;
	return pbe;
}

/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function on each subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of data stream condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		handle->pbe = restore_pblist;
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe)
				handle->pbe = restore_pblist;
		} else {
			handle->buffer = (void *)handle->pbe->address;
			handle->pbe = handle->pbe->next;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}

/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return -EINVAL;

	/* Clear page flags */
	for_each_zone (zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				ClearPageNosaveFree(pfn_to_page(pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist) {
		if (virt_addr_valid(p->orig_address))
			SetPageNosaveFree(virt_to_page(p->orig_address));
		else
			return -EFAULT;
	}

	unsafe_pages = 0;

	return 0;
}

static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}

static int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname,system_utsname.sysname))
		reason = "system type";
	if (strcmp(info->uts.release,system_utsname.release))
		reason = "kernel release";
	if (strcmp(info->uts.version,system_utsname.version))
		reason = "version";
	if (strcmp(info->uts.machine,system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 *	load_header - check the image header and copy data from it
 */

static int load_header(struct snapshot_handle *handle,
                              struct swsusp_info *info)
{
	int error;
	struct pbe *pblist;

	error = check_header(info);
	if (!error) {
		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, PG_ANY);
		if (!pblist)
			return -ENOMEM;
		restore_pblist = pblist;
		handle->pbe = pblist;
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 *	the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
                                                struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}

/**
 *	prepare_image - use the metadata contained in the PBE list
 *	pointed to by restore_pblist to mark the pages that will be
 *	overwritten in the process of restoring the system memory state
 *	from the image ("unsafe" pages) and to allocate memory for the
 *	image.
 *
 *	The idea is to allocate the PBE list first and then allocate as
 *	many pages as are needed for the image data, but not to assign
 *	these pages to the PBEs initially.  Instead, we just mark them as
 *	allocated and create a list of "safe" pages which will be used
 *	later.
 */

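/*
 * List of "safe" pages, ie. pages that do not conflict with the page
 * frames the image data will be restored into.  Each element is padded
 * to a whole page frame, so entries can be handed out directly as
 * image pages by get_buffer().
 */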
struct safe_page {
	struct safe_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct safe_page *safe_pages;

static int prepare_image(struct snapshot_handle *handle)
{
	int error = 0;
	unsigned int nr_pages = nr_copy_pages;
	struct pbe *p, *pblist = NULL;

	p = restore_pblist;
	error = mark_unsafe_pages(p);
	if (!error) {
		pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, PG_SAFE);
		if (pblist)
			copy_page_backup_list(pblist, p);
		free_pagedir(p, PG_UNSAFE_KEEP);
		if (!pblist)
			error = -ENOMEM;
	}
	safe_pages = NULL;
	if (!error && nr_pages > unsafe_pages) {
		nr_pages -= unsafe_pages;
		while (nr_pages--) {
			struct safe_page *ptr;

			ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC);
			if (!ptr) {
				error = -ENOMEM;
				break;
			}
			if (!PageNosaveFree(virt_to_page(ptr))) {
				/* The page is "safe", add it to the list */
				ptr->next = safe_pages;
				safe_pages = ptr;
			}
			/* Mark the page as allocated */
			SetPageNosave(virt_to_page(ptr));
			SetPageNosaveFree(virt_to_page(ptr));
		}
	}
	if (!error) {
		restore_pblist = pblist;
	} else {
		handle->pbe = NULL;
		swsusp_free();
	}
	return error;
}

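/**
 *	get_buffer - Select the page frame that the next page of data read
 *	from the image will be stored in: the page's "original" frame if
 *	it has already been allocated, or the next "safe" page otherwise.
 */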
static void *get_buffer(struct snapshot_handle *handle)
{
	struct pbe *pbe = handle->pbe, *last = handle->last_pbe;
	struct page *page = virt_to_page(pbe->orig_address);

	if (PageNosave(page) && PageNosaveFree(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the read page
		 */
		pbe->address = 0;
		if (last && last->next)
			last->next = NULL;
		return (void *)pbe->orig_address;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the read page
	 */
	pbe->address = (unsigned long)safe_pages;
	safe_pages = safe_pages->next;
	if (last)
		last->next = pbe;
	handle->last_pbe = pbe;
	return (void *)pbe->address;
}

/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function on each subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	int error = 0;

	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	handle->sync_read = 1;
	if (handle->prev < handle->cur) {
		if (!handle->prev) {
			error = load_header(handle,
					(struct swsusp_info *)buffer);
			if (error)
				return error;
		} else if (handle->prev <= nr_meta_pages) {
			handle->pbe = unpack_orig_addresses(buffer,
							handle->pbe);
			if (!handle->pbe) {
				error = prepare_image(handle);
				if (error)
					return error;
				handle->pbe = restore_pblist;
				handle->last_pbe = NULL;
				handle->buffer = get_buffer(handle);
				handle->sync_read = 0;
			}
		} else {
			handle->pbe = handle->pbe->next;
			handle->buffer = get_buffer(handle);
			handle->sync_read = 0;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
		handle->cur <= nr_meta_pages + nr_copy_pages);
}