/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 *
 */


#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

/* List of PBEs used for creating and restoring the suspend image */
struct pbe *restore_pblist;

static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;

#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}

struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;

static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;
	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!(pfn%10000))
			printk(".");
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}

int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem");
	drain_local_pages();
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	printk("\n");
	return 0;
}

int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;
		highmem_copy = save->next;

		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}
#else
static inline unsigned int count_highmem_pages(void) {return 0;}
static inline int save_highmem(void) {return 0;}
static inline int restore_highmem(void) {return 0;}
#endif

/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag
 *	and we count them using unsafe_pages
 */

static unsigned int unsafe_pages;

static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && PageNosaveFree(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			SetPageNosave(virt_to_page(res));
			unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, 1);
}
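
/*
 * Illustrative sketch, not part of the original file: resume-time code
 * that needs scratch memory which must not collide with the image being
 * restored can take it from get_safe_page().  The page comes back zeroed
 * and flagged PG_nosave/PG_nosave_free, so swsusp_free() will reclaim it
 * later; the caller is assumed not to free it itself.
 *
 *	void *scratch = (void *)get_safe_page(GFP_ATOMIC);
 *
 *	if (!scratch)
 *		return -ENOMEM;
 *	... use scratch as a temporary buffer or page table ...
 */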

/**
 *	free_image_page - free page represented by @addr, allocated with
 *	alloc_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	ClearPageNosave(virt_to_page(addr));
	if (clear_nosave_free)
		ClearPageNosaveFree(virt_to_page(addr));
	free_page((unsigned long)addr);
}

/**
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

static inline int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

/**
 *	saveable_page - Determine whether a page should be cloned or not.
 *	@pfn:	The page
 *
 *	We save a page if it isn't Nosave, is not in the range of pages
 *	statically defined as 'unsaveable', and isn't part of a free chunk
 *	of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	if (PageNosave(page))
		return NULL;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;
	if (PageNosaveFree(page))
		return NULL;

	return page;
}

unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			n += !!saveable_page(pfn);
	}
	return n;
}

static inline void copy_data_page(long *dst, long *src)
{
	int n;

	/* copy_page and memcpy are not usable for copying task structs. */
	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	struct pbe *pbe;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = saveable_page(pfn);

			if (page) {
				void *ptr = page_address(page);

				BUG_ON(!pbe);
				copy_data_page((void *)pbe->address, ptr);
				pbe->orig_address = (unsigned long)ptr;
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}

/**
 *	free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		free_image_page(pblist, clear_nosave_free);
		pblist = pbe;
	}
}

/**
 *	fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage, unsigned int n)
{
	struct pbe *p;

	p = pbpage;
	pbpage += n - 1;
	do
		p->next = p + 1;
	while (++p < pbpage);
}

/**
 *	create_pbe_list - Create a list of PBEs on top of a given chain
 *	of memory pages allocated with alloc_pagedir()
 *
 *	This function assumes that pages allocated by alloc_image_page() will
 *	always be zeroed.
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage, PBES_PER_PAGE);
		num += PBES_PER_PAGE;
	}
	if (pbpage) {
		num -= PBES_PER_PAGE;
		fill_pb_page(pbpage, nr_pages - num);
	}
}

/**
 *	alloc_pagedir - Allocate the page directory.
 *
 *	First, determine exactly how many pages we need and
 *	allocate them.
 *
 *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 *	struct pbe elements (pbes) and the last element in the page points
 *	to the next page.
 *
 *	On each page we set up a list of struct_pbe elements.
 */

static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask,
				 int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pblist = alloc_image_page(gfp_mask, safe_needed);
	pbe = pblist;
	for (num = PBES_PER_PAGE; num < nr_pages; num += PBES_PER_PAGE) {
		if (!pbe) {
			free_pagedir(pblist, 1);
			return NULL;
		}
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
		pbe = pbe->next;
	}
	create_pbe_list(pblist, nr_pages);
	return pblist;
}
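
/*
 * Illustrative sketch, not part of the original file: the chain built by
 * alloc_pagedir() keeps PBES_PER_PAGE struct pbe entries per page, and
 * create_pbe_list() links them so that the last entry on each page
 * (reached via PB_PAGE_SKIP) points at the first entry of the next page.
 * The for_each_pbe()/for_each_pb_page() helpers from power.h rely on
 * this; open-coded, walking every PBE looks roughly like:
 *
 *	struct pbe *p;
 *
 *	for (p = pblist; p; p = p->next) {
 *		... p->orig_address and p->address describe one page ...
 *	}
 */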

/**
 * Free pages we allocated for suspend. Suspend pages are allocated
 * before atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
}


/**
 *	enough_free_mem - Make sure we have enough free memory to snapshot.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
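
/*
 * Worked example with illustrative figures only: besides the nr_pages
 * data pages themselves, the check above reserves one page of page
 * directory per PBES_PER_PAGE image pages plus PAGES_FOR_IO headroom for
 * the drivers.  Assuming PAGE_SIZE is 4096 and sizeof(struct pbe) is 12
 * (a 32-bit build), PBES_PER_PAGE is 341, so an image of 100000 pages
 * needs more than
 *
 *	100000 + PAGES_FOR_IO + (100000 + 340) / 341
 *	  = 100294 + PAGES_FOR_IO
 *
 * free lowmem pages for swsusp_save() to go ahead.
 */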

static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}

static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}

asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section:\n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	restore_pblist = swsusp_alloc(nr_pages);
	if (!restore_pblist)
		return -ENOMEM;

	/* During the allocation of the suspend pagedir, new cold pages may
	 * have appeared.  Kill them.
	 */
	drain_local_pages();
	copy_data_pages(restore_pblist);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This especially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_copy_pages = nr_pages;
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
	return 0;
}

static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}

/**
 *	pack_orig_addresses - the .orig_address fields of the PBEs from the
 *	list starting at @pbe are stored in the array @buf[] (1 page)
 */

static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	if (!pbe)
		for (; j < PAGE_SIZE / sizeof(long); j++)
			buf[j] = 0;
	return pbe;
}

/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of data stream condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		handle->pbe = restore_pblist;
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe)
				handle->pbe = restore_pblist;
		} else {
			handle->buffer = (void *)handle->pbe->address;
			handle->pbe = handle->pbe->next;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
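
/*
 * Minimal caller sketch, illustrative only and not part of the original
 * file: how the side that saves the image might drain the snapshot one
 * page at a time.  write_page() is a hypothetical helper standing in for
 * whatever actually stores data_of(handle) on disk or swap.
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle, PAGE_SIZE)) > 0) {
 *		int error = write_page(data_of(handle), ret);
 *
 *		if (error)
 *			return error;
 *	}
 *	return ret;	(0 on end of data, negative on error)
 */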

/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return -EINVAL;

	/* Clear page flags */
	for_each_zone (zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				ClearPageNosaveFree(pfn_to_page(pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist) {
		if (virt_addr_valid(p->orig_address))
			SetPageNosaveFree(virt_to_page(p->orig_address));
		else
			return -EFAULT;
	}

	unsafe_pages = 0;

	return 0;
}

static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}

static int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname,system_utsname.sysname))
		reason = "system type";
	if (strcmp(info->uts.release,system_utsname.release))
		reason = "kernel release";
	if (strcmp(info->uts.version,system_utsname.version))
		reason = "version";
	if (strcmp(info->uts.machine,system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 *	load_header - check the image header and copy data from it
 */

static int load_header(struct snapshot_handle *handle,
                              struct swsusp_info *info)
{
	int error;
	struct pbe *pblist;

	error = check_header(info);
	if (!error) {
		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
		if (!pblist)
			return -ENOMEM;
		restore_pblist = pblist;
		handle->pbe = pblist;
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 *	the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
                                                struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}

/**
 *	prepare_image - use metadata contained in the PBE list
 *	pointed to by restore_pblist to mark the pages that will
 *	be overwritten in the process of restoring the system
 *	memory state from the image ("unsafe" pages) and allocate
 *	memory for the image
 *
 *	The idea is to allocate the PBE list first and then
 *	allocate as many pages as are needed for the image data,
 *	but not to assign these pages to the PBEs initially.
 *	Instead, we just mark them as allocated and create a list
 *	of "safe" pages which will be used later
 */

struct safe_page {
	struct safe_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct safe_page *safe_pages;

static int prepare_image(struct snapshot_handle *handle)
{
	int error = 0;
	unsigned int nr_pages = nr_copy_pages;
	struct pbe *p, *pblist = NULL;

	p = restore_pblist;
	error = mark_unsafe_pages(p);
	if (!error) {
		pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
		if (pblist)
			copy_page_backup_list(pblist, p);
		free_pagedir(p, 0);
		if (!pblist)
			error = -ENOMEM;
	}
	safe_pages = NULL;
	if (!error && nr_pages > unsafe_pages) {
		nr_pages -= unsafe_pages;
		while (nr_pages--) {
			struct safe_page *ptr;

			ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC);
			if (!ptr) {
				error = -ENOMEM;
				break;
			}
			if (!PageNosaveFree(virt_to_page(ptr))) {
				/* The page is "safe", add it to the list */
				ptr->next = safe_pages;
				safe_pages = ptr;
			}
			/* Mark the page as allocated */
			SetPageNosave(virt_to_page(ptr));
			SetPageNosaveFree(virt_to_page(ptr));
		}
	}
	if (!error) {
		restore_pblist = pblist;
	} else {
		handle->pbe = NULL;
		swsusp_free();
	}
	return error;
}

static void *get_buffer(struct snapshot_handle *handle)
{
	struct pbe *pbe = handle->pbe, *last = handle->last_pbe;
	struct page *page = virt_to_page(pbe->orig_address);

	if (PageNosave(page) && PageNosaveFree(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the read page
		 */
		pbe->address = 0;
		if (last && last->next)
			last->next = NULL;
		return (void *)pbe->orig_address;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the read page
	 */
	pbe->address = (unsigned long)safe_pages;
	safe_pages = safe_pages->next;
	if (last)
		last->next = pbe;
	handle->last_pbe = pbe;
	return (void *)pbe->address;
}

/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	int error = 0;

	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	handle->sync_read = 1;
	if (handle->prev < handle->cur) {
		if (!handle->prev) {
			error = load_header(handle,
					(struct swsusp_info *)buffer);
			if (error)
				return error;
		} else if (handle->prev <= nr_meta_pages) {
			handle->pbe = unpack_orig_addresses(buffer,
							handle->pbe);
			if (!handle->pbe) {
				error = prepare_image(handle);
				if (error)
					return error;
				handle->pbe = restore_pblist;
				handle->last_pbe = NULL;
				handle->buffer = get_buffer(handle);
				handle->sync_read = 0;
			}
		} else {
			handle->pbe = handle->pbe->next;
			handle->buffer = get_buffer(handle);
			handle->sync_read = 0;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
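
/*
 * Minimal caller sketch, illustrative only and not part of the original
 * file: the resume side feeds the image back in the mirror-image way,
 * assuming a hypothetical read_page() helper that fills data_of(handle)
 * with the next ret bytes from storage.
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_write_next(&handle, PAGE_SIZE)) > 0) {
 *		int error = read_page(data_of(handle), ret);
 *
 *		if (error)
 *			return error;
 *	}
 *	if (ret)
 *		return ret;
 *	return snapshot_image_loaded(&handle) ? 0 : -ENODATA;
 */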

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
		handle->cur <= nr_meta_pages + nr_copy_pages);
}