/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 *
 */


#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

struct pbe *pagedir_nosave;
static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;

#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}

struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;

static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;
	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!(pfn%10000))
			printk(".");
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}

int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem");
	drain_local_pages();
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	printk("\n");
	return 0;
}

int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;
		highmem_copy = save->next;

		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}
#else
static inline unsigned int count_highmem_pages(void) {return 0;}
static inline int save_highmem(void) {return 0;}
static inline int restore_highmem(void) {return 0;}
#endif

/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag
 *	and we count them using unsafe_pages
 */

static unsigned int unsafe_pages;

static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && PageNosaveFree(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			SetPageNosave(virt_to_page(res));
			unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, 1);
}
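
/*
 * Editor's sketch, not part of the original file: a resume-time caller
 * that needs a page guaranteed not to collide with the saved image can
 * simply do
 *
 *	void *p = (void *)get_safe_page(GFP_ATOMIC);
 *	if (!p)
 *		return -ENOMEM;
 *
 * Any page rejected by the retry loop in alloc_image_page() has already
 * been marked PG_nosave, so swsusp_free() will release it later.
 */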

/**
 *	free_image_page - free page represented by @addr, allocated with
 *	alloc_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	ClearPageNosave(virt_to_page(addr));
	if (clear_nosave_free)
		ClearPageNosaveFree(virt_to_page(addr));
	free_page((unsigned long)addr);
}

/**
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

static inline int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
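
/*
 * Editor's note: __nosave_begin and __nosave_end are linker symbols that
 * bracket the .data.nosave section, where data marked __nosavedata is
 * placed; pfn_is_nosave() simply checks whether a pfn falls inside that
 * range so the page can be skipped when building the image.
 */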

/**
 *	saveable - Determine whether a page should be cloned or not.
 *	@pfn:	The page
 *
 *	We save a page if it isn't Nosave, is not in the range of pages
 *	statically defined as 'unsaveable', and is not part of a free
 *	chunk of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	if (PageNosave(page))
		return NULL;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;
	if (PageNosaveFree(page))
		return NULL;

	return page;
}

unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			n += !!saveable_page(pfn);
	}
	return n;
}

static inline void copy_data_page(long *dst, long *src)
{
	int n;

	/* copy_page and memcpy are not usable for copying task structs. */
	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	struct pbe *pbe;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = saveable_page(pfn);

			if (page) {
				void *ptr = page_address(page);

				BUG_ON(!pbe);
				copy_data_page((void *)pbe->address, ptr);
				pbe->orig_address = (unsigned long)ptr;
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}

/**
 *	free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		free_image_page(pblist, clear_nosave_free);
		pblist = pbe;
	}
}

/**
 *	fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage, unsigned int n)
{
	struct pbe *p;

	p = pbpage;
	pbpage += n - 1;
	do
		p->next = p + 1;
	while (++p < pbpage);
}

/**
 *	create_pbe_list - Create a list of PBEs on top of a given chain
 *	of memory pages allocated with alloc_pagedir()
 *
 *	This function assumes that pages allocated by alloc_image_page() will
 *	always be zeroed.
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage, PBES_PER_PAGE);
		num += PBES_PER_PAGE;
	}
	if (pbpage) {
		num -= PBES_PER_PAGE;
		fill_pb_page(pbpage, nr_pages - num);
	}
}

/**
 *	alloc_pagedir - Allocate the page directory.
 *
 *	First, determine exactly how many pages we need and
 *	allocate them.
 *
 *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 *	struct pbe elements (pbes) and the last element in the page points
 *	to the next page.
 *
 *	On each page we set up a list of struct pbe elements.
 */

static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask,
				 int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pblist = alloc_image_page(gfp_mask, safe_needed);
	pbe = pblist;
	for (num = PBES_PER_PAGE; num < nr_pages; num += PBES_PER_PAGE) {
		if (!pbe) {
			free_pagedir(pblist, 1);
			return NULL;
		}
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
		pbe = pbe->next;
	}
	create_pbe_list(pblist, nr_pages);
	return pblist;
}
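
/*
 * Editor's illustration: with the chain built above, visiting the pagedir
 * one page of PBEs at a time amounts to
 *
 *	struct pbe *pbpage;
 *
 *	for (pbpage = pblist; pbpage;
 *			pbpage = (pbpage + PB_PAGE_SKIP)->next)
 *		;
 *
 * which is roughly what the for_each_pb_page() helper from power.h
 * expands to; PB_PAGE_SKIP indexes the last pbe on a page, whose ->next
 * points to the next page of pbes.
 */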

/**
 * Free pages we allocated for suspend. Suspend pages are allocated
 * before atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	pagedir_nosave = NULL;
	buffer = NULL;
}


/**
 *	enough_free_mem - Make sure we have enough free memory to snapshot.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
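
/*
 * Editor's worked example (assuming 4 KB pages and a three-word struct pbe
 * on a 32-bit box, so PBES_PER_PAGE == 341): snapshotting 100000 data
 * pages needs 100000 free pages for the copies, 294 for the pagedir and
 * PAGES_FOR_IO spare pages left over for writing the image out.
 */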

static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}

static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}

asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section: \n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	pagedir_nosave = swsusp_alloc(nr_pages);
	if (!pagedir_nosave)
		return -ENOMEM;

	/* During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(pagedir_nosave);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */
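
	/*
	 * Editor's note: one longword of metadata (the original address of
	 * each copied page) is recorded per data page, so the image needs
	 * roughly nr_pages * sizeof(long) / PAGE_SIZE metadata pages, as
	 * computed (rounded up) just below.
	 */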

	nr_copy_pages = nr_pages;
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
	return 0;
}

static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}

/**
 *	pack_orig_addresses - the .orig_address fields of the PBEs from the
 *	list starting at @pbe are stored in the array @buf[] (1 page)
 */

static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	if (!pbe)
		for (; j < PAGE_SIZE / sizeof(long); j++)
			buf[j] = 0;
	return pbe;
}

/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *	to it should be passed to this function on each subsequent call.
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of data stream condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		handle->pbe = pagedir_nosave;
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe)
				handle->pbe = pagedir_nosave;
		} else {
			handle->buffer = (void *)handle->pbe->address;
			handle->pbe = handle->pbe->next;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
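
/*
 * Editor's sketch of a caller, not taken from this file: a writer that
 * streams the image out in page-sized chunks might look like
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle, PAGE_SIZE)) > 0)
 *		write_chunk(data_of(handle), ret);
 *	return ret;
 *
 * where write_chunk() is a hypothetical routine that pushes ret bytes to
 * storage, and the final ret is 0 at the end of the image or negative on
 * error.  data_of() (from power.h) points into the current buffer page.
 */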

/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return -EINVAL;

	/* Clear page flags */
	for_each_zone (zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				ClearPageNosaveFree(pfn_to_page(pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist) {
		if (virt_addr_valid(p->orig_address))
			SetPageNosaveFree(virt_to_page(p->orig_address));
		else
			return -EFAULT;
	}

	unsafe_pages = 0;

	return 0;
}

static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}

static int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname,system_utsname.sysname))
		reason = "system type";
	if (strcmp(info->uts.release,system_utsname.release))
		reason = "kernel release";
	if (strcmp(info->uts.version,system_utsname.version))
		reason = "version";
	if (strcmp(info->uts.machine,system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 *	load_header - check the image header and copy data from it
 */

static int load_header(struct snapshot_handle *handle,
                              struct swsusp_info *info)
{
	int error;
	struct pbe *pblist;

	error = check_header(info);
	if (!error) {
		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
		if (!pblist)
			return -ENOMEM;
		pagedir_nosave = pblist;
		handle->pbe = pblist;
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 *	the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
                                                struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}

/**
 *	prepare_image - use metadata contained in the PBE list
 *	pointed to by pagedir_nosave to mark the pages that will
 *	be overwritten in the process of restoring the system
 *	memory state from the image ("unsafe" pages) and allocate
 *	memory for the image
 *
 *	The idea is to allocate the PBE list first and then
 *	allocate as many pages as needed for the image data,
 *	but not to assign these pages to the PBEs initially.
 *	Instead, we just mark them as allocated and create a list
 *	of "safe" pages which will be used later.
 */

struct safe_page {
	struct safe_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct safe_page *safe_pages;

static int prepare_image(struct snapshot_handle *handle)
{
	int error = 0;
	unsigned int nr_pages = nr_copy_pages;
	struct pbe *p, *pblist = NULL;

	p = pagedir_nosave;
	error = mark_unsafe_pages(p);
	if (!error) {
		pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
		if (pblist)
			copy_page_backup_list(pblist, p);
		free_pagedir(p, 0);
		if (!pblist)
			error = -ENOMEM;
	}
	safe_pages = NULL;
	if (!error && nr_pages > unsafe_pages) {
		nr_pages -= unsafe_pages;
		while (nr_pages--) {
			struct safe_page *ptr;

			ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC);
			if (!ptr) {
				error = -ENOMEM;
				break;
			}
			if (!PageNosaveFree(virt_to_page(ptr))) {
				/* The page is "safe", add it to the list */
				ptr->next = safe_pages;
				safe_pages = ptr;
			}
			/* Mark the page as allocated */
			SetPageNosave(virt_to_page(ptr));
			SetPageNosaveFree(virt_to_page(ptr));
		}
	}
	if (!error) {
		pagedir_nosave = pblist;
	} else {
		handle->pbe = NULL;
		swsusp_free();
	}
	return error;
}

static void *get_buffer(struct snapshot_handle *handle)
{
	struct pbe *pbe = handle->pbe, *last = handle->last_pbe;
	struct page *page = virt_to_page(pbe->orig_address);

	if (PageNosave(page) && PageNosaveFree(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the read page
		 */
		pbe->address = 0;
		if (last && last->next)
			last->next = NULL;
		return (void *)pbe->orig_address;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the read page
	 */
	pbe->address = (unsigned long)safe_pages;
	safe_pages = safe_pages->next;
	if (last)
		last->next = pbe;
	handle->last_pbe = pbe;
	return (void *)pbe->address;
}

/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function on each subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	int error = 0;

	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	handle->sync_read = 1;
	if (handle->prev < handle->cur) {
		if (!handle->prev) {
			error = load_header(handle,
					(struct swsusp_info *)buffer);
			if (error)
				return error;
		} else if (handle->prev <= nr_meta_pages) {
			handle->pbe = unpack_orig_addresses(buffer,
							handle->pbe);
			if (!handle->pbe) {
				error = prepare_image(handle);
				if (error)
					return error;
				handle->pbe = pagedir_nosave;
				handle->last_pbe = NULL;
				handle->buffer = get_buffer(handle);
				handle->sync_read = 0;
			}
		} else {
			handle->pbe = handle->pbe->next;
			handle->buffer = get_buffer(handle);
			handle->sync_read = 0;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
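
/*
 * Editor's sketch of a caller, not taken from this file: loading the image
 * back mirrors the read loop above; each call hands back a buffer that the
 * caller fills from storage before calling again
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_write_next(&handle, PAGE_SIZE)) > 0)
 *		read_chunk(data_of(handle), ret);
 *	if (!ret && !snapshot_image_loaded(&handle))
 *		ret = -ENODATA;
 *
 * where read_chunk() is a hypothetical routine that fills the buffer with
 * ret bytes read from storage.
 */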

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
		handle->cur <= nr_meta_pages + nr_copy_pages);
}