/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 *
 */


#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

struct pbe *pagedir_nosave;
static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;

#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}

struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;

static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;
	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!(pfn%10000))
			printk(".");
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}

int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem");
	drain_local_pages();
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	printk("\n");
	return 0;
}

int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;
		highmem_copy = save->next;

		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}
#else
static inline unsigned int count_highmem_pages(void) {return 0;}
static inline int save_highmem(void) {return 0;}
static inline int restore_highmem(void) {return 0;}
#endif

static inline int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

/**
 *	saveable_page - Determine whether a page should be cloned or not.
 *	@pfn:	The page frame number of the page in question.
 *
 *	We save a page if it isn't Nosave, is not in the range of pages
 *	statically defined as 'unsaveable', and isn't part of a free chunk
 *	of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	if (PageNosave(page))
		return NULL;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;
	if (PageNosaveFree(page))
		return NULL;

	return page;
}

unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			n += !!saveable_page(pfn);
	}
	return n;
}

static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	struct pbe *pbe, *p;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		/* This is necessary for swsusp_free() */
		for_each_pb_page (p, pblist)
			SetPageNosaveFree(virt_to_page(p));
		for_each_pbe (p, pblist)
			SetPageNosaveFree(virt_to_page(p->address));
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = saveable_page(pfn);

			if (page) {
				long *src, *dst;
				int n;

				BUG_ON(!pbe);
				pbe->orig_address = (unsigned long)page_address(page);
				/* copy_page and memcpy are not usable for copying task structs. */
				dst = (long *)pbe->address;
				src = (long *)pbe->orig_address;
				for (n = PAGE_SIZE / sizeof(long); n; n--)
					*dst++ = *src++;
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}


/**
 *	free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		ClearPageNosave(virt_to_page(pblist));
		if (clear_nosave_free)
			ClearPageNosaveFree(virt_to_page(pblist));
		free_page((unsigned long)pblist);
		pblist = pbe;
	}
}

/**
 *	fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage)
{
	struct pbe *p;

	p = pbpage;
	pbpage += PB_PAGE_SKIP;
	do
		p->next = p + 1;
	while (++p < pbpage);
}

/**
 *	create_pbe_list - Create a list of PBEs on top of a given chain
 *	of memory pages allocated with alloc_pagedir()
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage, *p;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage);
		num += PBES_PER_PAGE;
	}
	if (pbpage) {
		for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
			p->next = p + 1;
		p->next = NULL;
	}
}

static unsigned int unsafe_pages;

/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag
 *	and we count them using unsafe_pages
 */

static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && PageNosaveFree(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			SetPageNosave(virt_to_page(res));
			unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, 1);
}

/**
 *	alloc_pagedir - Allocate the page directory.
 *
 *	First, determine exactly how many pages we need and
 *	allocate them.
 *
 *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 *	struct pbe elements (pbes) and the last element in the page points
 *	to the next page.
 *
 *	On each page we set up a list of struct pbe elements.
 */

static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask,
				 int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pblist = alloc_image_page(gfp_mask, safe_needed);
	/* FIXME: rewrite this ugly loop */
	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
			pbe = pbe->next, num += PBES_PER_PAGE) {
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
	}
	if (!pbe) { /* get_zeroed_page() failed */
		free_pagedir(pblist, 1);
		pblist = NULL;
	} else
		create_pbe_list(pblist, nr_pages);
	return pblist;
}
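
/*
 * Illustrative sketch (not part of the original file): walking a pagedir
 * chain returned by alloc_pagedir().  for_each_pbe() visits every PBE in
 * the chain and for_each_pb_page() visits the pages that make up the
 * chain, just as copy_data_pages() above uses them.  Kept under #if 0
 * because it is only an example.
 */
#if 0
static unsigned int example_count_pbes(struct pbe *pblist)
{
	struct pbe *p;
	unsigned int n = 0;

	for_each_pbe (p, pblist)
		n++;
	return n;
}
#endif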

/**
 * Free pages we allocated for suspend. Suspend pages are allocated
 * before atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	pagedir_nosave = NULL;
	buffer = NULL;
}


/**
 *	enough_free_mem - Make sure we have enough free memory for the snapshot.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
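
/*
 * Worked example (illustration only, with an assumed PBES_PER_PAGE of 128):
 * for nr_pages == 10000, the check above requires more than
 * 10000 + PAGES_FOR_IO + 79 free lowmem pages -- the image data pages
 * themselves, a reserve for the I/O path, and ceil(10000 / 128) = 79
 * pages to hold the pagedir (the PBEs describing the copied pages).
 */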

static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}

static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}

asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section: \n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	pagedir_nosave = swsusp_alloc(nr_pages);
	if (!pagedir_nosave)
		return -ENOMEM;

	/* While allocating the suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(pagedir_nosave);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This especially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_copy_pages = nr_pages;
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
	return 0;
}

static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}

/**
 *	pack_orig_addresses - the .orig_address fields of the PBEs from the
 *	list starting at @pbe are stored in the array @buf[] (1 page)
 */

static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	if (!pbe)
		for (; j < PAGE_SIZE / sizeof(long); j++)
			buf[j] = 0;
	return pbe;
}

/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function on each subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of data stream condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		handle->pbe = pagedir_nosave;
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe)
				handle->pbe = pagedir_nosave;
		} else {
			handle->buffer = (void *)handle->pbe->address;
			handle->pbe = handle->pbe->next;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
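
/*
 * Illustrative caller sketch (an assumption, not part of the original file):
 * roughly how an image writer might drive snapshot_read_next(), pulling the
 * snapshot out one page at a time.  The write_page() helper is hypothetical;
 * data_of() is the macro referred to in the comment above.  Kept under #if 0
 * because it is only an example.
 */
#if 0
static int example_write_image(struct snapshot_handle *handle)
{
	int nbytes;

	memset(handle, 0, sizeof(*handle));	/* first call needs a zeroed handle */
	while ((nbytes = snapshot_read_next(handle, PAGE_SIZE)) > 0) {
		int error = write_page(data_of(*handle), nbytes);

		if (error)
			return error;
	}
	return nbytes;	/* 0 at the end of the data stream, negative on error */
}
#endif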

/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return -EINVAL;

	/* Clear page flags */
	for_each_zone (zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				ClearPageNosaveFree(pfn_to_page(pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist) {
		if (virt_addr_valid(p->orig_address))
			SetPageNosaveFree(virt_to_page(p->orig_address));
		else
			return -EFAULT;
	}

	unsafe_pages = 0;

	return 0;
}

static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}

static int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname,system_utsname.sysname))
		reason = "system type";
	if (strcmp(info->uts.release,system_utsname.release))
		reason = "kernel release";
	if (strcmp(info->uts.version,system_utsname.version))
		reason = "version";
	if (strcmp(info->uts.machine,system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 *	load_header - check the image header and copy data from it
 */

static int load_header(struct snapshot_handle *handle,
                              struct swsusp_info *info)
{
	int error;
	struct pbe *pblist;

	error = check_header(info);
	if (!error) {
		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
		if (!pblist)
			return -ENOMEM;
		pagedir_nosave = pblist;
		handle->pbe = pblist;
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 *	the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
                                                struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}

/**
 *	prepare_image - use metadata contained in the PBE list
 *	pointed to by pagedir_nosave to mark the pages that will
 *	be overwritten in the process of restoring the system
 *	memory state from the image ("unsafe" pages) and allocate
 *	memory for the image
 *
 *	The idea is to allocate the PBE list first and then
 *	allocate as many pages as are needed for the image data,
 *	but not to assign these pages to the PBEs initially.
 *	Instead, we just mark them as allocated and create a list
 *	of "safe" pages that will be used later.
 */

struct safe_page {
	struct safe_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct safe_page *safe_pages;

static int prepare_image(struct snapshot_handle *handle)
{
	int error = 0;
	unsigned int nr_pages = nr_copy_pages;
	struct pbe *p, *pblist = NULL;

	p = pagedir_nosave;
	error = mark_unsafe_pages(p);
	if (!error) {
		pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
		if (pblist)
			copy_page_backup_list(pblist, p);
		free_pagedir(p, 0);
		if (!pblist)
			error = -ENOMEM;
	}
	safe_pages = NULL;
	if (!error && nr_pages > unsafe_pages) {
		nr_pages -= unsafe_pages;
		while (nr_pages--) {
			struct safe_page *ptr;

			ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC);
			if (!ptr) {
				error = -ENOMEM;
				break;
			}
			if (!PageNosaveFree(virt_to_page(ptr))) {
				/* The page is "safe", add it to the list */
				ptr->next = safe_pages;
				safe_pages = ptr;
			}
			/* Mark the page as allocated */
			SetPageNosave(virt_to_page(ptr));
			SetPageNosaveFree(virt_to_page(ptr));
		}
	}
	if (!error) {
		pagedir_nosave = pblist;
	} else {
		handle->pbe = NULL;
		swsusp_free();
	}
	return error;
}

static void *get_buffer(struct snapshot_handle *handle)
{
	struct pbe *pbe = handle->pbe, *last = handle->last_pbe;
	struct page *page = virt_to_page(pbe->orig_address);

	if (PageNosave(page) && PageNosaveFree(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the read page
		 */
		pbe->address = 0;
		if (last && last->next)
			last->next = NULL;
		return (void *)pbe->orig_address;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the read page
	 */
	pbe->address = (unsigned long)safe_pages;
	safe_pages = safe_pages->next;
	if (last)
		last->next = pbe;
	handle->last_pbe = pbe;
	return (void *)pbe->address;
}

/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function on each subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	int error = 0;

	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	handle->sync_read = 1;
	if (handle->prev < handle->cur) {
		if (!handle->prev) {
			error = load_header(handle,
					(struct swsusp_info *)buffer);
			if (error)
				return error;
		} else if (handle->prev <= nr_meta_pages) {
			handle->pbe = unpack_orig_addresses(buffer,
							handle->pbe);
			if (!handle->pbe) {
				error = prepare_image(handle);
				if (error)
					return error;
				handle->pbe = pagedir_nosave;
				handle->last_pbe = NULL;
				handle->buffer = get_buffer(handle);
				handle->sync_read = 0;
			}
		} else {
			handle->pbe = handle->pbe->next;
			handle->buffer = get_buffer(handle);
			handle->sync_read = 0;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
		handle->cur <= nr_meta_pages + nr_copy_pages);
}
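
/*
 * Illustrative caller sketch (an assumption, not part of the original file):
 * roughly how an image reader might feed pages to snapshot_write_next()
 * during resume and then verify the result with snapshot_image_loaded().
 * The read_page() helper is hypothetical; data_of() is the macro referred to
 * in the comment above snapshot_write_next().  Kept under #if 0 because it
 * is only an example.
 */
#if 0
static int example_load_image(struct snapshot_handle *handle)
{
	int nbytes;

	memset(handle, 0, sizeof(*handle));	/* first call needs a zeroed handle */
	while ((nbytes = snapshot_write_next(handle, PAGE_SIZE)) > 0) {
		/* read the next page into the location provided by the handle */
		int error = read_page(data_of(*handle), nbytes);

		if (error)
			return error;
	}
	if (nbytes < 0)
		return nbytes;
	return snapshot_image_loaded(handle) ? 0 : -ENODATA;
}
#endif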