/*
 * Hibernation support specific for i386 - temporary page tables
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/bootmem.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmzone.h>
#include <asm/sections.h>

/* Defined in hibernate_asm_32.S */
extern int restore_image(void);

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;
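
/*
 * Background (not stated in this file): during resume, the image pages are
 * copied back over the boot kernel, which may clobber the page tables it is
 * currently running on.  The restore code therefore runs on this temporary
 * set of page tables, built entirely from pages that get_safe_page()
 * guarantees are not part of the hibernation image.
 */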

/* The following three functions are based on the analogous code in
 * arch/x86/mm/init_32.c
 */

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry.  In a non-PAE build this just returns
 * the pgd entry itself, since the middle layer is folded into the top level.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);

	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}
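
/*
 * Layout sketch for illustration (the usual i386 constants, not defined in
 * this file): with PAE the pgd has 4 entries covering 1 GB each, and a pmd
 * table holds 512 entries mapping 2 MB apiece; without PAE the pud and pmd
 * levels are folded away, so the 1024-entry pgd maps 4 MB per entry and
 * pmd_offset(pud, 0) simply hands back the pgd entry itself.
 */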

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}
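
/*
 * Note on the flags above (standard x86 definitions, stated here for
 * reference): _PAGE_TABLE is _PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
 * _PAGE_ACCESSED | _PAGE_DIRTY, i.e. a fully permissive pmd entry; the
 * individual ptes installed later decide the actual access rights of
 * each 4 KB page.
 */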

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/* Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
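			/*
			 * Arithmetic sketch: one PSE mapping covers as much
			 * as a full pte table, hence pfn += PTRS_PER_PTE
			 * below (1024 * 4 KB = 4 MB without PAE, 512 * 4 KB
			 * = 2 MB with PAE).
			 */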
			if (boot_cpu_has(X86_FEATURE_PSE)) {
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}

	return 0;
}
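
/*
 * Worked example (assuming the common i386 PAGE_OFFSET of 0xC0000000 and
 * 512 MB of lowmem, so max_low_pfn = 0x20000): the loop starts at
 * pgd_index(0xC0000000) = 768 in a non-PAE build and, with PSE available,
 * needs 128 big-page pmd entries of 4 MB each to cover pfns 0..0x1ffff.
 */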

static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}
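
/*
 * The loop above points every top-level entry at empty_zero_page, so the
 * whole PAE page directory pointer table is present and harmless before
 * resume_physical_mapping_init() installs the real mappings; non-PAE
 * builds need no such priming, hence the empty body there.
 */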

int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	/* We have enough memory, and from now on we cannot recover */
	restore_image();
	return 0;
}
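
/*
 * Note: restore_image() (in hibernate_asm_32.S) switches to resume_pg_dir,
 * copies the image pages back into place and jumps into the restored
 * kernel, so on success it never returns and the "return 0" above is not
 * reached.
 */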

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
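
/*
 * Worked example with hypothetical addresses: if __nosave_begin sits at
 * physical 0x5000 and __nosave_end at 0x6800, then nosave_begin_pfn = 5
 * and PAGE_ALIGN rounds the end up to 0x7000, so nosave_end_pfn = 7 and
 * pfns 5 and 6 (including the partially used last page) are excluded
 * from the image.
 */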