/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>

/*
 * Fill one PMD-level table with 2MB identity-mapped kernel pages.
 *
 * @level2p: first PMD entry of the table to populate
 * @addr:    physical/virtual (identity) start address; rounded down to a
 *           page boundary, then one full PUD_SIZE span is mapped
 *
 * Every entry maps addr..addr+PMD_SIZE-1 onto itself with large
 * executable kernel permissions (__PAGE_KERNEL_LARGE_EXEC).
 */
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + PUD_SIZE;
	while (addr < end_addr) {
		/* identity mapping: physical address == virtual address */
		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		addr += PMD_SIZE;
	}
}

33
static int init_level3_page(struct kimage *image, pud_t *level3p,
M
Maneesh Soni 已提交
34
				unsigned long addr, unsigned long last_addr)
35 36 37
{
	unsigned long end_addr;
	int result;
M
Maneesh Soni 已提交
38

39 40
	result = 0;
	addr &= PAGE_MASK;
41
	end_addr = addr + PGDIR_SIZE;
M
Maneesh Soni 已提交
42
	while ((addr < last_addr) && (addr < end_addr)) {
43
		struct page *page;
44
		pmd_t *level2p;
M
Maneesh Soni 已提交
45

46 47 48 49 50
		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
51
		level2p = (pmd_t *)page_address(page);
52
		init_level2_page(level2p, addr);
53 54
		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
		addr += PUD_SIZE;
55 56
	}
	/* clear the unused entries */
M
Maneesh Soni 已提交
57
	while (addr < end_addr) {
58 59
		pud_clear(level3p++);
		addr += PUD_SIZE;
60 61 62 63 64 65
	}
out:
	return result;
}


66
static int init_level4_page(struct kimage *image, pgd_t *level4p,
M
Maneesh Soni 已提交
67
				unsigned long addr, unsigned long last_addr)
68 69 70
{
	unsigned long end_addr;
	int result;
M
Maneesh Soni 已提交
71

72 73
	result = 0;
	addr &= PAGE_MASK;
74
	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
M
Maneesh Soni 已提交
75
	while ((addr < last_addr) && (addr < end_addr)) {
76
		struct page *page;
77
		pud_t *level3p;
M
Maneesh Soni 已提交
78

79 80 81 82 83
		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
84
		level3p = (pud_t *)page_address(page);
85 86 87 88
		result = init_level3_page(image, level3p, addr, last_addr);
		if (result) {
			goto out;
		}
89 90
		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
		addr += PGDIR_SIZE;
91 92
	}
	/* clear the unused entries */
M
Maneesh Soni 已提交
93
	while (addr < end_addr) {
94 95
		pgd_clear(level4p++);
		addr += PGDIR_SIZE;
96
	}
M
Maneesh Soni 已提交
97
out:
98 99 100
	return result;
}

101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149
static void free_transition_pgtable(struct kimage *image)
{
	free_page((unsigned long)image->arch.pud);
	free_page((unsigned long)image->arch.pmd);
	free_page((unsigned long)image->arch.pte);
}

/*
 * Map the kernel virtual address of relocate_kernel onto the physical
 * copy of the relocation code (second page of the control pages), so
 * execution can continue at the same virtual address after the switch
 * to the identity-mapped page tables.
 *
 * Missing intermediate tables are allocated and recorded in image->arch
 * so free_transition_pgtable() can release them later.
 *
 * Returns 0 on success, -ENOMEM if any table allocation fails (all
 * pages allocated so far are freed before returning).
 */
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr, paddr;
	int result = -ENOMEM;

	/* virtual address we must keep valid across the page-table switch */
	vaddr = (unsigned long)relocate_kernel;
	/* physical address of the control-page copy of that code */
	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			goto err;
		/* remember for free_transition_pgtable() */
		image->arch.pud = pud;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(pgd, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd)
			goto err;
		image->arch.pmd = pmd;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!pte)
			goto err;
		image->arch.pte = pte;
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
	return 0;
err:
	free_transition_pgtable(image);
	return result;
}

150 151 152

static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
153
	pgd_t *level4p;
154
	int result;
155
	level4p = (pgd_t *)__va(start_pgtable);
156 157 158 159
	result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
	if (result)
		return result;
	return init_transition_pgtable(image, level4p);
160 161 162 163
}

/*
 * Load a new interrupt descriptor table.
 *
 * @newidt: virtual address of the new IDT
 * @limit:  IDT limit (size in bytes minus one)
 */
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
};


/*
 * Load a new global descriptor table.
 *
 * @newgdt: virtual address of the new GDT
 * @limit:  GDT limit (size in bytes minus one)
 */
static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
};

static void load_segments(void)
{
	__asm__ __volatile__ (
194 195 196 197 198
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
M
Michael Matz 已提交
199
		: : "a" (__KERNEL_DS) : "memory"
200 201 202 203 204
		);
}

int machine_kexec_prepare(struct kimage *image)
{
205
	unsigned long start_pgtable;
206 207 208
	int result;

	/* Calculate the offsets */
M
Maneesh Soni 已提交
209
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
210 211 212

	/* Setup the identity mapped 64bit page table */
	result = init_pgtable(image, start_pgtable);
M
Maneesh Soni 已提交
213
	if (result)
214 215 216 217 218 219 220
		return result;

	return 0;
}

/*
 * Undo machine_kexec_prepare(): release the transition page-table
 * pages recorded in image->arch.
 */
void machine_kexec_cleanup(struct kimage *image)
{
	free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
H
Huang Ying 已提交
228
void machine_kexec(struct kimage *image)
229
{
230 231
	unsigned long page_list[PAGES_NR];
	void *control_page;
232

I
Ingo Molnar 已提交
233 234
	tracer_disable();

235 236 237
	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

238 239 240
	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, PAGE_SIZE);

241
	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
242 243
	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));
244

245 246 247 248 249
	/* The segment registers are funny things, they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * with from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
250 251 252 253 254 255 256 257 258 259
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0),0);
	set_idt(phys_to_virt(0),0);
260

261
	/* now call it */
262 263
	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
			image->start);
264
}
265

K
Ken'ichi Ohmichi 已提交
266 267
void arch_crash_save_vmcoreinfo(void)
{
268
	VMCOREINFO_SYMBOL(phys_base);
269
	VMCOREINFO_SYMBOL(init_level4_pgt);
270 271 272 273 274

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
K
Ken'ichi Ohmichi 已提交
275 276
}