fault_64.c 17.6 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/compiler.h>
21
#include <linux/vmalloc.h>
L
Linus Torvalds 已提交
22
#include <linux/module.h>
23
#include <linux/kprobes.h>
24
#include <linux/uaccess.h>
25
#include <linux/kdebug.h>
L
Linus Torvalds 已提交
26 27 28 29 30 31 32 33

#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm-generic/sections.h>

34 35 36 37 38 39 40 41
/*
 * Page fault error code bits
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
42
#define PF_PROT		(1<<0)
43
#define PF_WRITE	(1<<1)
44 45
#define PF_USER		(1<<2)
#define PF_RSVD		(1<<3)
46 47
#define PF_INSTR	(1<<4)

48
/*
 * Give a registered kprobe fault handler first crack at this fault.
 * Returns nonzero when the fault was consumed by kprobes and needs no
 * further handling; always 0 when CONFIG_KPROBES is off.
 */
static inline int notify_page_fault(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	int handled = 0;

	/*
	 * kprobe_running() needs smp_processor_id(), so we must not be
	 * migrated while we ask; only kernel-mode faults can be kprobed.
	 */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			handled = 1;
		preempt_enable();
	}

	return handled;
#else
	return 0;
#endif
}
66

67 68 69 70 71 72 73 74 75 76 77 78 79
/*
 * X86_32
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * X86_64
 * Sometimes the CPU reports invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner
 *
 * Returns nonzero when the faulting instruction decodes as a prefetch
 * (opcode 0x0F 0x0D or 0x0F 0x18, possibly behind legal prefixes), in
 * which case the fault should be silently ignored by the caller.
 */
static int is_prefetch(struct pt_regs *regs, unsigned long addr,
		       unsigned long error_code)
{
	unsigned char *instr;
	int scan_more = 1;	/* keep scanning while we see valid prefixes */
	int prefetch = 0;
	unsigned char *max_instr;

#ifdef CONFIG_X86_32
	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6)) {
		/* Catch an obscure case of prefetch inside an NX page. */
		if (nx_enabled && (error_code & PF_INSTR))
			return 0;
	} else {
		return 0;
	}
#else
	/* If it was a exec fault ignore */
	if (error_code & PF_INSTR)
		return 0;
#endif

	instr = (unsigned char *)convert_ip_to_linear(current, regs);
	/* x86 instructions are at most 15 bytes long */
	max_instr = instr + 15;

	/* Never decode user code living above the user address limit */
	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (scan_more && instr < max_instr) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		/* Fetch the next byte safely; bail out if unmapped */
		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/*
			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
			 * In X86_64 long mode, the CPU will signal invalid
			 * opcode if some of these prefixes are present so
			 * X86_64 will never get here anyway
			 */
			scan_more = ((instr_lo & 7) == 0x6);
			break;
#ifdef CONFIG_X86_64
		case 0x40:
			/*
			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
			 * Need to figure out under what instruction mode the
			 * instruction was issued. Could check the LDT for lm,
			 * but for now it's good enough to assume that long
			 * mode only uses well known segments or kernel.
			 */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;
#endif
		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;

			/* Read the second opcode byte to confirm */
			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			/* Anything else is a real instruction, not a prefix */
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}

167 168 169 170 171 172 173 174 175 176 177 178
/*
 * Deliver a fault signal (SIGSEGV/SIGBUS) with the faulting address to @tsk.
 *
 * The siginfo must be fully zeroed before filling in the fields we use:
 * siginfo_t is a union, and force_sig_info() copies the whole structure to
 * userspace, so any fields left uninitialized here would leak kernel stack
 * contents to the signal handler.
 */
static void force_sig_info_fault(int si_signo, int si_code,
	unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));
	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

179 180
/*
 * Check whether a page-table entry pointer can be safely dereferenced.
 * Returns nonzero (the probe's error code) when reading @p would fault.
 */
static int bad_address(void *p)
{
	unsigned long scratch;

	return probe_kernel_address((unsigned long *)p, scratch);
}
L
Linus Torvalds 已提交
184 185 186 187 188 189 190 191

/*
 * Print the page-table walk (PGD/PUD/PMD/PTE entries) for @address to the
 * console.  Used from the oops paths; every level pointer is probed with
 * bad_address() first so a corrupted table cannot fault recursively.
 */
void dump_pagetable(unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* CR3 holds the physical address of the current PGD */
	pgd = (pgd_t *)read_cr3();

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
	pgd += pgd_index(address);
	if (bad_address(pgd)) goto bad;
	printk("PGD %lx ", pgd_val(*pgd));
	if (!pgd_present(*pgd)) goto ret;

	pud = pud_offset(pgd, address);
	if (bad_address(pud)) goto bad;
	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud))	goto ret;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd)) goto bad;
	printk("PMD %lx ", pmd_val(*pmd));
	/* A large (huge) PMD has no PTE level below it */
	if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte)) goto bad;
	printk("PTE %lx", pte_val(*pte));
ret:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

220
#ifdef CONFIG_X86_64
static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/* Workaround for K8 erratum #93 & buggy BIOS.
   BIOS SMM functions are required to use a specific workaround
   to avoid corruption of the 64bit RIP register on C stepping K8.
   A lot of BIOS that didn't get tested properly miss this.
   The OS sees this as a page fault with the upper 32bits of RIP cleared.
   Try to work around it here.
   Note we only handle faults in kernel here.
   Does nothing for X86_32

   Returns 1 (and patches regs->ip) when the fault matches the erratum
   signature; 0 otherwise. */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	static int warned;
	/* Erratum only applies when the fault is an instruction fetch at RIP */
	if (address != regs->ip)
		return 0;
	/* The bug clears the upper 32 bits, so they must already be zero */
	if ((address >> 32) != 0)
		return 0;
	/* Re-sign-extend: reconstruct the intended kernel-space address */
	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		if (!warned) {
			printk(errata93_warning);
			warned = 1;
		}
		/* Resume execution at the repaired address */
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}
L
Linus Torvalds 已提交
258 259 260 261

/*
 * Handle a fault caused by a reserved-bit violation (PF_RSVD): the page
 * tables themselves are corrupt.  Dump the walk, record fault state in the
 * task, and oops with SIGKILL.  Does not return to the faulting context.
 */
static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
				 unsigned long error_code)
{
	unsigned long flags = oops_begin();
	struct task_struct *tsk;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       current->comm, address);
	dump_pagetable(address);
	tsk = current;
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	/* __die() returning nonzero means the regs are no longer trustworthy */
	if (__die("Bad pagetable", regs, error_code))
		regs = NULL;
	oops_end(flags, regs, SIGKILL);
}

/*
 * Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 *
 * Returns 0 when the lazy kernel mapping was synced from the reference
 * (init_mm) page table, -1 when the address is genuinely unmapped.
 * Called with no locks held — may run in interrupt context.
 */
static int vmalloc_fault(unsigned long address)
{
#ifdef CONFIG_X86_32
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
#else
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Copy kernel mappings over when needed. This can also
	   happen within a race in page table update. In the later
	   case just flush. */

	/* mm may be NULL for kernel threads; fall back to init_mm */
	pgd = pgd_offset(current->mm ?: &init_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;
	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/* Below here mismatches are bugs because these lower tables
	   are shared */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;
	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();
	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;
	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();
	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;
	pte = pte_offset_kernel(pmd, address);
	/* Don't use pte_page here, because the mappings can point
	   outside mem_map, and the NUMA hash lookup cannot handle
	   that. */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();
	return 0;
#endif
}

350
int show_unhandled_signals = 1;
L
Linus Torvalds 已提交
351 352 353 354 355 356

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code is the hardware-supplied page-fault error code (the PF_*
 * bits defined above); the faulting address is read from CR2.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, fault;
	unsigned long flags;
	int si_code;

	/*
	 * We can fault from pretty much anywhere, with unknown IRQ state.
	 */
	trace_hardirqs_fixup();

	tsk = current;
	mm = tsk->mm;
	/* Pre-warm the semaphore cache line: we will likely take mmap_sem */
	prefetchw(&mm->mmap_sem);

	/* get the address */
	address = read_cr2();

	si_code = SEGV_MAPERR;

	/* Let kprobes claim the fault before we do any real work */
	if (notify_page_fault(regs))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(address >= TASK_SIZE64)) {
		/*
		 * Don't check for the module range here: its PML4
		 * is always initialized because it's shared with the main
		 * kernel text. Only vmalloc may need PML4 syncups.
		 */
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
			if (vmalloc_fault(address) >= 0)
				return;
		}
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	/* Re-enable interrupts if they were on before the fault */
	if (likely(regs->flags & X86_EFLAGS_IF))
		local_irq_enable();

	/* Reserved-bit faults mean the page tables are corrupt: oops */
	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(address, regs, error_code);

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (unlikely(in_atomic() || !mm))
		goto bad_area_nosemaphore;

	/*
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet.
	 */
	if (user_mode_vm(regs))
		error_code |= PF_USER;

 again:
	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (likely(vma->vm_start <= address))
		goto good_area;
	/* Address is below the vma: only OK if this is a growable stack */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work.  ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (PF_PROT|PF_WRITE)) {
	default:	/* 3: write, present */
		/* fall through */
	case PF_WRITE:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case PF_PROT:		/* read, present */
		goto bad_area;
	case 0:			/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	/* Account the fault as major (needed I/O) or minor */
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {

		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/* Spurious fault from a prefetch instruction: ignore */
		if (is_prefetch(regs, address, error_code))
			return;

		/* Work around K8 erratum #100 K8 in compat mode
		   occasionally jumps to illegal addresses >4GB.  We
		   catch this here in the page fault handler because
		   these addresses are not reachable. Just detect this
		   case and return.  Any code segment in LDT is
		   compatibility mode. */
		if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
		    (address >> 32))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk(
#ifdef CONFIG_X86_32
			"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
#else
			"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
#endif
			task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			tsk->comm, task_pid_nr(tsk), address, regs->ip,
			regs->sp, error_code);
			print_vma_addr(" in ", regs->ip);
			printk("\n");
		}

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	/*
	 * Hall of shame of CPU/BIOS bugs.
	 */

	if (is_prefetch(regs, address, error_code))
		return;

	if (is_errata93(regs, address))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	flags = oops_begin();

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at %016lx RIP: \n" KERN_ALERT, address);
	printk_address(regs->ip, regs->bp);
	dump_pagetable(address);
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	if (__die("Oops", regs, error_code))
		regs = NULL;
	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);
	oops_end(flags, regs, SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	/* Never OOM-kill init: yield and retry the fault instead */
	if (is_global_init(current)) {
		yield();
		goto again;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & PF_USER)
		do_group_exit(SIGKILL);
	/* Kernel-mode OOM: fall through to the exception-fixup/oops path */
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & PF_USER))
		goto no_context;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
	return;
}
639

640
DEFINE_SPINLOCK(pgd_lock);
641
LIST_HEAD(pgd_list);
642 643 644

/*
 * Propagate vmalloc-range PGD entries from the reference (init_mm) page
 * table into every process PGD on pgd_list, so later vmalloc faults in
 * those address spaces are impossible.  Skips PGD slots already synced
 * (tracked in the 'insync' bitmap).
 */
void vmalloc_sync_all(void)
{
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = VMALLOC_START & PGDIR_MASK;
	unsigned long address;

	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			const pgd_t *pgd_ref = pgd_offset_k(address);
			struct page *page;

			/* Nothing mapped at this slot in the reference table */
			if (pgd_none(*pgd_ref))
				continue;
			/* pgd_lock guards the list of all process PGD pages */
			spin_lock(&pgd_lock);
			list_for_each_entry(page, &pgd_list, lru) {
				pgd_t *pgd;
				pgd = (pgd_t *)page_address(page) + pgd_index(address);
				if (pgd_none(*pgd))
					set_pgd(pgd, *pgd_ref);
				else
					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
			}
			spin_unlock(&pgd_lock);
			set_bit(pgd_index(address), insync);
		}
		/* Advance the resume point past slots known to be synced */
		if (address == start)
			start = address + PGDIR_SIZE;
	}
	/* Check that there is no need to do the same for the modules area. */
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
}