/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>

#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/highmem.h>		/* For VMALLOC_END */
#include <linux/kdebug.h>

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
			      unsigned long address)
{
	struct vm_area_struct * vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
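	/* Hex digits needed to print an address, for the %0*lx formats below */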
	const int field = sizeof(unsigned long) * 2;
	siginfo_t info;
	int fault;
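	/*
	 * Let handle_mm_fault() drop mmap_sem and ask for a retry, allow
	 * the wait for the fault to be killed by a fatal signal, and flag
	 * write accesses so the core mm code handles COW and dirtying.
	 */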
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
						 (write ? FAULT_FLAG_WRITE : 0);

#if 0
	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
	       current->comm, current->pid, field, address, write,
	       field, regs->cp0_epc);
#endif

#ifdef CONFIG_KPROBES
	/*
	 * This is to notify the kprobes fault handler.  The
	 * exception code is redundant as it is also carried in REGS,
	 * but we pass it anyhow.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif

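	/*
	 * A fault in the vmalloc range is resolved by lazily syncing the
	 * kernel page tables on 32-bit kernels (see vmalloc_fault below);
	 * 64-bit kernels treat it as an ordinary kernel fault.
	 */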
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto VMALLOC_FAULT_TARGET;
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

retry:
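	/*
	 * Look the VMA up under mmap_sem; on a retry the address space
	 * may have changed while the lock was dropped.
	 */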
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (cpu_has_rixi) {
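			/*
			 * With the RI/XI (Read Inhibit / Execute Inhibit)
			 * protection bits, read and instruction-fetch faults
			 * reach this path; reject them when the VMA lacks
			 * the matching VM_READ/VM_EXEC permission.
			 */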
			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
			if (!(vma->vm_flags & VM_READ)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
		} else {
			if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
						  regs, address);
			tsk->maj_flt++;
		} else {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
						  regs, address);
			tsk->min_flt++;
		}
		if (fault & VM_FAULT_RETRY) {
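			/*
			 * Only one retry is allowed: clear ALLOW_RETRY so
			 * the next attempt cannot loop, and record that we
			 * have already tried once.
			 */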
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
#if 0
		printk("do_page_fault() #2: sending SIGSEGV to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
	       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
	       raw_smp_processor_id(), field, address, field, regs->cp0_epc,
	       field,  regs->regs[31]);
	die("Oops", regs);

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return the userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	up_read(&mm->mmap_sem);
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	else
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
#if 0
		printk("do_page_fault() #3: sending SIGBUS to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);

	return;
#ifndef CONFIG_64BIT
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif
}