/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 */
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>

#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/highmem.h>		/* For VMALLOC_END */
#include <linux/kdebug.h>

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
	unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
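	/* Number of hex digits needed to print an address in the messages below. */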
	const int field = sizeof(unsigned long) * 2;
	siginfo_t info;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

#if 0
	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
	       current->comm, current->pid, field, address, write,
	       field, regs->cp0_epc);
#endif

#ifdef CONFIG_KPROBES
	/*
	 * This notifies the kprobes fault handler.  The
	 * exception code is redundant as it is also carried in REGS,
	 * but we pass it anyhow.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
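	/*
	 * On 64-bit kernels a fault in the vmalloc range is not fixed up
	 * here and goes straight to no_context; 32-bit kernels sync the
	 * kernel page tables lazily in vmalloc_fault below.
	 */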
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif

	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto VMALLOC_FAULT_TARGET;
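	/* Faults in the dedicated module mapping area are handled the same way. */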
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto VMALLOC_FAULT_TARGET;
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

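	/* Tell the core MM that the fault originated in user mode. */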
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
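		/*
		 * With RIXI (Read Inhibit / eXecute Inhibit) page protection
		 * the execute and read permissions are checked separately.
		 */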
		if (cpu_has_rixi) {
			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
			if (!(vma->vm_flags & VM_READ)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
		} else {
			if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

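	/*
	 * If we were asked to retry but a fatal signal is pending, just
	 * return: handle_mm_fault() has already dropped mmap_sem on the
	 * VM_FAULT_RETRY path.
	 */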
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

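	/* Count this fault for the perf software page-fault event. */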
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
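	/*
	 * Major/minor fault accounting is done only on the first attempt;
	 * a retry clears FAULT_FLAG_ALLOW_RETRY and sets FAULT_FLAG_TRIED.
	 */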
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
						  regs, address);
			tsk->maj_flt++;
		} else {
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
						  regs, address);
			tsk->min_flt++;
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
#if 0
		printk("do_page_fault() #2: sending SIGSEGV to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
	       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
	       raw_smp_processor_id(), field, address, field, regs->cp0_epc,
	       field,  regs->regs[31]);
	die("Oops", regs);

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return the userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	else
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
#if 0
		printk("do_page_fault() #3: sending SIGBUS to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);

	return;
#ifndef CONFIG_64BIT
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

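		/*
		 * pgd_current[] holds the top-level page table that the TLB
		 * refill handler is currently using on each CPU.
		 */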
		pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif
}

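/*
 * Exception entry point.  exception_enter()/exception_exit() keep the
 * context-tracking code (RCU, NO_HZ_FULL) informed that an exception is
 * being handled and restore the previous context state afterwards.
 */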
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
	unsigned long write, unsigned long address)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	__do_page_fault(regs, write, address);
	exception_exit(prev_state);
}