// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/io.h>

#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#undef  __EXTERN_INLINE

#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *);


/*
 * Force a new ASN for a task.
 */

#ifndef CONFIG_SMP
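/* On SMP each CPU tracks its own last_asn in its per-CPU cpu_data;
   only UP needs this single global (see asm/mmu_context.h).  */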
unsigned long last_asn = ASN_FIRST_VERSION;
#endif

void
__load_new_mm_context(struct mm_struct *next_mm)
{
	unsigned long mmc;
	struct pcb_struct *pcb;

	mmc = __get_new_mm_context(next_mm, smp_processor_id());
	next_mm->context[smp_processor_id()] = mmc;

	pcb = &current_thread_info()->pcb;
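	/* Tell the PALcode about the new context: the low bits of the
	   context value are the hardware ASN, and ptbr holds the physical
	   page frame number of the pgd (which lives in the identity-mapped
	   KSEG region, hence the IDENT_ADDR subtraction).  */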
	pcb->asn = mmc & HARDWARE_ASN_MASK;
	pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;

	__reload_thread(pcb);
}


/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *	0 = translation not valid
 *	1 = access violation
 *	2 = fault-on-read
 *	3 = fault-on-execute
 *	4 = fault-on-write
 *
 * cause:
 *	-1 = instruction fetch
 *	0 = load
 *	1 = store
 *
 * Registers $9 through $15 are saved in a block just prior to `regs' and
 * are saved and restored around the call to allow exception code to
 * modify them.
 */

/* Macro for exception fixup code to access integer registers.  */
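/* Layout (see struct pt_regs in asm/ptrace.h): $0-$8 occupy slots 0-8,
   $19-$28 follow at slots 9-18 (hence r-10), $16-$18 sit at the end of
   the frame (hence r+10), and $9-$15 live in the save block just below
   regs, giving the negative indices r-16.  */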
#define dpf_reg(r)							\
	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
				 (r) <= 18 ? (r)+10 : (r)-10])

asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
	      long cause, struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	const struct exception_table_entry *fixup;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
	   (or is suppressed by the PALcode).  Support that for older CPUs
	   by ignoring such an instruction.  */
	if (cause == 0) {
		unsigned int insn;
		__get_user(insn, (unsigned int __user *)regs->pc);
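		/* Bits 25:21 hold the Ra field (0x1f == $31) and bits 31:26
		   the opcode; each set bit in the mask below corresponds to
		   one of the load opcodes listed.  */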
		if ((insn >> 21 & 0x1f) == 0x1f &&
		    /* ldq ldl ldt lds ldg ldf ldwu ldbu */
		    (1ul << (insn >> 26) & 0x30f00001400ul)) {
			regs->pc += 4;
			return;
		}
	}

	/* If we're in an interrupt context, or have no user context,
	   we must not take the fault.  */
	if (!mm || faulthandler_disabled())
		goto no_context;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
	if (address >= TASK_SIZE)
		goto vmalloc_fault;
#endif
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	   we can handle it.  */
 good_area:
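	/* A vma covers the address, so any failure from here on is an
	   access-rights problem (SEGV_ACCERR), not a missing mapping
	   (SEGV_MAPERR).  */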
	si_code = SEGV_ACCERR;
	if (cause < 0) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (!cause) {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	}

	/* If for any reason at all we couldn't handle the fault,
	   make sure we exit gracefully rather than endlessly redo
	   the fault.  */
	fault = handle_mm_fault(vma, address, flags, regs);
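
	/* If the fault was interrupted by a signal, the MM core has already
	   dropped mmap_lock (the VM_FAULT_RETRY case), so just return and
	   let the signal be delivered.  */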
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);

	return;

	/* Something tried to access memory that isn't in our memory map.
	   Fix it, but check if it's kernel or user first.  */
 bad_area:
	mmap_read_unlock(mm);

	if (user_mode(regs))
		goto do_sigsegv;

 no_context:
	/* Are we prepared to handle this fault as an exception?  */
	if ((fixup = search_exception_tables(regs->pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(dpf_reg, fixup, regs->pc);
		regs->pc = newpc;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	   terminate things with extreme prejudice.  */
	printk(KERN_ALERT "Unable to handle kernel paging request at "
	       "virtual address %016lx\n", address);
	die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
	do_exit(SIGKILL);

	/* We ran out of memory, or some other thing happened to us that
	   made us unable to handle the page fault gracefully.  */
 out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

 do_sigbus:
	mmap_read_unlock(mm);
	/* Send a sigbus, regardless of whether we were in kernel
	   or user mode.  */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address);
	if (!user_mode(regs))
		goto no_context;
	return;

 do_sigsegv:
	force_sig_fault(SIGSEGV, si_code, (void __user *) address);
	return;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
 vmalloc_fault:
	if (user_mode(regs))
		goto do_sigsegv;
	else {
		/* Synchronize this task's top level page-table
		   with the "reference" page table from init.  */
		long index = pgd_index(address);
		pgd_t *pgd, *pgd_k;

		pgd = current->active_mm->pgd + index;
		pgd_k = swapper_pg_dir + index;
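		/* Syncing the single top-level entry is enough: the lower
		   level tables are shared with the reference page table,
		   so later updates are seen without copying.  */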
		if (!pgd_present(*pgd) && pgd_present(*pgd_k)) {
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}
		goto no_context;
	}
#endif
}