// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

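/*
 * Architecture-private fault codes, returned by do_exception() in addition
 * to the generic VM_FAULT_* bits and mapped to the final action (signal,
 * kernel exception fixup or pfault handling) by do_fault_error().
 */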
#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	VDSO_FAULT,
	GMAP_FAULT,
};

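/*
 * store_indication is set to 0xc00 when facility 75 (presumably the
 * access-exception fetch/store-indication facility) is installed. In that
 * case the TEID carries fetch/store indication bits, and do_exception()
 * treats (TEID & store_indication) == 0x400 as a store access, adding
 * FAULT_FLAG_WRITE even when the requested access type is not VM_WRITE.
 */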
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (IS_ENABLED(CONFIG_PGSTE) &&
		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		if (current->thread.mm_segment == USER_DS)
			return USER_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2) {
		/* secondary space exception */
		if (current->thread.mm_segment & 1) {
			if (current->thread.mm_segment == USER_DS_SACF)
				return USER_FAULT;
			return KERNEL_FAULT;
		}
		return VDSO_FAULT;
	}
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case VDSO_FAULT:
		asce = S390_lowcore.vdso_asce;
		pr_cont("vdso ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

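/*
 * Look for an exception table fixup covering the faulting instruction:
 * first in the separate table that apparently covers code placed in the
 * .dma text section, then in the regular kernel exception tables.
 */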
const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_dma_ex_table,
			       __stop_dma_ex_table - __start_dma_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault?  */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

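/*
 * A fault with access type VM_EXEC on the address of a signal-return
 * trampoline is turned back into the corresponding system call:
 * 0x0a77 is "svc 119" (sigreturn) and 0x0aad is "svc 173" (rt_sigreturn).
 * Setting PIF_SYSCALL with the matching int_code lets the exit path
 * perform the system call instead of delivering SIGSEGV.
 */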
static noinline int signal_return(struct pt_regs *regs)
{
	u16 instruction;
	int rc;

	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	if (rc)
		return rc;
	if (instruction == 0x0a77) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x00040077;
		return 0;
	} else if (instruction == 0x0aad) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x000400ad;
		return 0;
	}
	return -EACCES;
}

static noinline void do_fault_error(struct pt_regs *regs, int access,
					vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
		if (access == VM_EXEC && signal_return(regs) == 0)
			break;
		fallthrough;
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a 
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case VDSO_FAULT:
		fault = VM_FAULT_BADMAP;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	mmap_read_lock(mm);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_lock has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			flags &= ~FAULT_FLAG_RETRY_NOWAIT;
			flags |= FAULT_FLAG_TRIED;
			mmap_read_lock(mm);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address =  __gmap_link(gmap, current->thread.gmap_addr,
				       address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
					(regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

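	/* Any access type may fault in the page: VM_ACCESS_FLAGS is
	 * VM_READ | VM_WRITE | VM_EXEC. */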
	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT 
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

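/*
 * Request block for DIAG 0x258. Function code 0 (this block) establishes
 * the pfault handshake with the hypervisor, function code 1 (see
 * pfault_fini_refbk below) cancels it. refgaddr points at __LC_LPP, so the
 * token delivered with each pfault interrupt carries the pid of the
 * affected task (see pfault_interrupt()).
 */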
static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
        int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
        return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{

	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as guest, runs a user
 * space process and the user space process accesses a page that the host has
 * paged out we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt then we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happens within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule().  It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

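/*
 * CPU hotplug "dead" callback: wake up and release every task that is
 * still parked on the pfault list and reset its pfault_wait state.
 */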
static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)
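/*
 * Handlers for faults raised when storage that belongs to a protected
 * ("secure") guest is touched. arch_make_page_accessible() asks the
 * ultravisor to make the page accessible to the host again; if that is
 * not possible for a user page, the task gets a SIGSEGV.
 */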
void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	int rc;

	switch (get_fault_type(regs)) {
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	case VDSO_FAULT:
	case GMAP_FAULT:
	default:
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

#else
void do_secure_storage_access(struct pt_regs *regs)
{
	default_trap_handler(regs);
}

void do_non_secure_storage_access(struct pt_regs *regs)
{
	default_trap_handler(regs);
}
#endif