/*
 * mpx.c - Memory Protection eXtensions
 *
 * Copyright (c) 2014, Intel Corporation.
 * Qiaowei Ren <qiaowei.ren@intel.com>
 * Dave Hansen <dave.hansen@intel.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>

#include <asm/insn.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/mpx.h>

static const char *mpx_mapping_name(struct vm_area_struct *vma)
{
	return "[mpx]";
}

static struct vm_operations_struct mpx_vma_ops = {
	.name = mpx_mapping_name,
};

static int is_mpx_vma(struct vm_area_struct *vma)
{
	return (vma->vm_ops == &mpx_vma_ops);
}

/*
 * This is really a simplified "vm_mmap". It only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 *
 * Later on, we use the vma->vm_ops to uniquely identify these
 * VMAs.
 */
static unsigned long mpx_mmap(unsigned long len)
{
	unsigned long ret;
	unsigned long addr, pgoff;
	struct mm_struct *mm = current->mm;
	vm_flags_t vm_flags;
	struct vm_area_struct *vma;

	/* Only bounds table can be allocated here */
	if (len != MPX_BT_SIZE_BYTES)
		return -EINVAL;

	down_write(&mm->mmap_sem);

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Obtain the address to map to. We verify (or select) it and
	 * ensure that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(NULL, 0, len, 0, MAP_ANONYMOUS | MAP_PRIVATE);
	if (addr & ~PAGE_MASK) {
		ret = addr;
		goto out;
	}

	vm_flags = VM_READ | VM_WRITE | VM_MPX |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* Set pgoff according to addr for anon_vma */
	pgoff = addr >> PAGE_SHIFT;

	ret = mmap_region(NULL, addr, len, vm_flags, pgoff);
	if (IS_ERR_VALUE(ret))
		goto out;

	vma = find_vma(mm, ret);
	if (!vma) {
		ret = -ENOMEM;
		goto out;
	}
	vma->vm_ops = &mpx_vma_ops;

	if (vm_flags & VM_LOCKED) {
		up_write(&mm->mmap_sem);
		mm_populate(ret, len);
		return ret;
	}

out:
	up_write(&mm->mmap_sem);
	return ret;
}

enum reg_type {
	REG_TYPE_RM = 0,
	REG_TYPE_INDEX,
	REG_TYPE_BASE,
};

static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
			  enum reg_type type)
{
	int regno = 0;

	static const int regoff[] = {
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, cx),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, bx),
		offsetof(struct pt_regs, sp),
		offsetof(struct pt_regs, bp),
		offsetof(struct pt_regs, si),
		offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
		offsetof(struct pt_regs, r8),
		offsetof(struct pt_regs, r9),
		offsetof(struct pt_regs, r10),
		offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12),
		offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14),
		offsetof(struct pt_regs, r15),
#endif
	};
	int nr_registers = ARRAY_SIZE(regoff);
	/*
	 * Don't decode a 32-bit instruction as reading a
	 * 64-bit-only register.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
		nr_registers -= 8;

	switch (type) {
	case REG_TYPE_RM:
		regno = X86_MODRM_RM(insn->modrm.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_INDEX:
		regno = X86_SIB_INDEX(insn->sib.value);
		if (X86_REX_X(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_BASE:
		regno = X86_SIB_BASE(insn->sib.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	default:
		pr_err("invalid register type\n");
		BUG();
		break;
	}

	if (regno >= nr_registers) {
		WARN_ONCE(1, "decoded an instruction with an invalid register");
		return -EINVAL;
	}
	return regoff[regno];
}
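
/*
 * Worked example (editor's illustration): "bndcl (%r8),%bnd0" encodes
 * as f3 41 0f 1a 00, i.e. a REX prefix with REX.B=1 and a ModRM byte
 * with mod=00, reg=000 (bnd0), rm=000.  X86_MODRM_RM() yields 0 and
 * REX.B adds 8, so get_reg_offset() returns
 * offsetof(struct pt_regs, r8).
 */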

/*
 * Return the address being referenced by the instruction.
 * For rm=3, return the contents of the rm reg.
 * For rm!=3, calculate the address using the SIB byte and
 * the displacement.
 */
static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs)
{
	unsigned long addr, base, indx;
	int addr_offset, base_offset, indx_offset;
	insn_byte_t sib;

	insn_get_modrm(insn);
	insn_get_sib(insn);
	sib = insn->sib.value;

	if (X86_MODRM_MOD(insn->modrm.value) == 3) {
		addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
		if (addr_offset < 0)
			goto out_err;
		addr = regs_get_register(regs, addr_offset);
	} else {
		if (insn->sib.nbytes) {
			base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
			if (base_offset < 0)
				goto out_err;

			indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
			if (indx_offset < 0)
				goto out_err;

			base = regs_get_register(regs, base_offset);
			indx = regs_get_register(regs, indx_offset);
			addr = base + indx * (1 << X86_SIB_SCALE(sib));
		} else {
			addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
			if (addr_offset < 0)
				goto out_err;
			addr = regs_get_register(regs, addr_offset);
		}
		addr += insn->displacement.value;
	}
	return (void __user *)addr;
out_err:
	return (void __user *)-1;
}
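
/*
 * Worked example (editor's illustration): for
 * "bndcl 0x8(%rax,%rcx,4),%bnd0" the ModRM byte has mod=01 and rm=100
 * (SIB present), the SIB byte has scale=2, index=%rcx, base=%rax, and
 * the displacement is 8, so the code above computes:
 *
 *	addr = regs->ax + regs->cx * (1 << 2) + 0x8;
 */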

static int mpx_insn_decode(struct insn *insn,
			   struct pt_regs *regs)
{
	unsigned char buf[MAX_INSN_SIZE];
	int x86_64 = !test_thread_flag(TIF_IA32);
	int not_copied;
	int nr_copied;

	not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf));
	nr_copied = sizeof(buf) - not_copied;
	/*
	 * The decoder _should_ fail nicely if we pass it a short buffer.
	 * But, let's not depend on that implementation detail.  If we
	 * did not get anything, just error out now.
	 */
	if (!nr_copied)
		return -EFAULT;
	insn_init(insn, buf, nr_copied, x86_64);
	insn_get_length(insn);
	/*
	 * copy_from_user() tries to get as many bytes as we could see in
	 * the largest possible instruction.  If the instruction we are
	 * after is shorter than that _and_ we attempt to copy from
	 * something unreadable, we might get a short read.  This is OK
	 * as long as the read did not stop in the middle of the
	 * instruction.  Check to see if we got a partial instruction.
	 */
	if (nr_copied < insn->length)
		return -EFAULT;

	insn_get_opcode(insn);
	/*
	 * We only _really_ need to decode bndcl/bndcn/bndcu
	 * Error out on anything else.
	 */
	if (insn->opcode.bytes[0] != 0x0f)
		goto bad_opcode;
	if ((insn->opcode.bytes[1] != 0x1a) &&
	    (insn->opcode.bytes[1] != 0x1b))
		goto bad_opcode;

	return 0;
bad_opcode:
	return -EINVAL;
}
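
/*
 * For reference (editor's note): the whole BND* family shares the
 * two-byte opcodes 0f 1a and 0f 1b; the legacy prefix selects the
 * variant, e.g. bndcl is f3 0f 1a, bndcu is f2 0f 1a and bndcn is
 * f2 0f 1b.  That is why checking the two opcode bytes alone is
 * enough to accept the bounds-checking instructions here.
 */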

/*
 * If a bounds overflow occurs then a #BR is generated. This
 * function decodes MPX instructions to get violation address
 * and set this address into extended struct siginfo.
 *
 * Note that this is not a super precise way of doing this.
 * Userspace could have, by the time we get here, written
 * anything it wants into the instructions.  We cannot
 * trust anything about it.  They might not be valid
 * instructions or might encode invalid registers, etc...
 *
 * The caller is expected to kfree() the returned siginfo_t.
 */
siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
{
	const struct bndreg *bndregs, *bndreg;
	siginfo_t *info = NULL;
	struct insn insn;
	uint8_t bndregno;
	int err;

	err = mpx_insn_decode(&insn, regs);
	if (err)
		goto err_out;

	/*
	 * We know at this point that we are only dealing with
	 * MPX instructions.
	 */
	insn_get_modrm(&insn);
	bndregno = X86_MODRM_REG(insn.modrm.value);
	if (bndregno > 3) {
		err = -EINVAL;
		goto err_out;
	}
	/* get bndregs field from current task's xsave area */
	bndregs = get_xsave_field_ptr(XSTATE_BNDREGS);
	if (!bndregs) {
		err = -EINVAL;
		goto err_out;
	}
	/* now go select the individual register in the set of 4 */
	bndreg = &bndregs[bndregno];

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_out;
	}
	/*
	 * The registers are always 64-bit, but the upper 32
	 * bits are ignored in 32-bit mode.  Also, note that the
	 * upper bounds are architecturally represented in 1's
	 * complement form.
	 *
	 * The 'unsigned long' cast is because the compiler
	 * complains when casting from integers to different-size
	 * pointers.
	 */
	info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound;
	info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound;
	info->si_addr_lsb = 0;
	info->si_signo = SIGSEGV;
	info->si_errno = 0;
	info->si_code = SEGV_BNDERR;
	info->si_addr = mpx_get_addr_ref(&insn, regs);
	/*
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
	if (info->si_addr == (void *)-1) {
		err = -EINVAL;
		goto err_out;
	}
	trace_mpx_bounds_register_exception(info->si_addr, bndreg);
	return info;
err_out:
	/* info might be NULL, but kfree() handles that */
	kfree(info);
	return ERR_PTR(err);
}
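
/*
 * Example (editor's illustration, assuming a libc that exposes the
 * si_lower/si_upper siginfo fields): a userspace handler consumes
 * the siginfo built above roughly like this:
 *
 *	void handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == SEGV_BNDERR)
 *			fprintf(stderr, "%p outside [%p, %p]\n",
 *				si->si_addr, si->si_lower, si->si_upper);
 *	}
 */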

static __user void *mpx_get_bounds_dir(void)
{
	const struct bndcsr *bndcsr;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * 32-bit binaries on 64-bit kernels are currently
	 * unsupported.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
		return MPX_INVALID_BOUNDS_DIR;
	/*
	 * The bounds directory pointer is stored in a register
	 * only accessible if we first do an xsave.
	 */
	bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
	if (!bndcsr)
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Make sure the register looks valid by checking the
	 * enable bit.
	 */
	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Lastly, mask off the low bits used for configuration
	 * flags, and return the address of the bounds directory.
	 */
	return (void __user *)(unsigned long)
		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
}
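
/*
 * For reference (editor's note): in BNDCFGU, bit 0 is the enable flag
 * tested above, bit 1 is BNDPRESERVE, and the 4k-aligned bounds
 * directory base occupies the high bits, which is exactly what
 * MPX_BNDCFG_ADDR_MASK extracts.
 */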

int mpx_enable_management(void)
{
	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
	struct mm_struct *mm = current->mm;
	int ret = 0;

	/*
	 * The runtime in userspace is responsible for allocating the
	 * bounds directory. It then saves the base of the bounds
	 * directory into the XSAVE/XRSTOR save area and enables MPX
	 * through the XRSTOR instruction.
	 *
	 * The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
	 * expected to be relatively expensive. Storing the bounds
	 * directory here means that we do not have to do xsave in the
	 * unmap path; we can just use mm->bd_addr instead.
	 */
	bd_base = mpx_get_bounds_dir();
	down_write(&mm->mmap_sem);
	mm->bd_addr = bd_base;
	if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR)
		ret = -ENXIO;

	up_write(&mm->mmap_sem);
	return ret;
}
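
/*
 * Example (editor's illustration): userspace opts in to (and out of)
 * kernel-managed bounds tables with prctl(2), typically from the MPX
 * runtime's startup code:
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_MPX_ENABLE_MANAGEMENT, 0, 0, 0, 0))
 *		... run without kernel-managed tables ...
 *
 * and symmetrically prctl(PR_MPX_DISABLE_MANAGEMENT, 0, 0, 0, 0) to
 * tear the management down.
 */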

int mpx_disable_management(void)
{
	struct mm_struct *mm = current->mm;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return -ENXIO;

	down_write(&mm->mmap_sem);
	mm->bd_addr = MPX_INVALID_BOUNDS_DIR;
	up_write(&mm->mmap_sem);
	return 0;
}

/*
 * In 32-bit mode the bounds directory is 4MB and each bounds table is
 * 16KB. In 64-bit mode the bounds directory is 2GB and each bounds
 * table (MPX_BT_SIZE_BYTES) is 4MB.
 */
static int allocate_bt(long __user *bd_entry)
{
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(MPX_BT_SIZE_BYTES);
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bt_addr = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table into the
	 * bounds directory entry out in userspace memory.  Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some other parts of the
	 * MPX code that have to pagefault_disable().
	 */
	ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
					   expected_old_val, bt_addr);
	if (ret)
		goto out_unmap;

	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails.  Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry.  Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set.  Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data in to a bounds table.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	return 0;
out_unmap:
	vm_munmap(bt_addr & MPX_BT_ADDR_MASK, MPX_BT_SIZE_BYTES);
	return ret;
}
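
/*
 * Worked example (editor's illustration): if mpx_mmap() placed the
 * new table at 0x7f0000400000, the cmpxchg above publishes the
 * directory entry value 0x7f0000400001, i.e. the table address with
 * MPX_BD_ENTRY_VALID_FLAG (bit 0) set.
 */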

/*
 * When a BNDSTX instruction attempts to save bounds to a bounds
 * table, it will first attempt to look up the table in the
 * first-level bounds directory.  If it does not find a table in
 * the directory, a #BR is generated and we get here in order to
 * allocate a new table.
 *
 * In 32-bit mode the size of the BD is 4MB, and the size of each
 * bounds table is 16KB. In 64-bit mode the size of the BD is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int do_mpx_bt_fault(void)
{
	unsigned long bd_entry, bd_base;
	const struct bndcsr *bndcsr;

	bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
	if (!bndcsr)
		return -EINVAL;
	/*
	 * Mask off the preserve and enable bits
	 */
	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
	/*
	 * The hardware provides the address of the missing or invalid
	 * entry via BNDSTATUS, so we don't have to go look it up.
	 */
	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
	/*
	 * Make sure the directory entry is within where we think
	 * the directory is.
	 */
	if ((bd_entry < bd_base) ||
	    (bd_entry >= bd_base + MPX_BD_SIZE_BYTES))
		return -EINVAL;

	return allocate_bt((long __user *)bd_entry);
}
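
/*
 * Worked example (editor's illustration): in 64-bit mode a directory
 * slot is selected by address bits 47:20 and each slot is 8 bytes, so
 * for a faulting pointer 'ptr' the entry checked above sits at
 * roughly:
 *
 *	bd_entry = bd_base + ((ptr >> 20) & 0xfffffff) * 8;
 *
 * 2^28 slots of 8 bytes each give the 2GB directory size noted above.
 */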

int mpx_handle_bd_fault(void)
{
	/*
	 * Userspace never asked us to manage the bounds tables,
	 * so refuse to help.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return -EINVAL;

	if (do_mpx_bt_fault()) {
		force_sig(SIGSEGV, current);
		/*
		 * The force_sig() is essentially "handling" this
		 * exception, so we do not pass up the error
		 * from do_mpx_bt_fault().
		 */
	}
	return 0;
}

/*
 * A thin wrapper around get_user_pages().  Returns 0 if the
 * fault was resolved or -errno if not.
 */
static int mpx_resolve_fault(long __user *addr, int write)
{
	long gup_ret;
	int nr_pages = 1;
	int force = 0;

	gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
				 nr_pages, write, force, NULL, NULL);
	/*
	 * get_user_pages() returns number of pages gotten.
	 * 0 means we failed to fault in and get anything,
	 * probably because 'addr' is bad.
	 */
	if (!gup_ret)
		return -EFAULT;
	/* Other error, return it */
	if (gup_ret < 0)
		return gup_ret;
	/* must have gup'd a page and gup_ret>0, success */
	return 0;
}

/*
 * Get the base of the bounds table pointed to by a specific
 * bounds directory entry.
 */
static int get_bt_addr(struct mm_struct *mm,
			long __user *bd_entry, unsigned long *bt_addr)
{
	int ret;
	int valid_bit;

	if (!access_ok(VERIFY_READ, (bd_entry), sizeof(*bd_entry)))
		return -EFAULT;

	while (1) {
		int need_write = 0;

		pagefault_disable();
		ret = get_user(*bt_addr, bd_entry);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}

	valid_bit = *bt_addr & MPX_BD_ENTRY_VALID_FLAG;
	*bt_addr &= MPX_BT_ADDR_MASK;

	/*
	 * When the kernel is managing bounds tables, a bounds directory
	 * entry will either have a valid address (plus the valid bit)
	 * *OR* be completely empty. If we see a !valid entry *and* some
	 * data in the address field, we know something is wrong. This
	 * -EINVAL return will cause a SIGSEGV.
	 */
	if (!valid_bit && *bt_addr)
		return -EINVAL;
	/*
	 * Do we have a completely zeroed bt entry?  That is OK.  It
	 * just means there was no bounds table for this memory.  Make
	 * sure to distinguish this from -EINVAL, which will cause
	 * a SEGV.
	 */
	if (!valid_bit)
		return -ENOENT;

	return 0;
}

/*
 * Free the backing physical pages of bounds table 'bt_addr'.
 * Assume start...end is within that bounds table.
 */
static int zap_bt_entries(struct mm_struct *mm,
		unsigned long bt_addr,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;
	unsigned long addr, len;

	/*
	 * Find the first overlapping vma. If vma->vm_start > start, there
	 * will be a hole in the bounds table. This -EINVAL return will
	 * cause a SIGSEGV.
	 */
	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EINVAL;

	/*
	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
	 * be split. So we need to look across the entire 'start -> end'
	 * range of this bounds table, find all of the VM_MPX VMAs, and
	 * zap only those.
	 */
	addr = start;
	while (vma && vma->vm_start < end) {
		/*
		 * We followed a bounds directory entry down
		 * here.  If we find a non-MPX VMA, that's bad,
		 * so stop immediately and return an error.  This
		 * probably results in a SIGSEGV.
		 */
		if (!is_mpx_vma(vma))
			return -EINVAL;

		len = min(vma->vm_end, end) - addr;
		zap_page_range(vma, addr, len, NULL);
		trace_mpx_unmap_zap(addr, addr+len);

		vma = vma->vm_next;
		if (!vma)
			break;
		addr = vma->vm_start;
	}

	return 0;
}

static int unmap_single_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long bt_addr)
{
	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	unsigned long actual_old_val = 0;
	int ret;

	while (1) {
		int need_write = 1;

		pagefault_disable();
		ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
						   expected_old_val, 0);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}
	/*
	 * The cmpxchg was performed, check the results.
	 */
	if (actual_old_val != expected_old_val) {
		/*
		 * Someone else raced with us to unmap the table.
		 * There was no bounds table pointed to by the
		 * directory, so declare success.  Somebody freed
		 * it.
		 */
		if (!actual_old_val)
			return 0;
		/*
		 * Something messed with the bounds directory
		 * entry.  We hold mmap_sem for read or write
		 * here, so it could not be a _new_ bounds table
		 * that someone just allocated.  Something is
		 * wrong, so pass up the error and SIGSEGV.
		 */
		return -EINVAL;
	}

	/*
	 * Note, we are likely being called under do_munmap() already. To
	 * avoid recursion, do_munmap() checks whether the area being
	 * unmapped is itself a bounds table via the VM_MPX flag.
	 */
	return do_munmap(mm, bt_addr, MPX_BT_SIZE_BYTES);
}

/*
 * If the bounds table pointed to by bounds directory entry 'bd_entry'
 * is not shared, unmap the whole bounds table. Otherwise, free only
 * the backing physical pages of the bounds table entries covered by
 * the virtual address region start...end.
 */
static int unmap_shared_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long start,
		unsigned long end, bool prev_shared, bool next_shared)
{
	unsigned long bt_addr;
	int ret;

	ret = get_bt_addr(mm, bd_entry, &bt_addr);
	/*
	 * We could see an "error" ret for not-present bounds
	 * tables (not really an error), or actual errors, but
	 * stop unmapping either way.
	 */
	if (ret)
		return ret;

	if (prev_shared && next_shared)
		ret = zap_bt_entries(mm, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
	else if (prev_shared)
		ret = zap_bt_entries(mm, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
				bt_addr+MPX_BT_SIZE_BYTES);
	else if (next_shared)
		ret = zap_bt_entries(mm, bt_addr, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
	else
		ret = unmap_single_bt(mm, bd_entry, bt_addr);

	return ret;
}

/*
 * A virtual address region being munmap()ed might share a bounds
 * table with adjacent VMAs. We only need to free the backing
 * physical memory of the shared bounds table entries covered by
 * this virtual address region.
 */
static int unmap_edge_bts(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	int ret;
	long __user *bde_start, *bde_end;
	struct vm_area_struct *prev, *next;
	bool prev_shared = false, next_shared = false;

	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);

	/*
	 * Check whether bde_start and bde_end are shared with adjacent
	 * VMAs.
	 *
	 * We already unlinked the VMAs from the mm's rbtree so 'start'
	 * is guaranteed to be in a hole. This gets us the first VMA
	 * before the hole in to 'prev' and the next VMA after the hole
	 * in to 'next'.
	 */
	next = find_vma_prev(mm, start, &prev);
	if (prev && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(prev->vm_end-1))
			== bde_start)
		prev_shared = true;
	if (next && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(next->vm_start))
			== bde_end)
		next_shared = true;

	/*
	 * This virtual address region being munmap()ed is only
	 * covered by one bounds table.
	 *
	 * In this case, if this table is also shared with adjacent
	 * VMAs, only part of the backing physical memory of the bounds
	 * table needs to be freed. Otherwise the whole bounds table
	 * needs to be unmapped.
	 */
	if (bde_start == bde_end) {
		return unmap_shared_bt(mm, bde_start, start, end,
				prev_shared, next_shared);
	}

	/*
	 * If more than one bounds table is covered by this virtual
	 * address region being munmap()ed, we need to separately
	 * check whether bde_start and bde_end are shared with
	 * adjacent VMAs.
	 */
	ret = unmap_shared_bt(mm, bde_start, start, end, prev_shared, false);
	if (ret)
		return ret;
	ret = unmap_shared_bt(mm, bde_end, start, end, false, next_shared);
	if (ret)
		return ret;

	return 0;
}
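
/*
 * Illustration (editor's note): in 64-bit mode one bounds table covers
 * bounds for 1MB of virtual address space, so for an munmap() of
 * [start, end) the layout is roughly:
 *
 *	|<- prev vma ->|<----- start ... end ----->|<- next vma ->|
 *	[ table A ......... ][ fully covered ][ ......... table B ]
 *
 * Table A is shared with 'prev' and table B with 'next': only their
 * covered entries are zapped, while fully-covered middle tables are
 * unmapped outright by mpx_unmap_tables() below.
 */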

static int mpx_unmap_tables(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	int ret;
	long __user *bd_entry, *bde_start, *bde_end;
	unsigned long bt_addr;

	trace_mpx_unmap_search(start, end);
	/*
	 * "Edge" bounds tables are those which are being used by the region
	 * (start -> end), but that may be shared with adjacent areas.  If they
	 * turn out to be completely unshared, they will be freed.  If they are
	 * shared, we will free the backing store (like an MADV_DONTNEED) for
	 * areas used by this region.
	 */
	ret = unmap_edge_bts(mm, start, end);
	switch (ret) {
		/* non-present tables are OK */
		case 0:
		case -ENOENT:
			/* Success, or no tables to unmap */
			break;
		case -EINVAL:
		case -EFAULT:
		default:
			return ret;
	}

	/*
	 * Only unmap the bounds tables that are
	 *   1. fully covered
	 *   2. not at the edges of the mapping, even if fully aligned
	 */
	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);
	for (bd_entry = bde_start + 1; bd_entry < bde_end; bd_entry++) {
		ret = get_bt_addr(mm, bd_entry, &bt_addr);
		switch (ret) {
			case 0:
				break;
			case -ENOENT:
				/* No table here, try the next one */
				continue;
			case -EINVAL:
			case -EFAULT:
			default:
				/*
				 * Note: we are being strict here.
				 * Any time we run in to an issue
				 * unmapping tables, we stop and
				 * SIGSEGV.
				 */
				return ret;
		}

		ret = unmap_single_bt(mm, bd_entry, bt_addr);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Free unused bounds tables covered in a virtual address region being
 * munmap()ed. Assume end > start.
 *
 * This function is called by do_munmap(). The VMAs covering the
 * virtual address region start...end have already been split if
 * necessary, and 'vma' is the first VMA in this range (start -> end).
 */
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int ret;

	/*
	 * Refuse to do anything unless userspace has asked
	 * the kernel to help manage the bounds tables.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return;
	/*
	 * Scan the entire 'start -> end' range and check for
	 * VM_MPX VMAs.
	 *
	 * To avoid recursion, if a VM_MPX VMA is found in the range
	 * (start -> end), we bail out and do no further work. Such
	 * recursion would mean having bounds tables for bounds tables,
	 * which should not occur normally. Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
	 */
	do {
		if (vma->vm_flags & VM_MPX)
			return;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);

	ret = mpx_unmap_tables(mm, start, end);
	if (ret)
		force_sig(SIGSEGV, current);
}