/*
 * mpx.c - Memory Protection eXtensions
 *
 * Copyright (c) 2014, Intel Corporation.
 * Qiaowei Ren <qiaowei.ren@intel.com>
 * Dave Hansen <dave.hansen@intel.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>

#include <asm/insn.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/mpx.h>

static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BD_SIZE_BYTES_64;
	else
		return MPX_BD_SIZE_BYTES_32;
}

static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BT_SIZE_BYTES_64;
	else
		return MPX_BT_SIZE_BYTES_32;
}
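
/*
 * Sanity check on the constants above (a derivation, assuming the
 * architectural MPX layout): on 64-bit, address bits 47:20 index the
 * bounds directory (2^28 entries * 8 bytes = 2GB) and bits 19:3 index
 * a bounds table (2^17 entries * 32 bytes = 4MB).  On 32-bit, bits
 * 31:12 give 2^20 entries * 4 bytes = 4MB and bits 11:2 give
 * 2^10 entries * 16 bytes = 16KB.
 */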

/*
 * This is really a simplified "vm_mmap"; it only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 */
static unsigned long mpx_mmap(unsigned long len)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, populate;

	/* Only bounds table can be allocated here */
	if (len != mpx_bt_size_bytes(mm))
		return -EINVAL;

	down_write(&mm->mmap_sem);
	addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate, NULL);
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

	return addr;
}

enum reg_type {
	REG_TYPE_RM = 0,
	REG_TYPE_INDEX,
	REG_TYPE_BASE,
};

static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
			  enum reg_type type)
{
	int regno = 0;

	static const int regoff[] = {
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, cx),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, bx),
		offsetof(struct pt_regs, sp),
		offsetof(struct pt_regs, bp),
		offsetof(struct pt_regs, si),
		offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
		offsetof(struct pt_regs, r8),
		offsetof(struct pt_regs, r9),
		offsetof(struct pt_regs, r10),
		offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12),
		offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14),
		offsetof(struct pt_regs, r15),
#endif
	};
	int nr_registers = ARRAY_SIZE(regoff);
	/*
	 * Don't possibly decode a 32-bit instruction as
	 * reading a 64-bit-only register.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
		nr_registers -= 8;

	switch (type) {
	case REG_TYPE_RM:
		regno = X86_MODRM_RM(insn->modrm.value);
		if (X86_REX_B(insn->rex_prefix.value))
			regno += 8;
		break;

	case REG_TYPE_INDEX:
		regno = X86_SIB_INDEX(insn->sib.value);
		if (X86_REX_X(insn->rex_prefix.value))
			regno += 8;
		break;

	case REG_TYPE_BASE:
		regno = X86_SIB_BASE(insn->sib.value);
		if (X86_REX_B(insn->rex_prefix.value))
			regno += 8;
		break;

	default:
		pr_err("invalid register type\n");
		BUG();
		break;
	}

	if (regno >= nr_registers) {
		WARN_ONCE(1, "decoded an instruction with an invalid register");
		return -EINVAL;
	}
	return regoff[regno];
}

/*
 * Return the address being referenced by the instruction.
 * For rm=3, return the content of the rm reg.
 * For rm!=3, calculate the address using SIB and Disp.
 */
static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs)
{
	unsigned long addr, base, indx;
	int addr_offset, base_offset, indx_offset;
	insn_byte_t sib;

	insn_get_modrm(insn);
	insn_get_sib(insn);
	sib = insn->sib.value;

	if (X86_MODRM_MOD(insn->modrm.value) == 3) {
		addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
		if (addr_offset < 0)
			goto out_err;
		addr = regs_get_register(regs, addr_offset);
	} else {
		if (insn->sib.nbytes) {
			base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
			if (base_offset < 0)
				goto out_err;

			indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
			if (indx_offset < 0)
				goto out_err;

			base = regs_get_register(regs, base_offset);
			indx = regs_get_register(regs, indx_offset);
			addr = base + indx * (1 << X86_SIB_SCALE(sib));
		} else {
			addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
			if (addr_offset < 0)
				goto out_err;
			addr = regs_get_register(regs, addr_offset);
		}
		addr += insn->displacement.value;
	}
	return (void __user *)addr;
out_err:
	return (void __user *)-1;
}
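
/*
 * Worked example for mpx_get_addr_ref() (a hypothetical instruction,
 * not taken from this file): for "bndcl (%rax,%rbx,4), %bnd0", mod != 3
 * and a SIB byte is present, so the code above computes:
 *
 *	addr = regs->ax + regs->bx * (1 << 2) + insn->displacement.value;
 */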

static int mpx_insn_decode(struct insn *insn,
			   struct pt_regs *regs)
{
	unsigned char buf[MAX_INSN_SIZE];
	int x86_64 = !test_thread_flag(TIF_IA32);
	int not_copied;
	int nr_copied;

	not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf));
	nr_copied = sizeof(buf) - not_copied;
	/*
	 * The decoder _should_ fail nicely if we pass it a short buffer.
	 * But, let's not depend on that implementation detail.  If we
	 * did not get anything, just error out now.
	 */
	if (!nr_copied)
		return -EFAULT;
	insn_init(insn, buf, nr_copied, x86_64);
	insn_get_length(insn);
	/*
	 * copy_from_user() tries to get as many bytes as we could see in
	 * the largest possible instruction.  If the instruction we are
	 * after is shorter than that _and_ we attempt to copy from
	 * something unreadable, we might get a short read.  This is OK
	 * as long as the read did not stop in the middle of the
	 * instruction.  Check to see if we got a partial instruction.
	 */
	if (nr_copied < insn->length)
		return -EFAULT;

	insn_get_opcode(insn);
	/*
	 * We only _really_ need to decode bndcl/bndcn/bndcu.
	 * Error out on anything else.
	 */
	if (insn->opcode.bytes[0] != 0x0f)
		goto bad_opcode;
	if ((insn->opcode.bytes[1] != 0x1a) &&
	    (insn->opcode.bytes[1] != 0x1b))
		goto bad_opcode;

	return 0;
bad_opcode:
	return -EINVAL;
}

/*
 * If a bounds overflow occurs then a #BR is generated. This
 * function decodes MPX instructions to get the violation
 * address and store it in the extended struct siginfo.
 *
 * Note that this is not a super precise way of doing this.
 * Userspace could have, by the time we get here, written
 * anything it wants in to the instructions.  We cannot
 * trust anything about it.  They might not be valid
 * instructions or might encode invalid registers, etc...
 *
 * The caller is expected to kfree() the returned siginfo_t.
 */
siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
{
	const struct mpx_bndreg_state *bndregs;
	const struct mpx_bndreg *bndreg;
	siginfo_t *info = NULL;
	struct insn insn;
	uint8_t bndregno;
	int err;

	err = mpx_insn_decode(&insn, regs);
	if (err)
		goto err_out;

	/*
	 * We know at this point that we are only dealing with
	 * MPX instructions.
	 */
	insn_get_modrm(&insn);
	bndregno = X86_MODRM_REG(insn.modrm.value);
	if (bndregno > 3) {
		err = -EINVAL;
		goto err_out;
	}
	/* get bndregs field from current task's xsave area */
	bndregs = get_xsave_field_ptr(XFEATURE_MASK_BNDREGS);
	if (!bndregs) {
		err = -EINVAL;
		goto err_out;
	}
	/* now go select the individual register in the set of 4 */
	bndreg = &bndregs->bndreg[bndregno];

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_out;
	}
	/*
	 * The registers are always 64-bit, but the upper 32
	 * bits are ignored in 32-bit mode.  Also, note that the
	 * upper bounds are architecturally represented in 1's
	 * complement form.
	 *
	 * The 'unsigned long' cast is because the compiler
	 * complains when casting from integers to different-size
	 * pointers.
	 */
	info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound;
	info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound;
	info->si_addr_lsb = 0;
	info->si_signo = SIGSEGV;
	info->si_errno = 0;
	info->si_code = SEGV_BNDERR;
	info->si_addr = mpx_get_addr_ref(&insn, regs);
	/*
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
	if (info->si_addr == (void __user *)-1) {
		err = -EINVAL;
		goto err_out;
	}
	trace_mpx_bounds_register_exception(info->si_addr, bndreg);
	return info;
err_out:
	/* info might be NULL, but kfree() handles that */
	kfree(info);
	return ERR_PTR(err);
}

static __user void *mpx_get_bounds_dir(void)
{
	const struct mpx_bndcsr *bndcsr;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * The bounds directory pointer is stored in a register
	 * only accessible if we first do an xsave.
	 */
	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Make sure the register looks valid by checking the
	 * enable bit.
	 */
	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Lastly, mask off the low bits used for configuration
	 * flags, and return the address of the bounds directory.
	 */
	return (void __user *)(unsigned long)
		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
}

int mpx_enable_management(void)
{
	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
	struct mm_struct *mm = current->mm;
	int ret = 0;

	/*
	 * The userspace runtime is responsible for allocating the
	 * bounds directory. It then saves the base of the bounds
	 * directory into the XSAVE/XRSTOR save area and enables MPX
	 * through the XRSTOR instruction.
	 *
	 * The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
	 * expected to be relatively expensive. Storing the bounds
	 * directory here means that we do not have to do xsave in the
	 * unmap path; we can just use mm->context.bd_addr instead.
	 */
	bd_base = mpx_get_bounds_dir();
	down_write(&mm->mmap_sem);

	/* MPX doesn't support addresses above 47 bits yet. */
	if (find_vma(mm, DEFAULT_MAP_WINDOW)) {
		pr_warn_once("%s (%d): MPX cannot handle addresses "
				"above 47-bits. Disabling.\n",
				current->comm, current->pid);
		ret = -ENXIO;
		goto out;
	}
	mm->context.bd_addr = bd_base;
	if (mm->context.bd_addr == MPX_INVALID_BOUNDS_DIR)
		ret = -ENXIO;
out:
	up_write(&mm->mmap_sem);
	return ret;
}
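
/*
 * Note (an assumption; the wiring lives outside this file):
 * mpx_enable_management() and mpx_disable_management() are expected to
 * be reached via the arch_prctl() commands ARCH_MPX_ENABLE_MANAGEMENT
 * and ARCH_MPX_DISABLE_MANAGEMENT.
 */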

int mpx_disable_management(void)
{
	struct mm_struct *mm = current->mm;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return -ENXIO;

	down_write(&mm->mmap_sem);
	mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
	up_write(&mm->mmap_sem);
	return 0;
}

static int mpx_cmpxchg_bd_entry(struct mm_struct *mm,
		unsigned long *curval,
		unsigned long __user *addr,
		unsigned long old_val, unsigned long new_val)
{
	int ret;
	/*
	 * user_atomic_cmpxchg_inatomic() actually uses sizeof()
	 * the pointer that we pass to it to figure out how much
	 * data to cmpxchg.  We have to be careful here not to
	 * pass a pointer to a 64-bit data type when we only want
	 * a 32-bit copy.
	 */
	if (is_64bit_mm(mm)) {
		ret = user_atomic_cmpxchg_inatomic(curval,
				addr, old_val, new_val);
	} else {
		u32 uninitialized_var(curval_32);
		u32 old_val_32 = old_val;
		u32 new_val_32 = new_val;
		u32 __user *addr_32 = (u32 __user *)addr;

		ret = user_atomic_cmpxchg_inatomic(&curval_32,
				addr_32, old_val_32, new_val_32);
		*curval = curval_32;
	}
	return ret;
}
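
/*
 * Usage sketch for mpx_cmpxchg_bd_entry() (hypothetical values; this
 * mirrors allocate_bt() and unmap_entire_bt() below):
 *
 *	unsigned long cur = 0;
 *	ret = mpx_cmpxchg_bd_entry(mm, &cur, bd_entry, 0, new_entry);
 *
 * A nonzero 'ret' means we faulted while touching the userspace
 * address; a 'cur' different from the expected old value means the
 * cmpxchg itself did not happen.
 */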

/*
 * With 32-bit mode, a bounds directory is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, a bounds directory is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int allocate_bt(struct mm_struct *mm, long __user *bd_entry)
{
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	unsigned long bd_new_entry;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(mpx_bt_size_bytes(mm));
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bd_new_entry = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table in to the
	 * bounds directory entry out in userspace memory.  Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 */
	ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val, bd_entry,
				   expected_old_val, bd_new_entry);
	if (ret)
		goto out_unmap;

	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails.  Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry.  Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set.  Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data into a bounds directory entry.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	trace_mpx_new_bounds_table(bt_addr);
	return 0;
out_unmap:
	vm_munmap(bt_addr, mpx_bt_size_bytes(mm));
	return ret;
}

/*
 * When a BNDSTX instruction attempts to save bounds to a bounds
 * table, it will first attempt to look up the table in the
 * first-level bounds directory.  If it does not find a table in
 * the directory, a #BR is generated and we get here in order to
 * allocate a new table.
 *
 * With 32-bit mode, the size of BD is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, the size of BD is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int do_mpx_bt_fault(void)
{
	unsigned long bd_entry, bd_base;
	const struct mpx_bndcsr *bndcsr;
	struct mm_struct *mm = current->mm;

	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		return -EINVAL;
	/*
	 * Mask off the preserve and enable bits
	 */
	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
	/*
	 * The hardware provides the address of the missing or invalid
	 * entry via BNDSTATUS, so we don't have to go look it up.
	 */
	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
	/*
	 * Make sure the directory entry is within where we think
	 * the directory is.
	 */
	if ((bd_entry < bd_base) ||
	    (bd_entry >= bd_base + mpx_bd_size_bytes(mm)))
		return -EINVAL;

	return allocate_bt(mm, (long __user *)bd_entry);
}

int mpx_handle_bd_fault(void)
{
	/*
	 * Userspace never asked us to manage the bounds tables,
	 * so refuse to help.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return -EINVAL;

	return do_mpx_bt_fault();
}

/*
 * A thin wrapper around get_user_pages().  Returns 0 if the
 * fault was resolved or -errno if not.
 */
static int mpx_resolve_fault(long __user *addr, int write)
{
	long gup_ret;
	int nr_pages = 1;

	gup_ret = get_user_pages((unsigned long)addr, nr_pages,
			write ? FOLL_WRITE : 0,	NULL, NULL);
	/*
	 * get_user_pages() returns the number of pages pinned.
	 * 0 means we failed to fault in and get anything,
	 * probably because 'addr' is bad.
	 */
	if (!gup_ret)
		return -EFAULT;
	/* Other error, return it */
	if (gup_ret < 0)
		return gup_ret;
	/* must have gup'd a page and gup_ret>0, success */
	return 0;
}

static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
					     unsigned long bd_entry)
{
	unsigned long bt_addr = bd_entry;
	int align_to_bytes;
	/*
	 * Bit 0 in a bd_entry is always the valid bit.
	 */
	bt_addr &= ~MPX_BD_ENTRY_VALID_FLAG;
	/*
	 * Tables are naturally aligned at 8-byte boundaries
	 * on 64-bit and 4-byte boundaries on 32-bit.  The
	 * documentation makes it appear that the low bits
	 * are ignored by the hardware, so we do the same.
	 */
	if (is_64bit_mm(mm))
		align_to_bytes = 8;
	else
		align_to_bytes = 4;
	bt_addr &= ~(align_to_bytes-1);
	return bt_addr;
}
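
/*
 * Example (a hypothetical 64-bit entry): a bd_entry of 0x7f5c00001005
 * carries the valid bit plus a stray low bit; the masking above yields
 * a bt_addr of 0x7f5c00001000.
 */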

/*
 * We only want to do a 4-byte get_user() on 32-bit.  Otherwise,
 * we might run off the end of the bounds table if we are on
 * a 64-bit kernel and try to get 8 bytes.
 */
static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
		long __user *bd_entry_ptr)
{
	u32 bd_entry_32;
	int ret;

	if (is_64bit_mm(mm))
		return get_user(*bd_entry_ret, bd_entry_ptr);

	/*
	 * Note that get_user() uses the type of the *pointer* to
	 * establish the size of the get, not the destination.
	 */
	ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
	*bd_entry_ret = bd_entry_32;
	return ret;
}

/*
 * Get the base of the bounds table pointed to by a specific
 * bounds directory entry.
 */
static int get_bt_addr(struct mm_struct *mm,
			long __user *bd_entry_ptr,
			unsigned long *bt_addr_result)
{
	int ret;
	int valid_bit;
	unsigned long bd_entry;
	unsigned long bt_addr;

	if (!access_ok(VERIFY_READ, (bd_entry_ptr), sizeof(*bd_entry_ptr)))
		return -EFAULT;

	while (1) {
		int need_write = 0;

		pagefault_disable();
		ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry_ptr, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}

	valid_bit = bd_entry & MPX_BD_ENTRY_VALID_FLAG;
	bt_addr = mpx_bd_entry_to_bt_addr(mm, bd_entry);

	/*
	 * When the kernel is managing bounds tables, a bounds directory
	 * entry will either have a valid address (plus the valid bit)
	 * *OR* be completely empty. If we see a !valid entry *and* some
	 * data in the address field, we know something is wrong. This
	 * -EINVAL return will cause a SIGSEGV.
	 */
	if (!valid_bit && bt_addr)
		return -EINVAL;
	/*
	 * Do we have a completely zeroed bd entry?  That is OK.  It
	 * just means there was no bounds table for this memory.  Make
	 * sure to distinguish this from -EINVAL, which will cause
	 * a SEGV.
	 */
	if (!valid_bit)
		return -ENOENT;

	*bt_addr_result = bt_addr;
	return 0;
}

static inline int bt_entry_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BT_ENTRY_BYTES_64;
	else
		return MPX_BT_ENTRY_BYTES_32;
}

/*
 * Take a virtual address and turn it into the offset in bytes
 * inside of the bounds table where the bounds table entry
 * controlling 'addr' can be found.
 */
static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
		unsigned long addr)
{
	unsigned long bt_table_nr_entries;
	unsigned long offset = addr;

	if (is_64bit_mm(mm)) {
		/* Bottom 3 bits are ignored on 64-bit */
		offset >>= 3;
		bt_table_nr_entries = MPX_BT_NR_ENTRIES_64;
	} else {
		/* Bottom 2 bits are ignored on 32-bit */
		offset >>= 2;
		bt_table_nr_entries = MPX_BT_NR_ENTRIES_32;
	}
	/*
	 * We know the size of the table in to which we are
	 * indexing, and we have eliminated all the low bits
	 * which are ignored for indexing.
	 *
	 * Mask out all the high bits which we do not need
	 * to index in to the table.  Note that the tables
	 * are always powers of two so this gives us a proper
	 * mask.
	 */
	offset &= (bt_table_nr_entries-1);
	/*
	 * We now have an entry offset in terms of *entries* in
	 * the table.  We need to scale it back up to bytes.
	 */
	offset *= bt_entry_size_bytes(mm);
	return offset;
}
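
/*
 * Worked example (assuming the 64-bit constants: 2^17 entries of
 * 32 bytes each): for addr == 0x1000, the result is
 * ((0x1000 >> 3) & (MPX_BT_NR_ENTRIES_64 - 1)) * 32 = 0x200 * 32 =
 * 0x4000 bytes into the bounds table.
 */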

/*
 * How much virtual address space does a single bounds
 * directory entry cover?
 *
 * Note, we need a long long because 4GB doesn't fit in
 * to a long on 32-bit.
 */
static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
{
	unsigned long long virt_space;
	unsigned long long GB = (1ULL << 30);

	/*
	 * This covers 32-bit emulation as well as 32-bit kernels
	 * running on 64-bit hardware.
	 */
	if (!is_64bit_mm(mm))
		return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;

	/*
	 * 'x86_virt_bits' returns what the hardware is capable
	 * of, and returns the full >32-bit address space when
	 * running 32-bit kernels on 64-bit hardware.
	 */
	virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
	return virt_space / MPX_BD_NR_ENTRIES_64;
}
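
/*
 * Worked example (assuming 48 virtual address bits): 2^48 /
 * MPX_BD_NR_ENTRIES_64 (2^28) = 2^20, so each 64-bit directory entry
 * covers 1MB of virtual address space.  On 32-bit, 2^32 / 2^20 = 4KB
 * per entry.
 */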

/*
 * Free the backing physical pages of bounds table 'bt_addr'.
 * Assume start...end is within that bounds table.
 */
static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
		unsigned long bt_addr,
		unsigned long start_mapping, unsigned long end_mapping)
{
	struct vm_area_struct *vma;
	unsigned long addr, len;
	unsigned long start;
	unsigned long end;

	/*
	 * If we 'end' on a boundary, the offset will be 0 which
	 * is not what we want.  Back it up a byte to get the
	 * last bt entry.  Then once we have the entry itself,
	 * move 'end' back up by the table entry size.
	 */
	start = bt_addr + mpx_get_bt_entry_offset_bytes(mm, start_mapping);
	end   = bt_addr + mpx_get_bt_entry_offset_bytes(mm, end_mapping - 1);
	/*
	 * Move end back up by one entry.  Among other things
	 * this ensures that it remains page-aligned and does
	 * not screw up zap_page_range()
	 */
	end += bt_entry_size_bytes(mm);

	/*
	 * Find the first overlapping vma. If vma->vm_start > start, there
	 * will be a hole in the bounds table. This -EINVAL return will
	 * cause a SIGSEGV.
	 */
	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EINVAL;

	/*
	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
	 * be split. So we need to look across the entire 'start -> end'
	 * range of this bounds table, find all of the VM_MPX VMAs, and
	 * zap only those.
	 */
	addr = start;
	while (vma && vma->vm_start < end) {
		/*
		 * We followed a bounds directory entry down
		 * here.  If we find a non-MPX VMA, that's bad,
		 * so stop immediately and return an error.  This
		 * probably results in a SIGSEGV.
		 */
		if (!(vma->vm_flags & VM_MPX))
			return -EINVAL;

		len = min(vma->vm_end, end) - addr;
		zap_page_range(vma, addr, len);
		trace_mpx_unmap_zap(addr, addr+len);

		vma = vma->vm_next;
		if (!vma)
			break;
		addr = vma->vm_start;
	}
	return 0;
}

static unsigned long mpx_get_bd_entry_offset(struct mm_struct *mm,
		unsigned long addr)
{
	/*
	 * There are several ways to derive the bd offsets.  We
	 * use the following approach here:
	 * 1. We know the size of the virtual address space
	 * 2. We know the number of entries in a bounds table
	 * 3. We know that each entry covers a fixed amount of
	 *    virtual address space.
	 * So, we can just divide the virtual address by the
	 * virtual space used by one entry to determine which
	 * entry "controls" the given virtual address.
	 */
	if (is_64bit_mm(mm)) {
		int bd_entry_size = 8; /* 64-bit pointer */
		/*
		 * Take the 64-bit addressing hole in to account.
		 */
		addr &= ((1UL << boot_cpu_data.x86_virt_bits) - 1);
		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
	} else {
		int bd_entry_size = 4; /* 32-bit pointer */
		/*
		 * 32-bit has no hole so this case needs no mask
		 */
		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
	}
	/*
	 * The two return calls above are exact copies.  If we
	 * pull out a single copy and put it in here, gcc won't
	 * realize that we're doing a power-of-2 divide and use
	 * shifts.  It uses a real divide.  If we put them up
	 * there, it manages to figure it out (gcc 4.8.3).
	 */
}
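
/*
 * Worked example (using the 1MB-per-entry figure derived above for
 * 64-bit): addr == 0x500000 falls in directory entry 5, so the
 * returned offset is 5 * 8 = 40 bytes into the bounds directory.
 */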

static int unmap_entire_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long bt_addr)
{
	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	unsigned long uninitialized_var(actual_old_val);
	int ret;

	while (1) {
		int need_write = 1;
		unsigned long cleared_bd_entry = 0;

		pagefault_disable();
		ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
				bd_entry, expected_old_val, cleared_bd_entry);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}
	/*
	 * The cmpxchg was performed, check the results.
	 */
	if (actual_old_val != expected_old_val) {
		/*
		 * Someone else raced with us to unmap the table.
		 * That is OK, since we were both trying to do
		 * the same thing.  Declare success.
		 */
		if (!actual_old_val)
			return 0;
		/*
		 * Something messed with the bounds directory
		 * entry.  We hold mmap_sem for read or write
		 * here, so it could not be a _new_ bounds table
		 * that someone just allocated.  Something is
		 * wrong, so pass up the error and SIGSEGV.
		 */
		return -EINVAL;
	}
	/*
	 * Note, we are likely being called under do_munmap() already. To
	 * avoid recursion, do_munmap() checks whether the region being
	 * unmapped is itself a bounds table via the VM_MPX flag.
	 */
	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm), NULL);
}

static int try_unmap_single_bt(struct mm_struct *mm,
	       unsigned long start, unsigned long end)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	/*
	 * "bta" == Bounds Table Area: the area controlled by the
	 * bounds table that we are unmapping.
	 */
	unsigned long bta_start_vaddr = start & ~(bd_entry_virt_space(mm)-1);
	unsigned long bta_end_vaddr = bta_start_vaddr + bd_entry_virt_space(mm);
	unsigned long uninitialized_var(bt_addr);
	void __user *bde_vaddr;
	int ret;
	/*
	 * We already unlinked the VMAs from the mm's rbtree so 'start'
	 * is guaranteed to be in a hole. This gets us the first VMA
	 * before the hole in to 'prev' and the next VMA after the hole
	 * in to 'next'.
	 */
	next = find_vma_prev(mm, start, &prev);
	/*
	 * Do not count other MPX bounds table VMAs as neighbors.
	 * Although theoretically possible, we do not allow bounds
	 * tables for bounds tables so our heads do not explode.
	 * If we count them as neighbors here, we may end up with
	 * lots of tables even though we have no actual table
	 * entries in use.
	 */
	while (next && (next->vm_flags & VM_MPX))
		next = next->vm_next;
	while (prev && (prev->vm_flags & VM_MPX))
		prev = prev->vm_prev;
	/*
	 * We know 'start' and 'end' lie within an area controlled
	 * by a single bounds table.  See if there are any other
	 * VMAs controlled by that bounds table.  If there are not
	 * then we can "expand" the area we are unmapping to possibly
	 * cover the entire table.
	 */
	if ((!prev || prev->vm_end <= bta_start_vaddr) &&
	    (!next || next->vm_start >= bta_end_vaddr)) {
		/*
		 * No neighbor VMAs controlled by same bounds
		 * table.  Try to unmap the whole thing
		 */
		start = bta_start_vaddr;
		end = bta_end_vaddr;
950 951
	}

	bde_vaddr = mm->context.bd_addr + mpx_get_bd_entry_offset(mm, start);
	ret = get_bt_addr(mm, bde_vaddr, &bt_addr);
	/*
	 * No bounds table there, so nothing to unmap.
	 */
	if (ret == -ENOENT)
		return 0;
	if (ret)
		return ret;
	/*
	 * We are unmapping an entire table.  Either the unmap that
	 * started this whole process was large enough to cover an
	 * entire table, or the unmap was small but happened to cover
	 * the entire area served by a bounds table.
	 */
	if ((start == bta_start_vaddr) &&
	    (end == bta_end_vaddr))
		return unmap_entire_bt(mm, bde_vaddr, bt_addr);
	return zap_bt_entries_mapping(mm, bt_addr, start, end);
}

static int mpx_unmap_tables(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned long one_unmap_start;
	trace_mpx_unmap_search(start, end);

	one_unmap_start = start;
	while (one_unmap_start < end) {
		int ret;
		unsigned long next_unmap_start = ALIGN(one_unmap_start+1,
						       bd_entry_virt_space(mm));
		unsigned long one_unmap_end = end;
		/*
		 * If the end is beyond the current bounds table,
		 * move it back so we only deal with a single one
		 * at a time.
		 */
		if (one_unmap_end > next_unmap_start)
			one_unmap_end = next_unmap_start;
		ret = try_unmap_single_bt(mm, one_unmap_start, one_unmap_end);
		if (ret)
			return ret;

		one_unmap_start = next_unmap_start;
	}
	return 0;
}

/*
 * Free unused bounds tables covered in a virtual address region being
 * munmap()ed. Assume end > start.
 *
 * This function will be called by do_munmap(), and the VMAs covering
 * the virtual address region start...end have already been split if
 * necessary, and the 'vma' is the first vma in this range (start -> end).
 */
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int ret;

	/*
	 * Refuse to do anything unless userspace has asked
	 * the kernel to help manage the bounds tables.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return;
	/*
	 * This will look across the entire 'start -> end' range,
	 * and find all of the non-VM_MPX VMAs.
	 *
	 * To avoid recursion, if a VM_MPX vma is found in the range
	 * (start->end), we will not continue follow-up work. This
	 * recursion represents having bounds tables for bounds tables,
	 * which should not occur normally. Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
	 */
	do {
		if (vma->vm_flags & VM_MPX)
			return;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);

	ret = mpx_unmap_tables(mm, start, end);
	if (ret)
		force_sig(SIGSEGV, current);
}

/* MPX cannot handle addresses above 47 bits yet. */
unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
		unsigned long flags)
{
	if (!kernel_managing_mpx_tables(current->mm))
		return addr;
	if (addr + len <= DEFAULT_MAP_WINDOW)
		return addr;
	if (flags & MAP_FIXED)
		return -ENOMEM;

	/*
	 * Requested len is larger than the whole area we're allowed to map in.
	 * Resetting hinting address wouldn't do much good -- fail early.
	 */
	if (len > DEFAULT_MAP_WINDOW)
		return -ENOMEM;

	/* Look for an unmapped area within DEFAULT_MAP_WINDOW */
	return 0;
}