// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>

9 10 11 12 13 14 15 16
#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
17

18
#include <linux/objtool.h>
19 20
#include <linux/hashtable.h>
#include <linux/kernel.h>
21
#include <linux/static_call_types.h>
22 23 24 25

struct alternative {
	struct list_head list;
	struct instruction *insn;
P
Peter Zijlstra 已提交
26
	bool skip_orig;
27 28
};

29
/* CFI state at function entry, filled in by the arch code at init time. */
struct cfi_init_state initial_func_cfi;
30

31 32
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
33 34 35
{
	struct instruction *insn;

36
	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
37 38
		if (insn->sec == sec && insn->offset == offset)
			return insn;
39
	}
40 41 42 43 44 45 46 47 48

	return NULL;
}

static struct instruction *next_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);

49
	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
50 51 52 53 54
		return NULL;

	return next;
}

55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

75 76 77 78 79 80 81 82 83 84 85
static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *prev = list_prev_entry(insn, list);

	if (&prev->list != &file->insn_list && prev->func == insn->func)
		return prev;

	return NULL;
}

/* Iterate all instructions of @func, following into its .cold subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate only the instructions covered by symbol @sym (no subfunctions). */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
		insn->sec == sym->sec &&				\
		insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

/* Iterate backward from (but excluding) @insn to the start of symbol @sym. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
		insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

/* Iterate from @insn (inclusive) to the end of its section. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Iterate from the instruction after @insn to the end of its section. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
110

111 112 113 114 115 116 117 118 119 120 121 122
static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn->jump_table)
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       alt_group->orig_group->first_insn->jump_table;
}

123 124
static bool is_sibling_call(struct instruction *insn)
{
125 126 127 128 129 130 131 132
	/*
	 * Assume only ELF functions can make sibling calls.  This ensures
	 * sibling call detection consistency between vmlinux.o and individual
	 * objects.
	 */
	if (!insn->func)
		return false;

133 134
	/* An indirect jump is either a sibling call or a jump to a table. */
	if (insn->type == INSN_JUMP_DYNAMIC)
135
		return !is_jump_table_jump(insn);
136 137

	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
138
	return (is_static_jump(insn) && insn->call_dest);
139 140
}

141 142 143 144 145 146 147 148 149
/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
150 151
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data.
	 */
	static const char * const global_noreturns[] = {
		"__stack_chk_fail",
		"panic",
		"do_exit",
		"do_task_dead",
		"__module_put_and_exit",
		"complete_and_exit",
		"__reiserfs_panic",
		"lbug_with_loc",
		"fortify_panic",
171
		"usercopy_abort",
172
		"machine_real_restart",
173
		"rewind_stack_do_exit",
174
		"kunit_try_catch_throw",
175
		"xen_start_kernel",
176 177
	};

178 179 180
	if (!func)
		return false;

181
	if (func->bind == STB_WEAK)
182
		return false;
183 184 185 186

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
187
				return true;
188

189
	if (!func->len)
190
		return false;
191

192 193
	insn = find_insn(file, func->sec, func->offset);
	if (!insn->func)
194
		return false;
195

196
	func_for_each_insn(file, func, insn) {
197 198 199
		empty = false;

		if (insn->type == INSN_RETURN)
200
			return false;
201 202 203
	}

	if (empty)
204
		return false;
205 206 207 208 209 210

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
211
	func_for_each_insn(file, func, insn) {
212
		if (is_sibling_call(insn)) {
213 214 215 216
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
217
				return false;
218

219 220 221 222 223 224 225 226
			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
227 228
			}

229 230
			return __dead_end_function(file, dest->func, recursion+1);
		}
231 232
	}

233
	return true;
234 235
}

236
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
237 238 239 240
{
	return __dead_end_function(file, func, 0);
}

241
static void init_cfi_state(struct cfi_state *cfi)
242 243 244
{
	int i;

245
	for (i = 0; i < CFI_NUM_REGS; i++) {
246 247
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
248
	}
249 250 251 252 253
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}

254
static void init_insn_state(struct insn_state *state, struct section *sec)
255 256 257
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);
258 259 260 261 262 263

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we can
	 * not correctly determine insn->call_dest->sec (external symbols do
	 * not have a section).
	 */
264
	if (vmlinux && noinstr && sec)
265
		state->noinstr = sec->noinstr;
266 267
}

268 269 270 271 272 273 274 275 276 277
/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
P
Peter Zijlstra 已提交
278
	unsigned long nr_insns = 0;
279 280
	int ret;

281
	for_each_sec(file, sec) {
282 283 284 285

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

286 287 288 289 290
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

291 292
		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text"))
293 294
			sec->noinstr = true;

295 296
		for (offset = 0; offset < sec->len; offset += insn->len) {
			insn = malloc(sizeof(*insn));
297 298 299 300
			if (!insn) {
				WARN("malloc failed");
				return -1;
			}
301 302
			memset(insn, 0, sizeof(*insn));
			INIT_LIST_HEAD(&insn->alts);
303
			INIT_LIST_HEAD(&insn->stack_ops);
304
			init_cfi_state(&insn->cfi);
305

306 307 308 309 310 311
			insn->sec = sec;
			insn->offset = offset;

			ret = arch_decode_instruction(file->elf, sec, offset,
						      sec->len - offset,
						      &insn->len, &insn->type,
312
						      &insn->immediate,
313
						      &insn->stack_ops);
314
			if (ret)
315
				goto err;
316

317
			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
318
			list_add_tail(&insn->list, &file->insn_list);
P
Peter Zijlstra 已提交
319
			nr_insns++;
320 321 322
		}

		list_for_each_entry(func, &sec->symbol_list, list) {
323
			if (func->type != STT_FUNC || func->alias != func)
324 325 326 327 328 329 330 331
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

332
			sym_for_each_insn(file, func, insn)
333
				insn->func = func;
334 335 336
		}
	}

P
Peter Zijlstra 已提交
337 338 339
	if (stats)
		printf("nr_insns: %lu\n", nr_insns);

340
	return 0;
341 342 343 344

err:
	free(insn);
	return ret;
345 346
}

347 348 349 350 351 352 353 354 355 356 357 358 359
static struct instruction *find_last_insn(struct objtool_file *file,
					  struct section *sec)
{
	struct instruction *insn = NULL;
	unsigned int offset;
	unsigned int end = (sec->len > 10) ? sec->len - 10 : 0;

	for (offset = sec->len - 1; offset >= end && !insn; offset--)
		insn = find_insn(file, sec, offset);

	return insn;
}

360
/*
361
 * Mark "ud2" instructions and manually annotated dead ends.
362 363 364 365
 */
static int add_dead_ends(struct objtool_file *file)
{
	struct section *sec;
M
Matt Helsley 已提交
366
	struct reloc *reloc;
367 368
	struct instruction *insn;

369 370 371 372 373 374 375 376 377 378 379
	/*
	 * By default, "ud2" is a dead end unless otherwise annotated, because
	 * GCC 7 inserts it for certain divide-by-zero cases.
	 */
	for_each_insn(file, insn)
		if (insn->type == INSN_BUG)
			insn->dead_end = true;

	/*
	 * Check for manually annotated dead ends.
	 */
380 381
	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
	if (!sec)
382
		goto reachable;
383

M
Matt Helsley 已提交
384 385
	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
386 387 388
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
M
Matt Helsley 已提交
389
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
390 391
		if (insn)
			insn = list_prev_entry(insn, list);
M
Matt Helsley 已提交
392 393
		else if (reloc->addend == reloc->sym->sec->len) {
			insn = find_last_insn(file, reloc->sym->sec);
394
			if (!insn) {
395
				WARN("can't find unreachable insn at %s+0x%x",
M
Matt Helsley 已提交
396
				     reloc->sym->sec->name, reloc->addend);
397 398 399 400
				return -1;
			}
		} else {
			WARN("can't find unreachable insn at %s+0x%x",
M
Matt Helsley 已提交
401
			     reloc->sym->sec->name, reloc->addend);
402 403 404 405 406 407
			return -1;
		}

		insn->dead_end = true;
	}

408 409 410 411 412 413 414 415 416 417 418
reachable:
	/*
	 * These manually annotated reachable checks are needed for GCC 4.4,
	 * where the Linux unreachable() macro isn't supported.  In that case
	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
	 * not a dead end.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
	if (!sec)
		return 0;

M
Matt Helsley 已提交
419 420
	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
421 422 423
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
M
Matt Helsley 已提交
424
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
425 426
		if (insn)
			insn = list_prev_entry(insn, list);
M
Matt Helsley 已提交
427 428
		else if (reloc->addend == reloc->sym->sec->len) {
			insn = find_last_insn(file, reloc->sym->sec);
429
			if (!insn) {
430
				WARN("can't find reachable insn at %s+0x%x",
M
Matt Helsley 已提交
431
				     reloc->sym->sec->name, reloc->addend);
432 433 434 435
				return -1;
			}
		} else {
			WARN("can't find reachable insn at %s+0x%x",
M
Matt Helsley 已提交
436
			     reloc->sym->sec->name, reloc->addend);
437 438 439 440 441 442
			return -1;
		}

		insn->dead_end = false;
	}

443 444 445
	return 0;
}

446 447
static int create_static_call_sections(struct objtool_file *file)
{
448
	struct section *sec;
449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465
	struct static_call_site *site;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
466
	list_for_each_entry(insn, &file->static_call_list, call_node)
467 468 469 470 471 472 473 474
		idx++;

	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
				 sizeof(struct static_call_site), idx);
	if (!sec)
		return -1;

	idx = 0;
475
	list_for_each_entry(insn, &file->static_call_list, call_node) {
476 477 478 479 480

		site = (struct static_call_site *)sec->data->d_buf + idx;
		memset(site, 0, sizeof(struct static_call_site));

		/* populate reloc for 'addr' */
481 482 483 484
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(struct static_call_site),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
485
			return -1;
486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502

		/* find key symbol */
		key_name = strdup(insn->call_dest->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			return -1;
		}
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
503 504 505 506 507 508 509 510 511 512 513 514 515 516 517
			if (!module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules(), the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address.  This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn->call_dest;
518 519 520 521
		}
		free(key_name);

		/* populate reloc for 'key' */
522 523 524 525
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
526 527 528 529 530 531 532 533
			return -1;

		idx++;
	}

	return 0;
}

534 535
static int create_mcount_loc_sections(struct objtool_file *file)
{
536
	struct section *sec;
537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564
	unsigned long *loc;
	struct instruction *insn;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node) {

		loc = (unsigned long *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned long));

565 566 567 568
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned long),
					  R_X86_64_64,
					  insn->sec, insn->offset))
569 570 571 572 573 574 575 576
			return -1;

		idx++;
	}

	return 0;
}

577 578 579 580 581 582 583 584
/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
M
Matt Helsley 已提交
585
	struct reloc *reloc;
586

P
Peter Zijlstra 已提交
587 588 589
	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;
590

M
Matt Helsley 已提交
591 592
	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
P
Peter Zijlstra 已提交
593
		case STT_FUNC:
M
Matt Helsley 已提交
594
			func = reloc->sym;
P
Peter Zijlstra 已提交
595 596 597
			break;

		case STT_SECTION:
M
Matt Helsley 已提交
598
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
599
			if (!func)
600
				continue;
P
Peter Zijlstra 已提交
601
			break;
602

P
Peter Zijlstra 已提交
603
		default:
M
Matt Helsley 已提交
604
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
P
Peter Zijlstra 已提交
605
			continue;
606
		}
P
Peter Zijlstra 已提交
607

608
		func_for_each_insn(file, func, insn)
P
Peter Zijlstra 已提交
609
			insn->ignore = true;
610 611 612
	}
}

P
Peter Zijlstra 已提交
613 614 615 616 617 618 619 620 621 622
/*
 * This is a whitelist of functions that is allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
623
	"kasan_check_range",
P
Peter Zijlstra 已提交
624 625 626 627 628 629 630 631 632 633 634 635 636
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
637 638
	"__kasan_check_read",
	"__kasan_check_write",
P
Peter Zijlstra 已提交
639 640 641 642 643 644 645 646 647 648 649 650 651
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
652
	/* KCSAN */
653
	"__kcsan_check_access",
654 655
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
656
	"kcsan_check_scoped_accesses",
657 658
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
659 660 661 662 663 664 665 666 667 668 669 670 671 672 673
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
674 675 676 677 678
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
P
Peter Zijlstra 已提交
729 730
	/* KCOV */
	"write_comp_data",
731
	"check_kcov_mode",
P
Peter Zijlstra 已提交
732 733 734 735 736 737 738 739 740
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
741
	"__sanitizer_cov_trace_switch",
P
Peter Zijlstra 已提交
742 743 744 745
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
746
	"__ubsan_handle_shift_out_of_bounds",
P
Peter Zijlstra 已提交
747 748
	/* misc */
	"csum_partial_copy_generic",
749 750
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
751
	"copy_mc_enhanced_fast_string",
P
Peter Zijlstra 已提交
752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	NULL
};

static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

769
		func->uaccess_safe = true;
770 771 772
	}
}

773 774 775 776 777 778
/*
 * FIXME: For now, just ignore any alternatives which add retpolines.  This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
779
static int add_ignore_alternatives(struct objtool_file *file)
780 781
{
	struct section *sec;
M
Matt Helsley 已提交
782
	struct reloc *reloc;
783 784
	struct instruction *insn;

785
	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
786 787 788
	if (!sec)
		return 0;

M
Matt Helsley 已提交
789 790
	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
791 792 793 794
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

M
Matt Helsley 已提交
795
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
796
		if (!insn) {
797
			WARN("bad .discard.ignore_alts entry");
798 799 800 801 802 803 804 805 806
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}

807 808 809 810 811
/*
 * Weak default: arches with retpolines override this to recognize
 * retpoline thunk symbols by name.
 */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830
#define NEGATIVE_RELOC	((void *)-1L)

static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		if (!insn->reloc) {
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}

831 832 833 834 835 836
/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
M
Matt Helsley 已提交
837
	struct reloc *reloc;
838 839 840 841
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
842
		if (!is_static_jump(insn))
843 844
			continue;

845
		reloc = insn_reloc(file, insn);
M
Matt Helsley 已提交
846
		if (!reloc) {
847
			dest_sec = insn->sec;
848
			dest_off = arch_jump_destination(insn);
M
Matt Helsley 已提交
849 850 851
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
852
		} else if (arch_is_retpoline(reloc->sym)) {
853 854 855 856
			/*
			 * Retpoline jumps are really dynamic jumps in
			 * disguise, so convert them accordingly.
			 */
857 858 859 860 861
			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				insn->type = INSN_JUMP_DYNAMIC;
			else
				insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;

862 863 864
			list_add_tail(&insn->call_node,
				      &file->retpoline_call_list);

865
			insn->retpoline_safe = true;
866
			continue;
867 868
		} else if (insn->func) {
			/* internal or external sibling call (with reloc) */
M
Matt Helsley 已提交
869
			insn->call_dest = reloc->sym;
P
Peter Zijlstra 已提交
870
			if (insn->call_dest->static_call_tramp) {
871
				list_add_tail(&insn->call_node,
P
Peter Zijlstra 已提交
872 873
					      &file->static_call_list);
			}
874
			continue;
875 876 877 878 879 880 881
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899
		}

		insn->jump_dest = find_insn(file, dest_sec, dest_off);
		if (!insn->jump_dest) {

			/*
			 * This is a special case where an alt instruction
			 * jumps past the end of the section.  These are
			 * handled later in handle_group_alt().
			 */
			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
				continue;

			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
				  insn->sec, insn->offset, dest_sec->name,
				  dest_off);
			return -1;
		}
900 901

		/*
902
		 * Cross-function jump.
903 904
		 */
		if (insn->func && insn->jump_dest->func &&
905 906 907 908 909 910 911 912 913 914 915 916 917 918 919
		    insn->func != insn->jump_dest->func) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions.  This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent.  In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
920
			 * subfunction is through a jump table.
921
			 */
922 923
			if (!strstr(insn->func->name, ".cold") &&
			    strstr(insn->jump_dest->func->name, ".cold")) {
924 925 926 927 928 929
				insn->func->cfunc = insn->jump_dest->func;
				insn->jump_dest->func->pfunc = insn->func;

			} else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
				   insn->jump_dest->offset == insn->jump_dest->func->offset) {

930
				/* internal sibling call (without reloc) */
931
				insn->call_dest = insn->jump_dest->func;
P
Peter Zijlstra 已提交
932
				if (insn->call_dest->static_call_tramp) {
933
					list_add_tail(&insn->call_node,
P
Peter Zijlstra 已提交
934 935
						      &file->static_call_list);
				}
936
			}
937
		}
938 939 940 941 942
	}

	return 0;
}

943 944 945 946 947 948 949 950 951 952
static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
		list_del(&op->list);
		free(op);
	}
}

/*
 * Resolve a call target at @sec + @offset: prefer a function symbol,
 * fall back to any symbol covering that offset.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *sym = find_func_by_offset(sec, offset);

	if (sym)
		return sym;

	return find_symbol_by_offset(sec, offset);
}

964 965 966 967 968 969 970
/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
M
Matt Helsley 已提交
971
	struct reloc *reloc;
972 973 974 975 976

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

977
		reloc = insn_reloc(file, insn);
M
Matt Helsley 已提交
978
		if (!reloc) {
979
			dest_off = arch_jump_destination(insn);
980
			insn->call_dest = find_call_destination(insn->sec, dest_off);
981

982 983 984 985
			if (insn->ignore)
				continue;

			if (!insn->call_dest) {
986
				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
987 988
				return -1;
			}
989

990 991 992 993 994 995
			if (insn->func && insn->call_dest->type != STT_FUNC) {
				WARN_FUNC("unsupported call to non-function",
					  insn->sec, insn->offset);
				return -1;
			}

M
Matt Helsley 已提交
996 997
		} else if (reloc->sym->type == STT_SECTION) {
			dest_off = arch_dest_reloc_offset(reloc->addend);
998 999
			insn->call_dest = find_call_destination(reloc->sym->sec,
								dest_off);
1000
			if (!insn->call_dest) {
1001
				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
1002
					  insn->sec, insn->offset,
M
Matt Helsley 已提交
1003
					  reloc->sym->sec->name,
1004
					  dest_off);
1005 1006
				return -1;
			}
1007

1008
		} else if (arch_is_retpoline(reloc->sym)) {
1009 1010 1011 1012 1013 1014 1015
			/*
			 * Retpoline calls are really dynamic calls in
			 * disguise, so convert them accordingly.
			 */
			insn->type = INSN_CALL_DYNAMIC;
			insn->retpoline_safe = true;

1016 1017 1018
			list_add_tail(&insn->call_node,
				      &file->retpoline_call_list);

1019 1020 1021
			remove_insn_ops(insn);
			continue;

1022
		} else
M
Matt Helsley 已提交
1023
			insn->call_dest = reloc->sym;
1024

1025
		if (insn->call_dest && insn->call_dest->static_call_tramp) {
1026
			list_add_tail(&insn->call_node,
1027 1028 1029
				      &file->static_call_list);
		}

P
Peter Zijlstra 已提交
1030 1031 1032 1033 1034 1035 1036
		/*
		 * Many compilers cannot disable KCOV with a function attribute
		 * so they need a little help, NOP out any KCOV calls from noinstr
		 * text.
		 */
		if (insn->sec->noinstr &&
		    !strncmp(insn->call_dest->name, "__sanitizer_cov_", 16)) {
1037 1038 1039
			if (reloc) {
				reloc->type = R_NONE;
				elf_write_reloc(file->elf, reloc);
P
Peter Zijlstra 已提交
1040 1041 1042 1043 1044 1045 1046 1047
			}

			elf_write_insn(file->elf, insn->sec,
				       insn->offset, insn->len,
				       arch_nop_insn(insn->len));
			insn->type = INSN_NOP;
		}

1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063
		if (mcount && !strcmp(insn->call_dest->name, "__fentry__")) {
			if (reloc) {
				reloc->type = R_NONE;
				elf_write_reloc(file->elf, reloc);
			}

			elf_write_insn(file->elf, insn->sec,
				       insn->offset, insn->len,
				       arch_nop_insn(insn->len));

			insn->type = INSN_NOP;

			list_add_tail(&insn->mcount_loc_node,
				      &file->mcount_loc_list);
		}

1064 1065 1066 1067 1068 1069 1070 1071
		/*
		 * Whatever stack impact regular CALLs have, should be undone
		 * by the RETURN of the called function.
		 *
		 * Annotated intra-function calls retain the stack_ops but
		 * are converted to JUMP, see read_intra_function_calls().
		 */
		remove_insn_ops(insn);
1072 1073 1074 1075 1076 1077
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;

	orig_alt_group = malloc(sizeof(*orig_alt_group));
	if (!orig_alt_group) {
		WARN("malloc failed");
		return -1;
	}
	/*
	 * One CFI slot per original byte; the replacement group shares this
	 * array (see the ->cfi assignment at the end).
	 */
	orig_alt_group->cfi = calloc(special_alt->orig_len,
				     sizeof(struct cfi_state *));
	if (!orig_alt_group->cfi) {
		WARN("calloc failed");
		return -1;
	}

	/* Tag every original instruction as a member of the orig group: */
	last_orig_insn = NULL;
	insn = orig_insn;
	sec_for_each_insn_from(file, insn) {
		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
			break;

		insn->alt_group = orig_alt_group;
		last_orig_insn = insn;
	}
	orig_alt_group->orig_group = NULL;
	orig_alt_group->first_insn = orig_insn;
	orig_alt_group->last_insn = last_orig_insn;

	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original.  This is needed to
		 * allow propagate_alt_cfi() to do its magic.  When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));
		INIT_LIST_HEAD(&nop->alts);
		INIT_LIST_HEAD(&nop->stack_ops);
		init_cfi_state(&nop->cfi);

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->func = orig_insn->func;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}

	/* An empty replacement consists solely of the padding nop (if any): */
	if (!special_alt->new_len) {
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->func = orig_insn->func;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_FUNC("unsupported relocation in alternatives section",
				  insn->sec, insn->offset);
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		dest_off = arch_jump_destination(insn);
		/*
		 * A jump to just past the end of the replacement means "jump
		 * past the patched site", i.e. to the instruction following
		 * the original group.
		 */
		if (dest_off == special_alt->new_off + special_alt->new_len)
			insn->jump_dest = next_insn_same_sec(file, last_orig_insn);

		if (!insn->jump_dest) {
			WARN_FUNC("can't find alternative jump destination",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

	if (nop)
		list_add(&nop->list, &last_new_insn->list);
end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = nop ? : last_new_insn;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}

/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		WARN_FUNC("unsupported instruction at jump label",
			  orig_insn->sec, orig_insn->offset);
		return -1;
	}

	/*
	 * key_addend bit 1 set: rewrite the site as a nop directly in the
	 * object file (drop the reloc, overwrite the bytes with a nop).
	 */
	if (special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}
		elf_write_insn(file->elf, orig_insn->sec,
			       orig_insn->offset, orig_insn->len,
			       arch_nop_insn(orig_insn->len));
		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		/* Stats: 2-byte vs longer nop jump-label sites. */
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	/* The "alternative" is simply the instruction after the jump: */
	*new_insn = list_next_entry(orig_insn, list);
	return 0;
}

/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime.  Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		/* Grouped alts with no replacement get new_insn from the handler: */
		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_FUNC("empty alternative entry",
					  orig_insn->sec, orig_insn->offset);
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		list_add_tail(&alt->list, &orig_insn->alts);

		/* The special_alt has been consumed; free the list node. */
		list_del(&special_alt->list);
		free(special_alt);
	}

	if (stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

out:
	return ret;
}

1353
/*
 * Expand one switch jump table: record each table target as an alternative
 * branch destination of @insn (the indirect jump), so validate_branch() can
 * follow every case.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			    struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn->func->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		/* NOTE(review): assumes 8-byte table entries — confirm for non-x86-64. */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	/* prev_offset == 0 means not a single valid entry was found: */
	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}

/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static struct reloc *find_jump_table(struct objtool_file *file,
				      struct symbol *func,
				      struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code. Which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn->func && insn->func->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		/* A prior dynamic jump belongs to a different table; stop. */
		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
		    break;

		table_reloc = arch_find_switch_table(file, insn);
		if (!table_reloc)
			continue;
		/* The table's first target must land back inside @func: */
		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
		if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
			continue;

		return table_reloc;
	}

	return NULL;
}

1452 1453 1454 1455 1456 1457
/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				    struct symbol *func)
{
	struct instruction *insn, *last = NULL;
	struct reloc *reloc;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		/* Flag the table head and remember it on the jump insn: */
		reloc = find_jump_table(file, func, insn);
		if (reloc) {
			reloc->jump_table_start = true;
			insn->jump_table = reloc;
		}
	}
}

static int add_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn;
	int ret;

1497
	func_for_each_insn(file, func, insn) {
1498 1499
		if (!insn->jump_table)
			continue;
1500

1501
		ret = add_jump_table(file, insn, insn->jump_table);
1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * For some switch statements, gcc generates a jump table in the .rodata
 * section which contains a list of addresses within the function to jump to.
 * This finds these jump tables and adds them to the insn->alts lists.
 */
1514
static int add_jump_table_alts(struct objtool_file *file)
1515 1516 1517 1518 1519
{
	struct section *sec;
	struct symbol *func;
	int ret;

1520
	if (!file->rodata)
1521 1522
		return 0;

1523
	for_each_sec(file, sec) {
1524 1525 1526 1527
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC)
				continue;

1528
			mark_func_jump_tables(file, func);
1529
			ret = add_func_jump_tables(file, func);
1530 1531 1532 1533 1534 1535 1536 1537
			if (ret)
				return ret;
		}
	}

	return 0;
}

1538 1539 1540 1541 1542 1543 1544 1545
static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	state->stack_size = initial_func_cfi.cfa.offset;
}

1546 1547
/*
 * Parse .discard.unwind_hints and seed each annotated instruction with the
 * unwind state asserted by its hint entry.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct section *sec, *relocsec;
	struct reloc *reloc;
	struct unwind_hint *hint;
	struct instruction *insn;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relocsec = sec->reloc;
	if (!relocsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	if (sec->len % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		/* Each hint entry has a reloc locating the annotated insn: */
		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		/* FUNC hints reset to the standard function-entry state: */
		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			set_func_state(&insn->cfi);
			continue;
		}

		if (arch_decode_hint_reg(insn, hint->sp_reg)) {
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		/* sp_offset is stored in target endianness in the ELF: */
		insn->cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
		insn->cfi.type = hint->type;
		insn->cfi.end = hint->end;
	}

	return 0;
}

1607 1608
/*
 * Set ->retpoline_safe on every indirect jump/call recorded in
 * .rela.discard.retpoline_safe.
 */
static int read_retpoline_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.retpoline_safe entry");
			return -1;
		}

		/* The annotation only makes sense on indirect branches: */
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC) {
			WARN_FUNC("retpoline_safe hint not an indirect jump/call",
				  insn->sec, insn->offset);
			return -1;
		}

		insn->retpoline_safe = true;
	}

	return 0;
}

1642 1643 1644 1645
static int read_instr_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
M
Matt Helsley 已提交
1646
	struct reloc *reloc;
1647 1648 1649 1650 1651

	sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
	if (!sec)
		return 0;

M
Matt Helsley 已提交
1652 1653
	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
1654 1655 1656 1657
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

M
Matt Helsley 已提交
1658
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670
		if (!insn) {
			WARN("bad .discard.instr_end entry");
			return -1;
		}

		insn->instr--;
	}

	sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
	if (!sec)
		return 0;

M
Matt Helsley 已提交
1671 1672
	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
1673 1674 1675 1676
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

M
Matt Helsley 已提交
1677
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688
		if (!insn) {
			WARN("bad .discard.instr_begin entry");
			return -1;
		}

		insn->instr++;
	}

	return 0;
}

1689 1690 1691 1692
/*
 * Convert calls annotated in .rela.discard.intra_function_calls into
 * unconditional jumps with a resolved jump_dest, keeping their stack_ops.
 */
static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		unsigned long dest_off;

		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_call entry");
			return -1;
		}

		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

		/* Destination is relative to the end of the call insn: */
		dest_off = insn->offset + insn->len + insn->immediate;
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}

1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756
/*
 * Flag every global symbol whose name starts with the static-call trampoline
 * prefix, so later passes can treat calls to it specially.
 */
static int read_static_call_tramps(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;

	for_each_sec(file, sec) {
		list_for_each_entry(sym, &sec->symbol_list, list) {
			if (sym->bind != STB_GLOBAL)
				continue;
			if (strncmp(sym->name, STATIC_CALL_TRAMP_PREFIX_STR,
				    strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
				continue;

			sym->static_call_tramp = true;
		}
	}

	return 0;
}

1757 1758 1759 1760 1761 1762
static void mark_rodata(struct objtool_file *file)
{
	struct section *sec;
	bool found = false;

	/*
	 * Search for the following rodata sections, each of which can
	 * potentially contain jump tables:
	 *
	 * - .rodata: can contain GCC switch tables
	 * - .rodata.<func>: same, if -fdata-sections is being used
	 * - .rodata..c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
	 */
	for_each_sec(file, sec) {
		bool is_rodata = !strncmp(sec->name, ".rodata", 7) &&
				 !strstr(sec->name, ".str1.");

		if (!is_rodata)
			continue;

		sec->rodata = true;
		found = true;
	}

	file->rodata = found;
}

1783 1784 1785 1786 1787
/* Weak default: arch code may override to rewrite retpoline sites. */
__weak int arch_rewrite_retpolines(struct objtool_file *file)
{
	return 0;
}

1788 1789 1790 1791
/*
 * Run all decode/annotation passes in dependency order.  The ordering
 * constraints between passes are documented inline below; do not reorder
 * without checking them.
 */
static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = decode_instructions(file);
	if (ret)
		return ret;

	ret = add_dead_ends(file);
	if (ret)
		return ret;

	add_ignores(file);
	add_uaccess_safe(file);

	ret = add_ignore_alternatives(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	ret = read_static_call_tramps(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_special_section_alts() as that depends on
	 * jump_dest being set.
	 */
	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	ret = add_special_section_alts(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	/*
	 * Must be after add_special_section_alts(), since this will emit
	 * alternatives. Must be after add_{jump,call}_destination(), since
	 * those create the call insn lists.
	 */
	ret = arch_rewrite_retpolines(file);
	if (ret)
		return ret;

	return 0;
}

static bool is_fentry_call(struct instruction *insn)
{
1870
	if (insn->type == INSN_CALL && insn->call_dest &&
1871 1872 1873 1874 1875 1876 1877
	    insn->call_dest->type == STT_NOTYPE &&
	    !strcmp(insn->call_dest->name, "__fentry__"))
		return true;

	return false;
}

1878
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
1879
{
1880
	struct cfi_state *cfi = &state->cfi;
1881 1882
	int i;

1883
	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
1884 1885
		return true;

1886
	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
1887 1888
		return true;

1889
	if (cfi->stack_size != initial_func_cfi.cfa.offset)
1890 1891 1892
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
1893 1894
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
1895
			return true;
1896
	}
1897 1898 1899 1900

	return false;
}

1901 1902 1903 1904 1905 1906 1907
/* Is @reg saved at CFA + @expected_offset? */
static bool check_reg_frame_pos(const struct cfi_reg *reg,
				int expected_offset)
{
	if (reg->base != CFI_CFA)
		return false;

	return reg->offset == expected_offset;
}

1908 1909
static bool has_valid_stack_frame(struct insn_state *state)
{
1910 1911
	struct cfi_state *cfi = &state->cfi;

1912 1913 1914
	if (cfi->cfa.base == CFI_BP &&
	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
1915 1916
		return true;

1917
	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
1918 1919 1920
		return true;

	return false;
1921 1922
}

1923 1924
/*
 * Minimal CFA tracking used while in an UNWIND_HINT_TYPE_REGS(_PARTIAL)
 * region (see update_cfi_state()): only the CFA offset is adjusted for
 * pushes, pops and immediate adds to the stack pointer.
 */
static int update_cfi_state_regs(struct instruction *insn,
				  struct cfi_state *cfi,
				  struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	/* Nothing to track unless the CFA is SP-based: */
	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}

1948
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
1949
{
1950
	if (arch_callee_saved_reg(reg) &&
1951 1952 1953
	    cfi->regs[reg].base == CFI_UNDEFINED) {
		cfi->regs[reg].base = base;
		cfi->regs[reg].offset = offset;
1954
	}
1955 1956
}

1957
/* Reset a register's CFI record to its function-entry default. */
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}

/*
 * A note about DRAP stack alignment:
 *
 * GCC has the concept of a DRAP register, which is used to help keep track of
 * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
 * register.  The typical DRAP pattern is:
 *
 *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
 *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
 *   41 ff 72 f8		pushq  -0x8(%r10)
 *   55				push   %rbp
 *   48 89 e5			mov    %rsp,%rbp
 *				(more pushes)
 *   41 52			push   %r10
 *				...
 *   41 5a			pop    %r10
 *				(more pops)
 *   5d				pop    %rbp
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * There are some variations in the epilogues, like:
 *
 *   5b				pop    %rbx
 *   41 5a			pop    %r10
 *   41 5c			pop    %r12
 *   41 5d			pop    %r13
 *   41 5e			pop    %r14
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * and:
 *
 *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
 *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
 *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
 *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * Sometimes r13 is used as the DRAP register, in which case it's saved and
 * restored beforehand:
 *
 *   41 55			push   %r13
 *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
 *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *				...
 *   49 8d 65 f0		lea    -0x10(%r13),%rsp
 *   41 5d			pop    %r13
 *   c3				retq
 */
2016 2017 2018
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
2019
{
2020 2021
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;
2022 2023 2024 2025 2026 2027 2028 2029 2030 2031

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn->func) {
			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
			return -1;
		}
		return 0;
	}

2032 2033
	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
2034
		return update_cfi_state_regs(insn, cfi, op);
2035

2036 2037 2038 2039 2040 2041
	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
2042 2043
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
2044
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
2045 2046 2047

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
2048
				cfi->bp_scratch = false;
2049
			}
2050

2051
			else if (op->src.reg == CFI_SP &&
2052
				 op->dest.reg == CFI_BP && cfi->drap) {
2053

2054 2055
				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
2056 2057
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
2058
			}
2059

2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071
			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
2072 2073
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2074 2075
			}

J
Josh Poimboeuf 已提交
2076
			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
P
Peter Zijlstra 已提交
2077
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
J
Josh Poimboeuf 已提交
2078 2079 2080 2081 2082 2083

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
2084
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
J
Josh Poimboeuf 已提交
2085 2086
			}

2087 2088 2089 2090
			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
2091
				    cfi->vals[op->src.reg].base == CFI_CFA) {
2092 2093 2094 2095 2096 2097 2098 2099 2100

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
2101 2102
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;
2103

P
Peter Zijlstra 已提交
2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135
				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

2136 2137 2138 2139
				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
2140 2141
			}

2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155
			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


2156 2157 2158 2159 2160 2161
			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
2162
				cfi->stack_size -= op->src.offset;
2163 2164 2165 2166 2167 2168 2169 2170
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {

				/* lea disp(%rbp), %rsp */
2171
				cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2172 2173 2174
				break;
			}

2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185
			if (!cfi->drap && op->src.reg == CFI_SP &&
			    op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {

				/* lea disp(%rsp), %rbp */
				cfa->base = CFI_BP;
				cfa->offset -= op->src.offset;
				cfi->bp_scratch = false;
				break;
			}

2186
			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2187 2188

				/* drap: lea disp(%rsp), %drap */
2189
				cfi->drap_reg = op->dest.reg;
2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
2201 2202 2203
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;
2204

2205 2206 2207
				break;
			}

2208 2209
			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {
2210 2211 2212

				 /* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
2213 2214 2215
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
2216 2217 2218
				break;
			}

2219
			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
2220 2221 2222 2223 2224 2225 2226 2227 2228
				WARN_FUNC("unsupported stack register modification",
					  insn->sec, insn->offset);
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
2229 2230
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
2231 2232 2233 2234 2235
				WARN_FUNC("unsupported stack pointer realignment",
					  insn->sec, insn->offset);
				return -1;
			}

2236
			if (cfi->drap_reg != CFI_UNDEFINED) {
2237
				/* drap: and imm, %rsp */
2238 2239 2240
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
2241 2242 2243 2244 2245 2246 2247 2248 2249 2250
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
P
Peter Zijlstra 已提交
2251
		case OP_SRC_POPF:
P
Peter Zijlstra 已提交
2252 2253 2254 2255 2256 2257 2258
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

2259
			if (!cfi->drap && op->dest.reg == cfa->base) {
2260 2261 2262 2263 2264

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

2265 2266 2267
			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {
2268

2269
				/* drap: pop %drap */
2270
				cfa->base = cfi->drap_reg;
2271
				cfa->offset = 0;
2272
				cfi->drap_offset = -1;
2273

P
Peter Zijlstra 已提交
2274
			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {
2275

2276
				/* pop %reg */
2277
				restore_reg(cfi, op->dest.reg);
2278 2279
			}

2280
			cfi->stack_size -= 8;
2281 2282 2283 2284 2285 2286
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
2287 2288 2289 2290 2291 2292 2293 2294
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

2295 2296
			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {
2297 2298

				/* drap: mov disp(%rbp), %drap */
2299
				cfa->base = cfi->drap_reg;
2300
				cfa->offset = 0;
2301
				cfi->drap_offset = -1;
2302 2303
			}

2304
			if (cfi->drap && op->src.reg == CFI_BP &&
2305 2306 2307
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
2308
				restore_reg(cfi, op->dest.reg);
2309 2310 2311 2312 2313 2314

			} else if (op->src.reg == cfa->base &&
			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
2315
				restore_reg(cfi, op->dest.reg);
2316 2317 2318 2319 2320 2321

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334
			}

			break;

		default:
			WARN_FUNC("unknown stack-related instruction",
				  insn->sec, insn->offset);
			return -1;
		}

		break;

	case OP_DEST_PUSH:
P
Peter Zijlstra 已提交
2335
	case OP_DEST_PUSHF:
2336
		cfi->stack_size += 8;
2337 2338 2339 2340 2341 2342
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

2343 2344
		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2345 2346 2347

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
2348
				cfa->offset = -cfi->stack_size;
2349

2350
				/* save drap so we know when to restore it */
2351
				cfi->drap_offset = -cfi->stack_size;
2352

2353
			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
2354 2355

				/* drap: push %rbp */
2356
				cfi->stack_size = 0;
2357

2358
			} else {
2359 2360

				/* drap: push %reg */
2361
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
2362 2363 2364 2365 2366
			}

		} else {

			/* push %reg */
2367
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
2368 2369 2370
		}

		/* detect when asm code uses rbp as a scratch register */
2371
		if (!no_fp && insn->func && op->src.reg == CFI_BP &&
2372
		    cfa->base != CFI_BP)
2373
			cfi->bp_scratch = true;
2374 2375 2376 2377
		break;

	case OP_DEST_REG_INDIRECT:

2378 2379
		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2380 2381 2382 2383 2384

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

2385
				/* save drap offset so we know when to restore it */
2386
				cfi->drap_offset = op->dest.offset;
2387
			} else {
2388 2389

				/* drap: mov reg, disp(%rbp) */
2390
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
2391 2392 2393 2394 2395 2396
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
2397 2398
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);
2399 2400 2401 2402 2403 2404

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);
P
Peter Zijlstra 已提交
2405 2406 2407 2408 2409 2410

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
2411 2412 2413 2414 2415
		}

		break;

	case OP_DEST_MEM:
P
Peter Zijlstra 已提交
2416
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
2417 2418 2419 2420 2421 2422
			WARN_FUNC("unknown stack-related memory operation",
				  insn->sec, insn->offset);
			return -1;
		}

		/* pop mem */
2423
		cfi->stack_size -= 8;
2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_FUNC("unknown stack-related instruction",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}

2438 2439 2440 2441 2442 2443 2444 2445 2446 2447
/*
 * The stack layouts of alternatives instructions can sometimes diverge when
 * they have stack modifications.  That's fine as long as the potential stack
 * layouts don't conflict at any given potential instruction boundary.
 *
 * Flatten the CFIs of the different alternative code streams (both original
 * and replacement) into a single shared CFI array which can be used to detect
 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
 */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
2448
{
2449 2450
	struct cfi_state **alt_cfi;
	int group_off;
2451

2452 2453
	if (!insn->alt_group)
		return 0;
2454

2455 2456
	alt_cfi = insn->alt_group->cfi;
	group_off = insn->offset - insn->alt_group->first_insn->offset;
2457

2458 2459 2460 2461 2462 2463
	if (!alt_cfi[group_off]) {
		alt_cfi[group_off] = &insn->cfi;
	} else {
		if (memcmp(alt_cfi[group_off], &insn->cfi, sizeof(struct cfi_state))) {
			WARN_FUNC("stack layout conflict in alternatives",
				  insn->sec, insn->offset);
2464 2465
			return -1;
		}
2466 2467 2468 2469 2470
	}

	return 0;
}

2471 2472 2473
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
2474 2475 2476 2477 2478
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {

2479
		if (update_cfi_state(insn, next_insn, &state->cfi, op))
2480
			return 1;
2481

2482 2483 2484
		if (!insn->alt_group)
			continue;

2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509
		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack  |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}

2510
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
2511
{
2512
	struct cfi_state *cfi1 = &insn->cfi;
2513 2514
	int i;

2515 2516
	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

2517 2518
		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
2519 2520
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);
2521

2522
	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
2523
		for (i = 0; i < CFI_NUM_REGS; i++) {
2524
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
2525 2526 2527 2528 2529
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
2530 2531
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
2532 2533 2534
			break;
		}

2535 2536
	} else if (cfi1->type != cfi2->type) {

2537
		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
2538 2539 2540 2541 2542
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
2543

2544
		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
2545
			  insn->sec, insn->offset,
2546 2547
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
2548 2549 2550 2551 2552

	} else
		return true;

	return false;
2553 2554
}

P
Peter Zijlstra 已提交
2555 2556 2557
static inline bool func_uaccess_safe(struct symbol *func)
{
	if (func)
2558
		return func->uaccess_safe;
P
Peter Zijlstra 已提交
2559 2560 2561 2562

	return false;
}

2563
static inline const char *call_dest_name(struct instruction *insn)
P
Peter Zijlstra 已提交
2564 2565 2566 2567 2568 2569 2570
{
	if (insn->call_dest)
		return insn->call_dest->name;

	return "{dynamic}";
}

2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596
static inline bool noinstr_call_dest(struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func)
		return false;

	/*
	 * If the symbol is from a noinstr section; we good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened. At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}

P
Peter Zijlstra 已提交
2597 2598
static int validate_call(struct instruction *insn, struct insn_state *state)
{
2599
	if (state->noinstr && state->instr <= 0 &&
2600
	    !noinstr_call_dest(insn->call_dest)) {
2601 2602 2603 2604 2605
		WARN_FUNC("call to %s() leaves .noinstr.text section",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

P
Peter Zijlstra 已提交
2606 2607
	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
		WARN_FUNC("call to %s() with UACCESS enabled",
2608
				insn->sec, insn->offset, call_dest_name(insn));
P
Peter Zijlstra 已提交
2609 2610 2611
		return 1;
	}

2612 2613
	if (state->df) {
		WARN_FUNC("call to %s() with DF set",
2614
				insn->sec, insn->offset, call_dest_name(insn));
2615 2616 2617
		return 1;
	}

P
Peter Zijlstra 已提交
2618 2619 2620
	return 0;
}

2621 2622
static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
{
2623
	if (has_modified_stack_frame(insn, state)) {
2624 2625 2626 2627 2628
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
				insn->sec, insn->offset);
		return 1;
	}

P
Peter Zijlstra 已提交
2629
	return validate_call(insn, state);
2630 2631
}

2632 2633
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
2634 2635 2636 2637 2638 2639
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657
	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

2658
	if (func && has_modified_stack_frame(insn, state)) {
2659 2660 2661 2662 2663
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

2664
	if (state->cfi.bp_scratch) {
2665 2666
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
2667 2668 2669 2670 2671 2672
		return 1;
	}

	return 0;
}

2673 2674
static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
2675
{
J
Josh Poimboeuf 已提交
2676
	struct alt_group *alt_group = insn->alt_group;
2677

2678 2679 2680 2681 2682 2683 2684 2685 2686
	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 */
	if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
		return next_insn_same_sec(file, alt_group->orig_group->last_insn);

	return next_insn_same_sec(file, insn);
2687 2688
}

2689 2690 2691 2692 2693 2694
/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/stack-validation.txt.
 */
2695
static int validate_branch(struct objtool_file *file, struct symbol *func,
P
Peter Zijlstra 已提交
2696
			   struct instruction *insn, struct insn_state state)
2697 2698
{
	struct alternative *alt;
P
Peter Zijlstra 已提交
2699
	struct instruction *next_insn;
2700
	struct section *sec;
2701
	u8 visited;
2702 2703 2704 2705 2706
	int ret;

	sec = insn->sec;

	while (1) {
2707
		next_insn = next_insn_to_validate(file, insn);
2708

2709
		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
2710 2711 2712
			WARN("%s() falls through to next function %s()",
			     func->name, insn->func->name);
			return 1;
2713 2714
		}

2715 2716 2717
		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
2718
			return 1;
2719 2720
		}

2721
		visited = 1 << state.uaccess;
2722
		if (insn->visited) {
2723
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
2724 2725
				return 1;

2726
			if (insn->visited & visited)
P
Peter Zijlstra 已提交
2727
				return 0;
2728 2729
		}

2730 2731 2732
		if (state.noinstr)
			state.instr += insn->instr;

2733
		if (insn->hint)
2734
			state.cfi = insn->cfi;
2735
		else
2736
			insn->cfi = state.cfi;
2737

2738
		insn->visited |= visited;
2739

2740 2741 2742
		if (propagate_alt_cfi(file, insn))
			return 1;

2743
		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
P
Peter Zijlstra 已提交
2744 2745
			bool skip_orig = false;

2746
			list_for_each_entry(alt, &insn->alts, list) {
P
Peter Zijlstra 已提交
2747 2748 2749
				if (alt->skip_orig)
					skip_orig = true;

2750
				ret = validate_branch(file, func, alt->insn, state);
2751 2752 2753 2754 2755
				if (ret) {
					if (backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
2756
			}
P
Peter Zijlstra 已提交
2757 2758 2759

			if (skip_orig)
				return 0;
2760 2761
		}

2762
		if (handle_insn_ops(insn, next_insn, &state))
2763 2764
			return 1;

2765 2766 2767
		switch (insn->type) {

		case INSN_RETURN:
2768
			return validate_return(func, insn, &state);
2769 2770

		case INSN_CALL:
P
Peter Zijlstra 已提交
2771 2772 2773 2774
		case INSN_CALL_DYNAMIC:
			ret = validate_call(insn, &state);
			if (ret)
				return ret;
2775

2776 2777
			if (!no_fp && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
2778 2779 2780 2781
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}
2782 2783 2784 2785

			if (dead_end_function(file, insn->call_dest))
				return 0;

2786 2787 2788 2789
			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
2790
			if (is_sibling_call(insn)) {
2791
				ret = validate_sibling_call(insn, &state);
2792
				if (ret)
2793
					return ret;
2794

2795
			} else if (insn->jump_dest) {
2796 2797
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
2798 2799 2800 2801 2802
				if (ret) {
					if (backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
2803
			}
2804 2805 2806 2807 2808 2809 2810

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
2811
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
2812
			if (is_sibling_call(insn)) {
2813 2814 2815
				ret = validate_sibling_call(insn, &state);
				if (ret)
					return ret;
2816 2817
			}

2818 2819 2820 2821
			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;
2822

2823 2824 2825 2826 2827 2828 2829 2830
		case INSN_CONTEXT_SWITCH:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

P
Peter Zijlstra 已提交
2831 2832 2833 2834 2835 2836 2837 2838 2839 2840
		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
2841
			if (!state.uaccess && func) {
P
Peter Zijlstra 已提交
2842 2843 2844 2845 2846 2847 2848 2849 2850 2851
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
2852 2853
			break;

2854
		case INSN_STD:
2855
			if (state.df) {
2856
				WARN_FUNC("recursive STD", sec, insn->offset);
2857 2858
				return 1;
			}
2859 2860 2861 2862 2863

			state.df = true;
			break;

		case INSN_CLD:
2864
			if (!state.df && func) {
2865
				WARN_FUNC("redundant CLD", sec, insn->offset);
2866 2867
				return 1;
			}
2868 2869

			state.df = false;
2870 2871
			break;

2872 2873 2874 2875 2876 2877 2878
		default:
			break;
		}

		if (insn->dead_end)
			return 0;

2879
		if (!next_insn) {
2880
			if (state.cfi.cfa.base == CFI_UNDEFINED)
2881
				return 0;
2882 2883 2884
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}
2885 2886

		insn = next_insn;
2887 2888 2889 2890 2891
	}

	return 0;
}

2892
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
2893 2894 2895
{
	struct instruction *insn;
	struct insn_state state;
2896
	int ret, warnings = 0;
2897 2898 2899 2900

	if (!file->hints)
		return 0;

2901
	init_insn_state(&state, sec);
2902

2903 2904 2905 2906 2907 2908 2909 2910 2911
	if (sec) {
		insn = find_insn(file, sec, 0);
		if (!insn)
			return 0;
	} else {
		insn = list_first_entry(&file->insn_list, typeof(*insn), list);
	}

	while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
2912
		if (insn->hint && !insn->visited) {
2913
			ret = validate_branch(file, insn->func, insn, state);
2914 2915
			if (ret && backtrace)
				BT_FUNC("<=== (hint)", insn);
2916 2917
			warnings += ret;
		}
2918 2919

		insn = list_next_entry(insn, list);
2920 2921 2922 2923 2924
	}

	return warnings;
}

2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC)
			continue;

		if (insn->retpoline_safe)
			continue;

2938 2939 2940 2941 2942 2943 2944 2945 2946
		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(insn->sec->name, ".init.text") && !module)
			continue;

2947 2948 2949 2950 2951 2952 2953 2954 2955 2956
		WARN_FUNC("indirect %s found in RETPOLINE build",
			  insn->sec, insn->offset,
			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");

		warnings++;
	}

	return warnings;
}

2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969
static bool is_kasan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
}

/* Whether the instruction is a call to UBSAN's unreachable handler. */
static bool is_ubsan_insn(struct instruction *insn)
{
	if (insn->type != INSN_CALL)
		return false;

	return !strcmp(insn->call_dest->name,
		       "__ubsan_handle_builtin_unreachable");
}

2970
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
2971 2972
{
	int i;
2973
	struct instruction *prev_insn;
2974

2975 2976 2977 2978 2979 2980
	if (insn->ignore || insn->type == INSN_NOP)
		return true;

	/*
	 * Ignore any unused exceptions.  This can happen when a whitelisted
	 * function has an exception table entry.
2981 2982 2983
	 *
	 * Also ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
2984
	 */
2985 2986 2987
	if (!strcmp(insn->sec->name, ".fixup") ||
	    !strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
2988 2989
		return true;

2990 2991 2992 2993 2994 2995 2996 2997
	if (!insn->func)
		return false;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
2998 2999
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
3000
	 */
3001 3002
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
3003 3004 3005 3006 3007
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018
	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

3019 3020 3021 3022 3023 3024 3025 3026
		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
3027 3028
		}

3029
		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
3030
			break;
3031

3032 3033 3034 3035 3036 3037
		insn = list_next_entry(insn, list);
	}

	return false;
}

3038 3039
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
3040 3041
{
	struct instruction *insn;
3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn->func, insn, *state);
	if (ret && backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}

static int validate_section(struct objtool_file *file, struct section *sec)
{
3066
	struct insn_state state;
3067 3068
	struct symbol *func;
	int warnings = 0;
3069

3070 3071 3072
	list_for_each_entry(func, &sec->symbol_list, list) {
		if (func->type != STT_FUNC)
			continue;
3073

3074
		init_insn_state(&state, sec);
3075
		set_func_state(&state.cfi);
3076

3077
		warnings += validate_symbol(file, sec, func, &state);
3078 3079 3080 3081 3082
	}

	return warnings;
}

3083 3084 3085
static int validate_vmlinux_functions(struct objtool_file *file)
{
	struct section *sec;
3086
	int warnings = 0;
3087 3088

	sec = find_section_by_name(file->elf, ".noinstr.text");
3089 3090 3091 3092
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}
3093

3094 3095 3096 3097 3098
	sec = find_section_by_name(file->elf, ".entry.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}
3099 3100

	return warnings;
3101 3102
}

3103 3104 3105 3106 3107
static int validate_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

3108 3109 3110 3111
	for_each_sec(file, sec) {
		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

3112
		warnings += validate_section(file, sec);
3113
	}
3114 3115 3116 3117

	return warnings;
}

3118
static int validate_reachable_instructions(struct objtool_file *file)
3119 3120
{
	struct instruction *insn;
3121 3122 3123

	if (file->ignore_unreachables)
		return 0;
3124 3125

	for_each_insn(file, insn) {
3126
		if (insn->visited || ignore_unreachable_insn(file, insn))
3127 3128 3129 3130
			continue;

		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
		return 1;
3131 3132
	}

3133
	return 0;
3134 3135
}

3136
int check(struct objtool_file *file)
3137 3138 3139
{
	int ret, warnings = 0;

3140 3141
	arch_initial_func_cfi_state(&initial_func_cfi);

3142
	ret = decode_sections(file);
3143 3144 3145 3146
	if (ret < 0)
		goto out;
	warnings += ret;

3147
	if (list_empty(&file->insn_list))
3148 3149
		goto out;

3150
	if (vmlinux && !validate_dup) {
3151
		ret = validate_vmlinux_functions(file);
3152 3153 3154 3155 3156 3157 3158
		if (ret < 0)
			goto out;

		warnings += ret;
		goto out;
	}

3159
	if (retpoline) {
3160
		ret = validate_retpoline(file);
3161 3162 3163 3164 3165
		if (ret < 0)
			return ret;
		warnings += ret;
	}

3166
	ret = validate_functions(file);
3167 3168 3169 3170
	if (ret < 0)
		goto out;
	warnings += ret;

3171
	ret = validate_unwind_hints(file, NULL);
3172 3173 3174 3175
	if (ret < 0)
		goto out;
	warnings += ret;

3176
	if (!warnings) {
3177
		ret = validate_reachable_instructions(file);
3178 3179 3180 3181 3182
		if (ret < 0)
			goto out;
		warnings += ret;
	}

3183
	ret = create_static_call_sections(file);
3184 3185 3186 3187
	if (ret < 0)
		goto out;
	warnings += ret;

3188 3189 3190 3191 3192 3193 3194
	if (mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

3195
out:
3196 3197 3198 3199 3200
	/*
	 *  For now, don't fail the kernel build on fatal warnings.  These
	 *  errors are still fairly common due to the growing matrix of
	 *  supported toolchains and their recent pace of change.
	 */
3201 3202
	return 0;
}