// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/mman.h>

#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>

struct alternative {
	struct list_head list;
	struct instruction *insn;
	bool skip_orig;
};

static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;

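/*
 * Find the instruction at the given section + offset, using the file-wide
 * instruction hash table.
 */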
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

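/* Return the next instruction in the list, or NULL at the end of the section. */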
static struct instruction *next_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);

	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
		return NULL;

	return next;
}

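/*
 * Return the next instruction within the same function, following the link
 * from a parent function into its .cold subfunction when needed.
 */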
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

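/* Return the previous instruction if it belongs to the same symbol. */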
static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *prev = list_prev_entry(insn, list);

	if (&prev->list != &file->insn_list && prev->func == insn->func)
		return prev;

	return NULL;
}

#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
		insn->sec == sym->sec &&				\
		insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
		insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn->jump_table)
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       alt_group->orig_group->first_insn->jump_table;
}

static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only ELF functions can make sibling calls.  This ensures
	 * sibling call detection consistency between vmlinux.o and individual
	 * objects.
	 */
	if (!insn->func)
		return false;

	/* An indirect jump is either a sibling call or a jump to a table. */
	if (insn->type == INSN_JUMP_DYNAMIC)
		return !is_jump_table_jump(insn);

	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
	return (is_static_jump(insn) && insn->call_dest);
}

/*
 * Check whether the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data.
	 */
	static const char * const global_noreturns[] = {
		"__stack_chk_fail",
		"panic",
		"do_exit",
		"do_task_dead",
		"kthread_exit",
		"make_task_dead",
		"__module_put_and_kthread_exit",
		"kthread_complete_and_exit",
		"__reiserfs_panic",
		"lbug_with_loc",
		"fortify_panic",
		"usercopy_abort",
		"machine_real_restart",
		"rewind_stack_and_make_dead",
		"kunit_try_catch_throw",
		"xen_start_kernel",
		"cpu_bringup_and_idle",
		"do_group_exit",
		"stop_this_cpu",
		"__invalid_creds",
		"cpu_startup_entry",
		"__ubsan_handle_builtin_unreachable",
		"ex_handler_msr_mce",
	};

	if (!func)
		return false;

	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn->func)
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, dest->func, recursion+1);
		}
	}

	return true;
}

static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}

static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}

static void init_insn_state(struct objtool_file *file, struct insn_state *state,
			    struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we can
	 * not correctly determine insn->call_dest->sec (external symbols do
	 * not have a section).
	 */
	if (opts.link && opts.noinstr && sec)
		state->noinstr = sec->noinstr;
}

static struct cfi_state *cfi_alloc(void)
{
	struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
	if (!cfi) {
		WARN("calloc failed");
		exit(1);
	}
	nr_cfi++;
	return cfi;
}

static int cfi_bits;
static struct hlist_head *cfi_hash;

static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}

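/*
 * Deduplicate CFI states: return a matching entry from the hash table, or
 * insert and return a copy of @cfi.
 */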
static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
	struct cfi_state *obj;

	hlist_for_each_entry(obj, head, hash) {
		if (!cficmp(cfi, obj)) {
			nr_cfi_cache++;
			return obj;
		}
	}

	obj = cfi_alloc();
	*obj = *cfi;
	hlist_add_head(&obj->hash, head);

	return obj;
}

static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}

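/* mmap() the global CFI hash table; @size is an estimate of the entry count. */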
static void *cfi_hash_alloc(unsigned long size)
{
	cfi_bits = max(10, ilog2(size));
	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
			PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	if (cfi_hash == (void *)-1L) {
		WARN("mmap fail cfi_hash");
		cfi_hash = NULL;
	} else if (opts.stats) {
		printf("cfi_bits: %d\n", cfi_bits);
	}

	return cfi_hash;
}

static unsigned long nr_insns;
static unsigned long nr_insns_visited;

/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strncmp(sec->name, ".text.__x86.", 12))
			sec->noinstr = true;

		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
			insn = malloc(sizeof(*insn));
			if (!insn) {
				WARN("malloc failed");
				return -1;
			}
			memset(insn, 0, sizeof(*insn));
			INIT_LIST_HEAD(&insn->alts);
			INIT_LIST_HEAD(&insn->stack_ops);
			INIT_LIST_HEAD(&insn->call_node);

			insn->sec = sec;
			insn->offset = offset;

			ret = arch_decode_instruction(file, sec, offset,
						      sec->sh.sh_size - offset,
						      &insn->len, &insn->type,
						      &insn->immediate,
						      &insn->stack_ops);
			if (ret)
				goto err;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			list_add_tail(&insn->list, &file->insn_list);
			nr_insns++;
		}

		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->func = func;
				if (insn->type == INSN_ENDBR && list_empty(&insn->call_node)) {
					if (insn->offset == insn->func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;

err:
	free(insn);
	return ret;
}

/*
 * Read the pv_ops[] .data table to find the static initialized values.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *rel;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!rel)
			break;

		func = rel->sym;
		if (func->type == STT_SECTION)
			func = find_symbol_by_offset(rel->sym->sec, rel->addend);

		idx = (rel->offset - sym->offset) / sizeof(unsigned long);

		objtool_pv_add(file, idx, func);

		off = rel->offset + 1;
		if (off > end)
			break;
	}

	return 0;
}

/*
 * Allocate and initialize file->pv_ops[].
 */
static int init_pv_ops(struct objtool_file *file)
{
	static const char *pv_ops_tables[] = {
		"pv_ops",
		"xen_cpu_ops",
		"xen_irq_ops",
		"xen_mmu_ops",
		NULL,
	};
	const char *pv_ops;
	struct symbol *sym;
	int idx, nr;

	if (!opts.noinstr)
		return 0;

	file->pv_ops = NULL;

	sym = find_symbol_by_name(file->elf, "pv_ops");
	if (!sym)
		return 0;

	nr = sym->len / sizeof(unsigned long);
	file->pv_ops = calloc(sizeof(struct pv_state), nr);
	if (!file->pv_ops)
		return -1;

	for (idx = 0; idx < nr; idx++)
		INIT_LIST_HEAD(&file->pv_ops[idx].targets);

	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
		add_pv_ops(file, pv_ops);

	return 0;
}

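/* Find the last instruction in @sec by searching backwards from the section end. */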
static struct instruction *find_last_insn(struct objtool_file *file,
					  struct section *sec)
{
	struct instruction *insn = NULL;
	unsigned int offset;
	unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;

	for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
		insn = find_insn(file, sec, offset);

	return insn;
}

/*
 * Mark "ud2" instructions and manually annotated dead ends.
 */
static int add_dead_ends(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	/*
	 * Check for manually annotated dead ends.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
	if (!sec)
		goto reachable;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find unreachable insn at %s+0x%" PRIx64,
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find unreachable insn at %s+0x%" PRIx64,
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = true;
	}

reachable:
	/*
	 * These manually annotated reachable checks are needed for GCC 4.4,
	 * where the Linux unreachable() macro isn't supported.  In that case
	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
	 * not a dead end.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find reachable insn at %s+0x%" PRIx64,
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find reachable insn at %s+0x%" PRIx64,
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = false;
	}

	return 0;
}

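/*
 * Create .static_call_sites, with one entry (site + key relocations) for
 * each static call site found in the object.
 */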
static int create_static_call_sections(struct objtool_file *file)
{
	struct section *sec;
	struct static_call_site *site;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
				 sizeof(struct static_call_site), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		site = (struct static_call_site *)sec->data->d_buf + idx;
		memset(site, 0, sizeof(struct static_call_site));

		/* populate reloc for 'addr' */
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(struct static_call_site),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn->call_dest->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			return -1;
		}
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!opts.module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules, the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address.  This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn->call_dest;
		}
		free(key_name);

		/* populate reloc for 'key' */
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}

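/* Create .retpoline_sites: a table of relocations to each retpoline call/jump site. */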
static int create_retpoline_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".retpoline_sites");
	if (sec) {
		WARN("file already has .retpoline_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".retpoline_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .retpoline_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .retpoline_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

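/* Create .return_sites: a table of relocations to each return-thunk site. */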
static int create_return_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".return_sites");
	if (sec) {
		WARN("file already has .return_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".return_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .return_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .return_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

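/*
 * Create .ibt_endbr_seal, listing the superfluous ENDBR instructions which
 * the kernel may later seal because they are not indirect-branch targets.
 */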
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions:  %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR:       %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .ibt_endbr_seal");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .ibt_endbr_seal");
			return -1;
		}

		idx++;
	}

	return 0;
}

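/* Create __mcount_loc, containing the address of every __fentry__ call site. */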
static int create_mcount_loc_sections(struct objtool_file *file)
{
	struct section *sec;
	unsigned long *loc;
	struct instruction *insn;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		loc = (unsigned long *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned long));

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned long),
					  R_X86_64_64,
					  insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
			if (!func)
				continue;
			break;

		default:
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
			continue;
		}

		func_for_each_insn(file, func, insn)
			insn->ignore = true;
	}
}

/*
 * This is a whitelist of functions that is allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	NULL
};

static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!opts.uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * FIXME: For now, just ignore any alternatives which add retpolines.  This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
static int add_ignore_alternatives(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.ignore_alts entry");
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}

__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}

#define NEGATIVE_RELOC	((void *)-1L)

static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		if (!file)
			return NULL;

		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		if (!insn->reloc) {
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}

static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
		list_del(&op->list);
		free(op);
	}
}

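/*
 * Annotate a call site: add it to the static call, retpoline or mcount lists
 * as appropriate, and rewrite instrumentation calls in noinstr text to NOPs
 * when the corresponding options are enabled.
 */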
static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn->call_dest;

	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction. For now, don't
	 * annotate it. (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help; NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
			               : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       arch_nop_insn(insn->len));

		insn->type = INSN_NOP;

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}

	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;
}

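/* Record @dest as the call destination of @insn and annotate the call site. */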
static void add_call_dest(struct objtool_file *file, struct instruction *insn,
			  struct symbol *dest, bool sibling)
{
	insn->call_dest = dest;
	if (!dest)
		return;

	/*
	 * Whatever stack impact regular CALLs have should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, sibling);
}

static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, false);
}

static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
	/*
	 * Return thunk tail calls are really just returns in disguise,
	 * so convert them accordingly.
	 */
	insn->type = INSN_RETURN;
	insn->retpoline_safe = true;

	if (add)
		list_add_tail(&insn->call_node, &file->return_thunk_list);
}

static bool same_function(struct instruction *insn1, struct instruction *insn2)
{
	return insn1->func->pfunc == insn2->func->pfunc;
}

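/*
 * Return true if @insn is the first instruction of its function, allowing
 * for a leading ENDBR when IBT is enabled.
 */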
static bool is_first_func_insn(struct objtool_file *file, struct instruction *insn)
{
	if (insn->offset == insn->func->offset)
		return true;

	if (opts.ibt) {
		struct instruction *prev = prev_insn_same_sym(file, insn);

		if (prev && prev->type == INSN_ENDBR &&
		    insn->offset == insn->func->offset + prev->len)
			return true;
	}

	return false;
}

/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn, *jump_dest;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);
			continue;
		} else if (reloc->sym->return_thunk) {
			add_return_call(file, insn, true);
			continue;
		} else if (insn->func) {
			/*
			 * External sibling call or internal sibling call with
			 * STT_FUNC reloc.
			 */
			add_call_dest(file, insn, reloc->sym, true);
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		jump_dest = find_insn(file, dest_sec, dest_off);
		if (!jump_dest) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * This is a special case for zen_untrain_ret().
			 * It jumps to __x86_return_thunk(), but objtool
			 * can't find the thunk's starting RET
			 * instruction, because the RET is also in the
			 * middle of another instruction.  Objtool only
			 * knows about the outer instruction.
			 */
			if (sym && sym->return_thunk) {
				add_return_call(file, insn, false);
				continue;
			}

			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
				  insn->sec, insn->offset, dest_sec->name,
				  dest_off);
			return -1;
		}

		/*
		 * Cross-function jump.
		 */
		if (insn->func && jump_dest->func &&
		    insn->func != jump_dest->func) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions.  This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent.  In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(insn->func->name, ".cold") &&
			    strstr(jump_dest->func->name, ".cold")) {
				insn->func->cfunc = jump_dest->func;
				jump_dest->func->pfunc = insn->func;

			} else if (!same_function(insn, jump_dest) &&
				   is_first_func_insn(file, jump_dest)) {
				/*
				 * Internal sibling call without reloc or with
				 * STT_SECTION reloc.
				 */
				add_call_dest(file, insn, jump_dest->func, true);
				continue;
			}
		}

		insn->jump_dest = jump_dest;
	}

	return 0;
}

static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}

/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			add_call_dest(file, insn, dest, false);

			if (insn->ignore)
				continue;

			if (!insn->call_dest) {
				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
				return -1;
			}

			if (insn->func && insn->call_dest->type != STT_FUNC) {
				WARN_FUNC("unsupported call to non-function",
					  insn->sec, insn->offset);
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			dest_off = arch_dest_reloc_offset(reloc->addend);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
					  insn->sec, insn->offset,
					  reloc->sym->sec->name,
					  dest_off);
				return -1;
			}

			add_call_dest(file, insn, dest, false);

		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);

		} else
			add_call_dest(file, insn, reloc->sym, false);
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;


	orig_alt_group = malloc(sizeof(*orig_alt_group));
	if (!orig_alt_group) {
		WARN("malloc failed");
		return -1;
	}
	orig_alt_group->cfi = calloc(special_alt->orig_len,
				     sizeof(struct cfi_state *));
	if (!orig_alt_group->cfi) {
		WARN("calloc failed");
		return -1;
	}

	last_orig_insn = NULL;
	insn = orig_insn;
	sec_for_each_insn_from(file, insn) {
		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
			break;

		insn->alt_group = orig_alt_group;
		last_orig_insn = insn;
	}
	orig_alt_group->orig_group = NULL;
	orig_alt_group->first_insn = orig_insn;
	orig_alt_group->last_insn = last_orig_insn;


	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original.  This is needed to
		 * allow propagate_alt_cfi() to do its magic.  When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));
		INIT_LIST_HEAD(&nop->alts);
		INIT_LIST_HEAD(&nop->stack_ops);

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->func = orig_insn->func;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}

	if (!special_alt->new_len) {
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->func = orig_insn->func;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_FUNC("unsupported relocation in alternatives section",
				  insn->sec, insn->offset);
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len) {
			insn->jump_dest = next_insn_same_sec(file, last_orig_insn);
			if (!insn->jump_dest) {
				WARN_FUNC("can't find alternative jump destination",
					  insn->sec, insn->offset);
				return -1;
			}
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

	if (nop)
		list_add(&nop->list, &last_new_insn->list);
end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = nop ? : last_new_insn;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}

/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		WARN_FUNC("unsupported instruction at jump label",
			  orig_insn->sec, orig_insn->offset);
		return -1;
	}

	if (opts.hack_jump_label && special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}
		elf_write_insn(file->elf, orig_insn->sec,
			       orig_insn->offset, orig_insn->len,
			       arch_nop_insn(orig_insn->len));
		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	*new_insn = list_next_entry(orig_insn, list);
	return 0;
}

/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime.  Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_FUNC("empty alternative entry",
					  orig_insn->sec, orig_insn->offset);
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		list_add_tail(&alt->list, &orig_insn->alts);

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

out:
	return ret;
}

static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			    struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn->func->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}

/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static struct reloc *find_jump_table(struct objtool_file *file,
				      struct symbol *func,
				      struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

	/*
	 * Backward search using the @first_jump_src links: these help avoid
	 * much of the 'in between' code, which could otherwise confuse the
	 * search.
	 */
	for (;
	     insn && insn->func && insn->func->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
		    break;

		table_reloc = arch_find_switch_table(file, insn);
		if (!table_reloc)
			continue;
		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
		if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
			continue;

		return table_reloc;
	}

	return NULL;
}

/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				    struct symbol *func)
{
	struct instruction *insn, *last = NULL;
	struct reloc *reloc;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		reloc = find_jump_table(file, func, insn);
		if (reloc) {
			reloc->jump_table_start = true;
			insn->jump_table = reloc;
		}
	}
}

static int add_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn;
	int ret;

	func_for_each_insn(file, func, insn) {
		if (!insn->jump_table)
			continue;

		ret = add_jump_table(file, insn, insn->jump_table);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * For some switch statements, gcc generates a jump table in the .rodata
 * section which contains a list of addresses within the function to jump to.
 * This finds these jump tables and adds them to the insn->alts lists.
 */
static int add_jump_table_alts(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	int ret;

	if (!file->rodata)
		return 0;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC)
				continue;

			mark_func_jump_tables(file, func);
			ret = add_func_jump_tables(file, func);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	state->stack_size = initial_func_cfi.cfa.offset;
}

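/*
 * Read the UNWIND_HINT_*() annotations from .discard.unwind_hints and attach
 * the hinted CFI state to the corresponding instructions.
 */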
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec, *relocsec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relocsec = sec->reloc;
	if (!relocsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			if (sym && sym->bind == STB_GLOBAL) {
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR",
						  insn->sec, insn->offset);
				}

				insn->entry = 1;
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_ENTRY) {
			hint->type = UNWIND_HINT_TYPE_CALL;
			insn->entry = 1;
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
		cfi.type = hint->type;
		cfi.end = hint->end;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}

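/*
 * Read the ANNOTATE_NOENDBR annotations and mark the referenced instructions
 * as explicitly not needing an ENDBR.
 */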
static int read_noendbr_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.noendbr");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend);
		if (!insn) {
			WARN("bad .discard.noendbr entry");
			return -1;
		}

		if (insn->type == INSN_ENDBR)
			WARN_FUNC("ANNOTATE_NOENDBR on ENDBR", insn->sec, insn->offset);

		insn->noendbr = 1;
	}

	return 0;
}

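/*
 * Read the ANNOTATE_RETPOLINE_SAFE annotations and mark the referenced
 * indirect jumps/calls/returns/nops as safe for retpoline builds.
 */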
static int read_retpoline_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.retpoline_safe entry");
			return -1;
		}

		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN &&
		    insn->type != INSN_NOP) {
			WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop",
				  insn->sec, insn->offset);
			return -1;
		}

		insn->retpoline_safe = true;
	}

	return 0;
}

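/*
 * Read the instrumentation_begin()/instrumentation_end() annotations and
 * adjust insn->instr accordingly for noinstr validation.
 */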
static int read_instr_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_end entry");
			return -1;
		}

		insn->instr--;
	}

	sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_begin entry");
			return -1;
		}

		insn->instr++;
	}

	return 0;
}

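/*
 * Read the ANNOTATE_INTRA_FUNCTION_CALL annotations and convert such calls
 * into unconditional jumps with a resolved jump destination, keeping their
 * stack_ops.
 */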
static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		unsigned long dest_off;

		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_call entry");
			return -1;
		}

		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

		dest_off = insn->offset + insn->len + insn->immediate;
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}

/*
 * Return true if name matches an instrumentation function, where calls to that
 * function from noinstr code can safely be removed, but compilers won't do so.
 */
static bool is_profiling_func(const char *name)
{
	/*
	 * Many compilers cannot disable KCOV with a function attribute.
	 */
	if (!strncmp(name, "__sanitizer_cov_", 16))
		return true;

	/*
	 * Some compilers currently do not remove __tsan_func_entry/exit nor
	 * __tsan_atomic_signal_fence (used for barrier instrumentation) with
	 * the __no_sanitize_thread attribute, so remove them here. Once the
	 * kernel's minimum Clang version is 14.0, this can be removed.
	 */
	if (!strncmp(name, "__tsan_func_", 12) ||
	    !strcmp(name, "__tsan_atomic_signal_fence"))
		return true;

	return false;
}

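/*
 * Mark well-known global symbols (static call trampolines, retpoline and
 * return thunks, __fentry__, profiling helpers) so later passes can
 * special-case them.
 */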
static int classify_symbols(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->bind != STB_GLOBAL)
				continue;

			if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
				     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
				func->static_call_tramp = true;

			if (arch_is_retpoline(func))
				func->retpoline_thunk = true;

			if (arch_is_rethunk(func))
				func->return_thunk = true;

			if (!strcmp(func->name, "__fentry__"))
				func->fentry = true;

			if (is_profiling_func(func->name))
				func->profiling_func = true;
		}
	}

	return 0;
}

static void mark_rodata(struct objtool_file *file)
{
	struct section *sec;
	bool found = false;

	/*
	 * Search for the following rodata sections, each of which can
	 * potentially contain jump tables:
	 *
	 * - .rodata: can contain GCC switch tables
	 * - .rodata.<func>: same, if -fdata-sections is being used
	 * - .rodata..c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
	 */
	for_each_sec(file, sec) {
		if (!strncmp(sec->name, ".rodata", 7) &&
		    !strstr(sec->name, ".str1.")) {
			sec->rodata = true;
			found = true;
		}
	}

	file->rodata = found;
}

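/*
 * Run all decode/annotation passes in dependency order; the comments below
 * document which passes must run before which.
 */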
static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = init_pv_ops(file);
	if (ret)
		return ret;

	ret = decode_instructions(file);
	if (ret)
		return ret;

	add_ignores(file);
	add_uaccess_safe(file);

	ret = add_ignore_alternatives(file);
	if (ret)
		return ret;

	/*
	 * Must be before read_unwind_hints() since that needs insn->noendbr.
	 */
	ret = read_noendbr_hints(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	ret = classify_symbols(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	ret = add_special_section_alts(file);
	if (ret)
		return ret;

	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	ret = add_dead_ends(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	return 0;
}

static bool is_fentry_call(struct instruction *insn)
{
	if (insn->type == INSN_CALL &&
	    insn->call_dest &&
	    insn->call_dest->fentry)
		return true;

	return false;
}

static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;
	int i;

	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
		return true;

	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
		return true;

	if (cfi->stack_size != initial_func_cfi.cfa.offset)
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
			return true;
	}

	return false;
}

static bool check_reg_frame_pos(const struct cfi_reg *reg,
				int expected_offset)
{
	return reg->base == CFI_CFA &&
	       reg->offset == expected_offset;
}

static bool has_valid_stack_frame(struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;

	if (cfi->cfa.base == CFI_BP &&
	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
		return true;

	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
		return true;

	return false;
}

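/*
 * Simplified stack tracking for UNWIND_HINT_REGS regions: only the CFA offset
 * is adjusted for pushes, pops and immediate adds to the stack pointer.
 */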
static int update_cfi_state_regs(struct instruction *insn,
				  struct cfi_state *cfi,
				  struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}

static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
	if (arch_callee_saved_reg(reg) &&
	    cfi->regs[reg].base == CFI_UNDEFINED) {
		cfi->regs[reg].base = base;
		cfi->regs[reg].offset = offset;
	}
}

static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}

/*
 * A note about DRAP stack alignment:
 *
 * GCC has the concept of a DRAP register, which is used to help keep track of
 * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
 * register.  The typical DRAP pattern is:
 *
 *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
 *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
 *   41 ff 72 f8		pushq  -0x8(%r10)
 *   55				push   %rbp
 *   48 89 e5			mov    %rsp,%rbp
 *				(more pushes)
 *   41 52			push   %r10
 *				...
 *   41 5a			pop    %r10
 *				(more pops)
 *   5d				pop    %rbp
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * There are some variations in the epilogues, like:
 *
 *   5b				pop    %rbx
 *   41 5a			pop    %r10
 *   41 5c			pop    %r12
 *   41 5d			pop    %r13
 *   41 5e			pop    %r14
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * and:
 *
 *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
 *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
 *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
 *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * Sometimes r13 is used as the DRAP register, in which case it's saved and
 * restored beforehand:
 *
 *   41 55			push   %r13
 *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
 *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *				...
 *   49 8d 65 f0		lea    -0x10(%r13),%rsp
 *   41 5d			pop    %r13
 *   c3				retq
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn->func) {
			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
			return -1;
		}
		return 0;
	}

	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {

				/* lea disp(%rbp), %rsp */
				cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				break;
			}

			if (!cfi->drap && op->src.reg == CFI_SP &&
			    op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {

				/* lea disp(%rsp), %rbp */
				cfa->base = CFI_BP;
				cfa->offset -= op->src.offset;
				cfi->bp_scratch = false;
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				 /* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_FUNC("unsupported stack register modification",
					  insn->sec, insn->offset);
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_FUNC("unsupported stack pointer realignment",
					  insn->sec, insn->offset);
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_FUNC("unknown stack-related instruction",
				  insn->sec, insn->offset);
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn->func && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_FUNC("unknown stack-related memory operation",
				  insn->sec, insn->offset);
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_FUNC("unknown stack-related instruction",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}

/*
 * The stack layouts of alternatives instructions can sometimes diverge when
 * they have stack modifications.  That's fine as long as the potential stack
 * layouts don't conflict at any given potential instruction boundary.
 *
 * Flatten the CFIs of the different alternative code streams (both original
 * and replacement) into a single shared CFI array which can be used to detect
 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
 */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;

	if (!insn->alt_group)
		return 0;

	if (!insn->cfi) {
		WARN("CFI missing");
		return -1;
	}

	alt_cfi = insn->alt_group->cfi;
	group_off = insn->offset - insn->alt_group->first_insn->offset;

	if (!alt_cfi[group_off]) {
		alt_cfi[group_off] = insn->cfi;
	} else {
		if (cficmp(alt_cfi[group_off], insn->cfi)) {
			WARN_FUNC("stack layout conflict in alternatives",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	return 0;
}

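/*
 * Apply all of an instruction's stack operations to the CFI state, and track
 * the PUSHF/POPF uaccess state for instructions inside alternatives.
 */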
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {

		if (update_cfi_state(insn, next_insn, &state->cfi, op))
			return 1;

		if (!insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack  |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}

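/*
 * Compare an instruction's known CFI state against the state reached via the
 * current branch; warn on any CFA, register, type or DRAP mismatch.
 */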
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {

		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}

static inline bool func_uaccess_safe(struct symbol *func)
{
	if (func)
		return func->uaccess_safe;

	return false;
}

static inline const char *call_dest_name(struct instruction *insn)
{
	static char pvname[19];
	struct reloc *rel;
	int idx;

	if (insn->call_dest)
		return insn->call_dest->name;

	rel = insn_reloc(NULL, insn);
	if (rel && !strcmp(rel->sym->name, "pv_ops")) {
		idx = (rel->addend / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return "{dynamic}";
}

static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *rel;
	int idx;

	rel = insn_reloc(file, insn);
	if (!rel || strcmp(rel->sym->name, "pv_ops"))
		return false;

	idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));

	if (file->pv_ops[idx].clean)
		return true;

	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}

static inline bool noinstr_call_dest(struct objtool_file *file,
				     struct instruction *insn,
				     struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func) {
		if (file->pv_ops)
			return pv_call_dest(file, insn);

		return false;
	}

	/*
	 * If the symbol is from a noinstr section; we good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened. At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}

static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn->call_dest)) {
		WARN_FUNC("call to %s() leaves .noinstr.text section",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
		WARN_FUNC("call to %s() with UACCESS enabled",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_FUNC("call to %s() with DF set",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	return 0;
}

static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
				insn->sec, insn->offset);
		return 1;
	}

	return validate_call(file, insn, state);
}

static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 */
	if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
		return next_insn_same_sec(file, alt_group->orig_group->last_insn);

	return next_insn_same_sec(file, insn);
}

/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
3300
 * tools/objtool/Documentation/objtool.txt.
3301
 */
3302
static int validate_branch(struct objtool_file *file, struct symbol *func,
P
			   struct instruction *insn, struct insn_state state)
3304 3305
{
	struct alternative *alt;
P
	struct instruction *next_insn, *prev_insn = NULL;
3307
	struct section *sec;
3308
	u8 visited;
3309 3310 3311 3312 3313
	int ret;

	sec = insn->sec;

	while (1) {
3314
		next_insn = next_insn_to_validate(file, insn);
3315

3316
		if (func && insn->func && func != insn->func->pfunc) {
3317 3318 3319
			WARN("%s() falls through to next function %s()",
			     func->name, insn->func->name);
			return 1;
3320 3321
		}

3322 3323 3324
		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
3325
			return 1;
3326 3327
		}

3328 3329
		visited = VISITED_BRANCH << state.uaccess;
		if (insn->visited & VISITED_BRANCH_MASK) {
3330
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
3331 3332
				return 1;

3333
			if (insn->visited & visited)
P
				return 0;
P
		} else {
			nr_insns_visited++;
3337 3338
		}

3339 3340 3341
		if (state.noinstr)
			state.instr += insn->instr;

P
		if (insn->hint) {
3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371
			if (insn->restore) {
				struct instruction *save_insn, *i;

				i = insn;
				save_insn = NULL;

				sym_for_each_insn_continue_reverse(file, func, i) {
					if (i->save) {
						save_insn = i;
						break;
					}
				}

				if (!save_insn) {
					WARN_FUNC("no corresponding CFI save for CFI restore",
						  sec, insn->offset);
					return 1;
				}

				if (!save_insn->visited) {
					WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
						  sec, insn->offset);
					return 1;
				}

				insn->cfi = save_insn->cfi;
				nr_cfi_reused++;
			}

			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */

			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_reused++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (opts.backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(file, insn, &state);
			if (ret)
				return ret;

			if (opts.stackval && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			if (insn->dead_end)
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (opts.backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}

static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int ret, warnings = 0;

	if (!file->hints)
		return 0;

	init_insn_state(file, &state, sec);

	if (sec) {
		insn = find_insn(file, sec, 0);
		if (!insn)
			return 0;
	} else {
		insn = list_first_entry(&file->insn_list, typeof(*insn), list);
	}

	while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
		if (insn->hint && !insn->visited && !insn->ignore) {
			ret = validate_branch(file, insn->func, insn, state);
			if (ret && opts.backtrace)
				BT_FUNC("<=== (hint)", insn);
			warnings += ret;
		}

		insn = list_next_entry(insn, list);
	}

	return warnings;
}

/*
 * Validate rethunk entry constraint: must untrain RET before the first RET.
 *
 * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes
 * before an actual RET instruction.
 */
static int validate_entry(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *next, *dest;
	int ret, warnings = 0;

	for (;;) {
		next = next_insn_to_validate(file, insn);

		if (insn->visited & VISITED_ENTRY)
			return 0;

		insn->visited |= VISITED_ENTRY;

		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			struct alternative *alt;
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_entry(file, alt->insn);
				if (ret) {
					if (opts.backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		switch (insn->type) {

		case INSN_CALL_DYNAMIC:
		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			WARN_FUNC("early indirect call", insn->sec, insn->offset);
			return 1;

		case INSN_JUMP_UNCONDITIONAL:
		case INSN_JUMP_CONDITIONAL:
			if (!is_sibling_call(insn)) {
				if (!insn->jump_dest) {
					WARN_FUNC("unresolved jump target after linking?!?",
						  insn->sec, insn->offset);
					return -1;
				}
				ret = validate_entry(file, insn->jump_dest);
				if (ret) {
					if (opts.backtrace) {
						BT_FUNC("(branch%s)", insn,
							insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
					}
					return ret;
				}

				if (insn->type == INSN_JUMP_UNCONDITIONAL)
					return 0;

				break;
			}

			/* fallthrough */
		case INSN_CALL:
			dest = find_insn(file, insn->call_dest->sec,
					 insn->call_dest->offset);
			if (!dest) {
				WARN("Unresolved function after linking!?: %s",
				     insn->call_dest->name);
				return -1;
			}

			ret = validate_entry(file, dest);
			if (ret) {
				if (opts.backtrace)
					BT_FUNC("(call)", insn);
				return ret;
			}
			/*
			 * If a call returns without error, it must have seen UNTRAIN_RET.
			 * Therefore any non-error return is a success.
			 */
			return 0;

		case INSN_RETURN:
			WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset);
			return 1;

		case INSN_NOP:
			if (insn->retpoline_safe)
				return 0;
			break;

		default:
			break;
		}

		if (!next) {
			WARN_FUNC("teh end!", insn->sec, insn->offset);
			return -1;
		}
		insn = next;
	}

	return warnings;
}

/*
 * Validate that all branches starting at 'insn->entry' encounter UNRET_END
 * before RET.
 */
static int validate_unret(struct objtool_file *file)
{
	struct instruction *insn;
	int ret, warnings = 0;

	for_each_insn(file, insn) {
		if (!insn->entry)
			continue;

		ret = validate_entry(file, insn);
		if (ret < 0) {
			WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset);
			return ret;
		}
		warnings += ret;
	}

	return warnings;
}

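/*
 * In retpoline/rethunk builds, warn about any remaining indirect jump/call or
 * bare 'ret' that hasn't been annotated retpoline-safe.
 */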
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN)
			continue;

		if (insn->retpoline_safe)
			continue;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(insn->sec->name, ".init.text") && !opts.module)
			continue;

		if (insn->type == INSN_RETURN) {
			if (opts.rethunk) {
				WARN_FUNC("'naked' return found in RETHUNK build",
					  insn->sec, insn->offset);
			} else
				continue;
		} else {
			WARN_FUNC("indirect %s found in RETPOLINE build",
				  insn->sec, insn->offset,
				  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
		}

		warnings++;
	}

	return warnings;
}

static bool is_kasan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
}

static bool is_ubsan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name,
			"__ubsan_handle_builtin_unreachable"));
}

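/*
 * Decide whether an unreachable instruction warning should be suppressed:
 * alternatives, dead code left behind by weak symbols, and KASAN/UBSAN
 * artifacts are all expected to be unreachable.
 */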
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
		return true;

	/*
	 * Ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 *
	 * In this case we'll find a piece of code (whole function) that is not
	 * covered by a !section symbol. Ignore them.
	 */
	if (opts.link && !insn->func) {
		int size = find_symbol_hole_containing(insn->sec, insn->offset);
		unsigned long end = insn->offset + size;

		if (!size) /* not a hole */
			return false;

		if (size < 0) /* hole until the end */
			return true;

		sec_for_each_insn_continue(file, insn) {
			/*
			 * If we reach a visited instruction at or before the
			 * end of the hole, ignore the unreachable.
			 */
			if (insn->visited)
				return true;

			if (insn->offset >= end)
				break;

			/*
			 * If this hole jumps to a .cold function, mark it ignore too.
			 */
			if (insn->jump_dest && insn->jump_dest->func &&
			    strstr(insn->jump_dest->func->name, ".cold")) {
				struct instruction *dest = insn->jump_dest;
				func_for_each_insn(file, dest->func, dest)
					dest->ignore = true;
			}
		}

		return false;
	}

	if (!insn->func)
		return false;

	if (insn->func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
3835 3836
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
3837
	 */
3838 3839
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
3840 3841 3842 3843 3844
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}

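/*
 * Validate a single function symbol by following its code with
 * validate_branch(), starting from the given initial state.
 */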
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn->func, insn, *state);
	if (ret && opts.backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}

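/*
 * Validate every STT_FUNC symbol in the given section, each starting from a
 * freshly initialized function CFI state.
 */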
static int validate_section(struct objtool_file *file, struct section *sec)
{
	struct insn_state state;
	struct symbol *func;
	int warnings = 0;

	list_for_each_entry(func, &sec->symbol_list, list) {
		if (func->type != STT_FUNC)
			continue;

		init_insn_state(file, &state, sec);
		set_func_state(&state.cfi);

		warnings += validate_symbol(file, sec, func, &state);
	}

	return warnings;
}

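/*
 * Validate only the .noinstr.text and .entry.text sections, including their
 * unwind hints.
 */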
static int validate_noinstr_sections(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	sec = find_section_by_name(file->elf, ".noinstr.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	sec = find_section_by_name(file->elf, ".entry.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	return warnings;
}

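/*
 * Validate all functions in every executable (SHF_EXECINSTR) section.
 */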
static int validate_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	for_each_sec(file, sec) {
		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		warnings += validate_section(file, sec);
	}

	return warnings;
}

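/*
 * A reference to this ENDBR instruction was found, so it must stay; remove it
 * from the list of sealing candidates.
 */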
static void mark_endbr_used(struct instruction *insn)
{
	if (!list_empty(&insn->call_node))
		list_del_init(&insn->call_node);
}

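/*
 * Scan the instruction's relocations for references into text and warn when
 * the referenced location does not start with ENDBR, unless the target is
 * exempt (e.g. marked noendbr, a static call trampoline, or a reference back
 * into the same function).
 */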
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations.  Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {
	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;
	default:
		break;
	}

	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc->offset + 1,
					      (insn->offset + insn->len) - (reloc->offset + 1))) {

		/*
		 * static_call_update() references the trampoline, which
		 * doesn't have (or need) ENDBR.  Skip warning in that case.
		 */
		if (reloc->sym->static_call_tramp)
			continue;

		off = reloc->sym->offset;
		if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32)
			off += arch_dest_reloc_offset(reloc->addend);
		else
			off += reloc->addend;

		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		if (dest->type == INSN_ENDBR) {
			mark_endbr_used(dest);
			continue;
		}

		if (dest->func && dest->func == insn->func) {
			/*
			 * Any reference from a function into itself is either
			 * _THIS_IP_ or IRET-to-self.
			 *
			 * There is no sane way to annotate _THIS_IP_ since the
			 * compiler treats the relocation as a constant and is
			 * happy to fold in offsets, skewing any annotation we
			 * do, leading to vast amounts of false-positives.
			 *
			 * There's also compiler generated _THIS_IP_ through
			 * KCOV and such which we have no hope of annotating.
			 *
			 * As such, blanket accept self-references without
			 * issue.
			 */
			continue;
		}

		if (dest->noendbr)
			continue;

		WARN_FUNC("relocation to !ENDBR: %s",
			  insn->sec, insn->offset,
			  offstr(dest->sec, dest->offset));

		warnings++;
	}

	return warnings;
}

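/*
 * Same check as validate_ibt_insn(), but for a reference to text coming from
 * a data section.
 */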
static int validate_ibt_data_reloc(struct objtool_file *file,
				   struct reloc *reloc)
{
	struct instruction *dest;

	dest = find_insn(file, reloc->sym->sec,
			 reloc->sym->offset + reloc->addend);
	if (!dest)
		return 0;

	if (dest->type == INSN_ENDBR) {
		mark_endbr_used(dest);
		return 0;
	}

	if (dest->noendbr)
		return 0;

	WARN_FUNC("data relocation to !ENDBR: %s",
		  reloc->sec->base, reloc->offset,
		  offstr(dest->sec, dest->offset));

	return 1;
}

/*
 * Validate IBT rules and remove used ENDBR instructions from the seal list.
 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
 * NOPs) later, in create_ibt_endbr_seal_sections().
 */
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	for_each_sec(file, sec) {

		/* Already done by validate_ibt_insn() */
		if (sec->sh.sh_flags & SHF_EXECINSTR)
			continue;

		if (!sec->reloc)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 */
		if (!strncmp(sec->name, ".discard", 8)			||
		    !strncmp(sec->name, ".debug", 6)			||
		    !strcmp(sec->name, ".altinstructions")		||
		    !strcmp(sec->name, ".ibt_endbr_seal")		||
		    !strcmp(sec->name, ".orc_unwind_ip")		||
		    !strcmp(sec->name, ".parainstructions")		||
		    !strcmp(sec->name, ".retpoline_sites")		||
		    !strcmp(sec->name, ".smp_locks")			||
		    !strcmp(sec->name, ".static_call_sites")		||
		    !strcmp(sec->name, "_error_injection_whitelist")	||
		    !strcmp(sec->name, "_kprobe_blacklist")		||
		    !strcmp(sec->name, "__bug_table")			||
		    !strcmp(sec->name, "__ex_table")			||
		    !strcmp(sec->name, "__jump_table")			||
		    !strcmp(sec->name, "__mcount_loc"))
			continue;

		list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}

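/*
 * Straight-Line-Speculation (SLS) mitigation check: every return and indirect
 * jump must be followed by an INT3 trap so the CPU cannot speculate past it.
 */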
static int validate_sls(struct objtool_file *file)
{
	struct instruction *insn, *next_insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		next_insn = next_insn_same_sec(file, insn);

		if (insn->retpoline_safe)
			continue;

		switch (insn->type) {
		case INSN_RETURN:
			if (!next_insn || next_insn->type != INSN_TRAP) {
				WARN_FUNC("missing int3 after ret",
					  insn->sec, insn->offset);
				warnings++;
			}

			break;
		case INSN_JUMP_DYNAMIC:
			if (!next_insn || next_insn->type != INSN_TRAP) {
				WARN_FUNC("missing int3 after indirect jump",
					  insn->sec, insn->offset);
				warnings++;
			}
			break;
		default:
			break;
		}
	}

	return warnings;
}

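/*
 * Warn about any instruction that branch validation never visited, unless
 * ignore_unreachable_insn() says the dead code is expected.
 */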
static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

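/*
 * Top-level entry point of the checker: decode the object file, run the
 * selected validation passes, and emit the requested annotation sections
 * (static call sites, retpoline/return sites, mcount locations, ENDBR seals
 * and ORC data).
 */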
int check(struct objtool_file *file)
{
	int ret, warnings = 0;

	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);

	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
		goto out;

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = decode_sections(file);
	if (ret < 0)
		goto out;

	warnings += ret;

	if (list_empty(&file->insn_list))
		goto out;

	if (opts.retpoline) {
		ret = validate_retpoline(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	if (opts.stackval || opts.orc || opts.uaccess) {
		ret = validate_functions(file);
		if (ret < 0)
			goto out;
		warnings += ret;

		ret = validate_unwind_hints(file, NULL);
		if (ret < 0)
			goto out;
		warnings += ret;

		if (!warnings) {
			ret = validate_reachable_instructions(file);
			if (ret < 0)
				goto out;
			warnings += ret;
		}

	} else if (opts.noinstr) {
		ret = validate_noinstr_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		ret = validate_unret(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	if (opts.ibt) {
		ret = validate_ibt(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.sls) {
		ret = validate_sls(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.ibt) {
		ret = create_ibt_endbr_seal_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.orc && !list_empty(&file->insn_list)) {
		ret = orc_create(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	/*
	 *  For now, don't fail the kernel build on fatal warnings.  These
	 *  errors are still fairly common due to the growing matrix of
	 *  supported toolchains and their recent pace of change.
	 */
	return 0;
}