/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

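/*
 * Sort comparator: order jump entries by key address so that all entries
 * for a given key end up contiguous; __jump_label_update() relies on this
 * to stop at the first entry belonging to a different key.
 */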
static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's OK
 * to have it be a function here. The same applies to 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
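
/*
 * Illustrative usage (editor's sketch, not part of the original file; the
 * key and helper names below are hypothetical): the inc/dec interface is a
 * reference count, usually reached through static_branch_inc()/_dec():
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void my_feature_register(void)   { static_branch_inc(&my_feature_key); }
 *	void my_feature_unregister(void) { static_branch_dec(&my_feature_key); }
 *
 *	fast_path()
 *	{
 *		if (static_branch_unlikely(&my_feature_key))
 *			my_feature_hook();
 *	}
 */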

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
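
/*
 * Illustrative usage (editor's sketch, not part of the original file; the
 * key name is hypothetical): unlike the inc/dec interface above, enable and
 * disable set an absolute state, e.g. from a sysctl or boot parameter:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_debug_key);
 *
 *	if (debug_enabled)
 *		static_branch_enable(&my_debug_key);
 *	else
 *		static_branch_disable(&my_debug_key);
 */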

static void __static_key_slow_dec_cpuslocked(struct static_key *key,
					   unsigned long rate_limit,
					   struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
				  unsigned long rate_limit,
				  struct delayed_work *work)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key, rate_limit, work);
	cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key, 0, NULL);
}

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
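
/*
 * Illustrative usage of the deferred interface (editor's sketch, not part of
 * the original file; the key name and interval are hypothetical): a key that
 * is toggled at high frequency can rate-limit the expensive disable path:
 *
 *	static struct static_key_deferred my_deferred_key;
 *
 *	jump_label_rate_limit(&my_deferred_key, HZ / 10);
 *	...
 *	static_key_slow_inc(&my_deferred_key.key);
 *	...
 *	static_key_slow_dec_deferred(&my_deferred_key);
 */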

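/*
 * Return 1 if [start, end] overlaps the JUMP_LABEL_NOP_SIZE bytes patched
 * at entry->code, 0 otherwise.
 */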
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

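/*
 * Bit 0 of entry->key records the compile-time branch bias (see
 * jump_entry_branch() below); mask it off to recover the key pointer.
 */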
static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use accessor
 * functions which preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

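/*
 * Combine the key's current enabled state with the entry's compile-time
 * branch bias to decide whether this site should be a jump or a nop.
 */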
static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

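/*
 * Patch every entry in [entry, stop) that belongs to @key; the entries are
 * sorted by key, so the walk stops at the first entry of a different key.
 */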
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 indicates an entry which has been
		 * disabled because it was in an init text area.
		 */
		if (entry->code) {
			if (kernel_text_address(entry->code))
				arch_jump_label_transform(entry, jump_label_type(entry));
			else
				WARN_ONCE(1, "can't patch jump_label at %pS", (void *)entry->code);
		}
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

/* Disable any jump label entries in __init code */
void __init jump_label_invalidate_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (init_kernel_text(iter->code))
			iter->code = 0;
	}
}

#ifdef CONFIG_MODULES

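/*
 * Like jump_label_type(), but based on the key's initial compile-time state
 * rather than its current enabled count; used to tell which sites still
 * carry their default instruction.
 */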
static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

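/*
 * When a key is used from more than one place, key->next points at a singly
 * linked list of these nodes, one per contributor of jump entries; mod ==
 * NULL stands for entries built into the core kernel.
 */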
struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;


	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

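/*
 * Walk the static_key_mod list hanging off @key and patch each
 * contributor's entries for this key.
 */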
static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

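/*
 * MODULE_STATE_COMING: sort the module's entries, link any keys it uses
 * into their owners' static_key_mod lists (or take ownership for keys the
 * module itself defines), and patch sites whose desired state already
 * differs from their initial one.
 */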
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

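/*
 * MODULE_STATE_GOING (or a failed COMING): unlink this module's
 * static_key_mod nodes from every key it used and, once a single node
 * remains, fold the list back into a plain entries pointer.
 */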
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

/* Disable any jump label entries in module init code */
static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	case MODULE_STATE_LIVE:
		jump_label_invalidate_module_init(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
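
/*
 * Illustrative caller (editor's note, not part of the original file): a
 * text patcher such as kprobes is expected to take jump_label_lock() and
 * skip any address for which jump_label_text_reserved() returns non-zero.
 */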

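/*
 * Re-patch every branch site associated with @key, following the module
 * list when the key is linked.
 */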
static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */