/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's ok
 * to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

static void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	cpus_read_lock();
	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	cpus_read_lock();
	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
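
/*
 * Illustrative usage sketch, not part of this file's logic (the key and
 * helper names below are hypothetical). A static key is typically declared
 * with DEFINE_STATIC_KEY_FALSE()/DEFINE_STATIC_KEY_TRUE() and tested with
 * static_branch_unlikely()/static_branch_likely(); the enable/disable and
 * inc/dec helpers above patch every branch site when the key's value flips:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature);
 *
 *	if (static_branch_unlikely(&my_feature))
 *		do_feature_work();
 *
 *	static_branch_enable(&my_feature);
 *	static_branch_disable(&my_feature);
 */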

static void static_key_slow_dec_cpuslocked(struct static_key *key,
					   unsigned long rate_limit,
					   struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
				  unsigned long rate_limit,
				  struct delayed_work *work)
{
	cpus_read_lock();
	static_key_slow_dec_cpuslocked(key, rate_limit, work);
	cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
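
/*
 * Illustrative usage sketch for the rate-limited path above (the key name is
 * hypothetical): a 'struct static_key_deferred' batches the slow decrement so
 * that rapid inc/dec cycles do not repeatedly patch kernel text:
 *
 *	static struct static_key_deferred my_key;
 *
 *	jump_label_rate_limit(&my_key, HZ);
 *	static_key_slow_inc(&my_key.key);
 *	static_key_slow_dec_deferred(&my_key);
 *	static_key_deferred_flush(&my_key);
 */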

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
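
/*
 * Sketch of the low-bit encoding relied on here (constant names as used
 * elsewhere in this file): JUMP_TYPE_TRUE records whether the key was
 * initialized true, JUMP_TYPE_LINKED records that the union currently holds
 * a 'struct static_key_mod' list rather than a jump_entry table, and
 * JUMP_TYPE_MASK covers both bits so the real pointer is recovered by
 * masking them off.
 */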

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * entry->code set to 0 invalidates module init text sections;
		 * kernel_text_address() verifies we are not in core kernel
		 * init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
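
/*
 * Note on the walk above: jump_label_sort_entries() sorts each jump_entry
 * table by key, so the entries for a given key are contiguous and the loop
 * can stop at the first entry that belongs to a different key.
 */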

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}
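
/*
 * Unlike jump_label_type() above, this uses the key's build-time initial
 * value (static_key_type()) rather than its current enabled count, so it
 * reflects the state a branch site has before any runtime updates are
 * applied; jump_label_apply_nops() below relies on this to write only the
 * initial NOPs.
 */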

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	case MODULE_STATE_LIVE:
		jump_label_invalidate_module_init(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */