/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's ok
 * to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

static void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
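
/*
 * Illustrative summary (descriptive note, not from the original source) of
 * the key->enabled values used above:
 *
 *	 0: key disabled, no update in progress
 *	-1: a 0 -> 1 transition is still patching branch sites; per
 *	    static_key_count() the key already reports as enabled
 *	 n: key enabled with n references; further increments take the
 *	    lock-free cmpxchg path
 */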

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
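
/*
 * Illustrative usage sketch; 'my_feature' and do_feature_work() are
 * hypothetical callers, not part of this file:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature);
 *
 *	if (static_branch_unlikely(&my_feature))
 *		do_feature_work();
 *
 *	static_branch_enable(&my_feature);	// forwards to static_key_enable()
 *	static_branch_inc(&my_feature);		// forwards to static_key_slow_inc()
 */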

static void static_key_slow_dec_cpuslocked(struct static_key *key,
					   unsigned long rate_limit,
					   struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
				  unsigned long rate_limit,
				  struct delayed_work *work)
{
	cpus_read_lock();
	static_key_slow_dec_cpuslocked(key, rate_limit, work);
	cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
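
/*
 * Illustrative usage sketch for the deferred flavour; 'my_key' is a
 * hypothetical key, not part of this file:
 *
 *	static struct static_key_deferred my_key;
 *
 *	jump_label_rate_limit(&my_key, HZ);	// batch disables for ~1 second
 *	static_key_slow_inc(&my_key.key);
 *	...
 *	static_key_slow_dec_deferred(&my_key);	// text patching may happen
 *						// later, from the delayed work
 */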

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; the accessor
 * functions below preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
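
/*
 * Illustrative layout of the two low bits kept in key->type (the
 * JUMP_TYPE_* masks are defined in linux/jump_label.h):
 *
 *	bit 0 (JUMP_TYPE_TRUE)   - the key's initial branch direction
 *	bit 1 (JUMP_TYPE_LINKED) - the pointer refers to a static_key_mod
 *				   list instead of a jump_entry table
 */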

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
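
/*
 * Illustrative truth table for the XOR above, matching the table in
 * linux/jump_label.h (JUMP_LABEL_NOP == 0, JUMP_LABEL_JMP == 1):
 *
 *	enabled    branch bit (1 == likely)    required instruction
 *	   0                 0                   NOP
 *	   0                 1                   JMP
 *	   1                 0                   JMP
 *	   1                 1                   NOP
 */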

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 invalidates module init text sections;
		 * kernel_text_address() verifies we are not in core kernel
		 * init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}
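
/*
 * Illustrative picture of a linked key, as built by jump_label_add_module()
 * below: the list head is the most recently loaded module using the key and
 * the tail describes the key's defining site (mod == NULL for built-in code).
 *
 *	key -> [mod B] -> [mod A] -> [defining site]
 */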

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;


	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	case MODULE_STATE_LIVE:
		jump_label_invalidate_module_init(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
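
/*
 * Illustrative caller sketch (hypothetical, not part of this file): code
 * that patches kernel text would typically do
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr + size))
 *		ret = -EBUSY;	// refuse to patch over a jump label site
 *	jump_label_unlock();
 */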

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */