/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}
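
/*
 * Sorting by key address groups all entries for one static_key
 * contiguously in the table; __jump_label_update() relies on this to
 * patch a key's sites with a single linear walk that stops at the
 * first entry belonging to a different key.
 */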

static void jump_label_update(struct static_key *key);

void static_key_slow_inc(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	if (atomic_inc_return(&key->enabled) == 1)
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

static void __static_key_slow_dec(struct static_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);
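
/*
 * Usage sketch (illustrative only; the key and function names are
 * hypothetical, not part of this file). Callers pair
 * static_key_slow_inc()/static_key_slow_dec() around a feature's
 * lifetime; the branch site itself is a patched jump, so it is
 * near-free while the key is disabled:
 *
 *	static struct static_key my_key = STATIC_KEY_INIT_FALSE;
 *
 *	void my_feature_start(void) { static_key_slow_inc(&my_key); }
 *	void my_feature_stop(void)  { static_key_slow_dec(&my_key); }
 *
 *	void my_hot_path(void)
 *	{
 *		if (static_key_false(&my_key))
 *			my_do_feature();
 *	}
 */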

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
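
/*
 * Usage sketch (illustrative; names are hypothetical): a deferred key
 * postpones the actual text patching on the disable path by the
 * timeout set here, so rapid inc/dec cycles do not thrash the text:
 *
 *	static struct static_key_deferred my_def_key;
 *
 *	void my_init(void)
 *	{
 *		jump_label_rate_limit(&my_def_key, HZ);
 *	}
 *
 *	void my_get(void)
 *	{
 *		static_key_slow_inc(&my_def_key.key);
 *	}
 *
 *	void my_put(void)
 *	{
 *		static_key_slow_dec_deferred(&my_def_key);
 *	}
 */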

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}
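
/*
 * The test above treats the patch site as the half-open interval
 * [code, code + JUMP_LABEL_NOP_SIZE) and reports a conflict when it
 * intersects [start, end]; note that @end is inclusive.
 */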

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return (unsigned long)key->entries & JUMP_TYPE_MASK;
}
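
/*
 * key->entries is a tagged pointer: the entries array is sufficiently
 * aligned that its low JUMP_TYPE_MASK bits are free to carry the key's
 * initial-type bit(s), and the two helpers above split the word apart:
 *
 *	key->entries word:  [ entry pointer | type bits ]
 *	static_key_entries(key) -> the pointer, tag masked off
 *	static_key_type(key)    -> the tag
 */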

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
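
/*
 * The XOR spelled out (restating the table in linux/jump_label.h,
 * with JUMP_LABEL_NOP == 0 and JUMP_LABEL_JMP == 1):
 *
 *	enabled  branch  ->  enabled ^ branch
 *	   0       0         JUMP_LABEL_NOP
 *	   0       1         JUMP_LABEL_JMP
 *	   1       0         JUMP_LABEL_JMP
 *	   1       1         JUMP_LABEL_NOP
 *
 * i.e. the site stays a NOP whenever the key's state matches the way
 * the branch was written, and becomes a jump otherwise.
 */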

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * entry->code set to 0 invalidates module init text sections
		 * kernel_text_address() verifies we are not in core kernel
		 * init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		/*
		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
		 */
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	static_key_initialized = true;
	jump_label_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
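
/*
 * Chain sketch: a key may be used by several modules; each module that
 * references a key it does not own contributes one static_key_mod link
 * pointing at its own entries:
 *
 *	key->entries -> entries in the key's own object
 *	key->next    -> { mod B, its entries } -> { mod C, ... } -> NULL
 */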

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	mod = __module_text_address((unsigned long)start);
	if (!mod)
		return 0;

	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = key->next; mod; mod = mod->next) {
		struct module *m = mod->mod;

		__jump_label_update(key, mod->entries,
				    m->jump_entries + m->num_jump_entries);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			/*
			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
			 */
			*((unsigned long *)&key->entries) += (unsigned long)iter;
			key->next = NULL;
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = key->next;
		key->next = jlm;

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		prev = &key->next;
		jlm = key->next;

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		if (jlm) {
			*prev = jlm->next;
			kfree(jlm);
		}
	}
}

static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text address range between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
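
/*
 * Illustrative caller (hypothetical, not from this file): a text
 * patcher refusing to touch a range that jump labels may also patch:
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr + len - 1))
 *		ret = -EBUSY;
 *	else
 *		ret = my_patch_text(addr, len);
 *	jump_label_unlock();
 */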

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry = static_key_entries(key);
#ifdef CONFIG_MODULES
	struct module *mod;

	__jump_label_mod_update(key);

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */