/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, which is problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it is
 * OK to have it be a function here. Similarly for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

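/*
 * Typical usage, as a minimal sketch (illustrative only; 'my_key' and
 * do_rare_work() are hypothetical, the APIs are from <linux/jump_label.h>):
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_key);
 *
 *	if (static_branch_unlikely(&my_key))
 *		do_rare_work();
 *
 *	static_branch_enable(&my_key);	// patch all branch sites to the new state
 *	static_branch_disable(&my_key);	// patch them back
 *
 * The slow_inc()/slow_dec() variants below are the reference-counted form,
 * for keys that may be enabled by several independent users at once.
 */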
void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);

static void __static_key_slow_dec_cpuslocked(struct static_key *key,
					   unsigned long rate_limit,
					   struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
				  unsigned long rate_limit,
				  struct delayed_work *work)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key, rate_limit, work);
	cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key, 0, NULL);
}

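/*
 * Deferred/rate-limited disable, as a minimal sketch (illustrative only;
 * 'my_dkey' is a hypothetical key):
 *
 *	static struct static_key_deferred my_dkey;
 *
 *	jump_label_rate_limit(&my_dkey, HZ);		// set timeout, init the work
 *	static_key_slow_inc(&my_dkey.key);		// enable takes effect at once
 *	static_key_slow_dec_deferred(&my_dkey);		// disable is batched
 *
 * Batching the decrements through a delayed work avoids repeated text
 * patching when a key is toggled rapidly.
 */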
void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

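/*
 * For reference, 'struct jump_entry' is defined per architecture in
 * asm/jump_label.h and at this point looks roughly like (field width is
 * arch dependent):
 *
 *	struct jump_entry {
 *		jump_label_t code;	// address of the patched NOP/JMP
 *		jump_label_t target;	// label to branch to when enabled
 *		jump_label_t key;	// address of the struct static_key
 *	};
 *
 * Since 'struct static_key' is at least word aligned, the lowest bit of
 * ->key is free and records the initial branch direction, see below.
 */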
static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

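/*
 * For illustration, the union inside 'struct static_key' (see
 * <linux/jump_label.h>) is roughly:
 *
 *	union {
 *		unsigned long		type;	// low bits: JUMP_TYPE_* flags
 *		struct jump_entry	*entries;
 *		struct static_key_mod	*next;
 *	};
 *
 * Both pointer targets are word aligned, so the two low bits of a valid
 * pointer are zero and can carry JUMP_TYPE_TRUE and JUMP_TYPE_LINKED.
 */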
static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

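/*
 * Walk the jump_entry table from @entry and patch each site belonging to
 * @key to match the key's current state. Entries for one key are contiguous
 * because the tables are sorted by key (see jump_label_sort_entries()), so
 * stop at the first entry with a different key or at @stop.
 */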
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 indicates an entry which has been
		 * disabled because it was in an init text area.
		 */
		if (entry->code) {
			if (kernel_text_address(entry->code))
				arch_jump_label_transform(entry, jump_label_type(entry));
			else
				WARN_ONCE(1, "can't patch jump_label at %pS",
					  (void *)(unsigned long)entry->code);
		}
	}
}

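/*
 * Boot-time setup for the core kernel's jump label table: sort it, rewrite
 * the not-taken sites with the architecture's preferred NOP, and hand each
 * static_key a pointer to its (now contiguous) block of entries.
 */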
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with the
	 * 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

/* Disable any jump label entries in __init/__exit code */
void __init jump_label_invalidate_initmem(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (init_section_contains((void *)(unsigned long)iter->code, 1))
			iter->code = 0;
	}
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;


	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

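/*
 * Hook up a newly loaded module's jump entries. For each key referenced by
 * the module:
 *  - if the key itself lives in this module, just point it at its entries;
 *  - otherwise chain a new struct static_key_mod for this module onto the
 *    key, switching the key to the 'linked' representation on first use,
 *    and patch the new sites if the key is not in its initial state.
 */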
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

/* Disable any jump label entries in module init code */
static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	case MODULE_STATE_LIVE:
		jump_label_invalidate_module_init(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

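/*
 * Caller-side sketch (illustrative only, not a call site in this file):
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr + len))
 *		ret = -EBUSY;	// refuse to patch over a jump label site
 *	...
 *	jump_label_unlock();
 *
 * kprobes, for example, rejects a probe whose address is reserved here.
 */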
static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */