/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
43 44
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
45
 * @rollback:	Perform a rollback
46 47 48
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
49
 * @result:	Result of the operation
50 51
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
52 53 54 55
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
56
	enum cpuhp_state	fail;
57 58 59
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
60
	bool			rollback;
61 62
	bool			single;
	bool			bringup;
63
	struct hlist_node	*node;
64
	struct hlist_node	*last;
65 66
	enum cpuhp_state	cb_state;
	int			result;
67 68
	struct completion	done_up;
	struct completion	done_down;
69
#endif
70 71
};

72 73 74
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};
75

76
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);


static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

97 98
#endif

99 100 101 102 103 104 105
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers	are gone
106
 * @cant_stop:	Bringup/teardown can't be stopped at this step
107 108
 */
struct cpuhp_step {
109 110
	const char		*name;
	union {
111 112 113 114
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
115
	union {
116 117 118 119
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
120 121 122 123
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
124 125
};

126
static DEFINE_MUTEX(cpuhp_state_mutex);
127
static struct cpuhp_step cpuhp_bp_states[];
128
static struct cpuhp_step cpuhp_ap_states[];
129

130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}

147 148 149
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
150
 * @state:	The state to do callbacks for
151
 * @bringup:	True if the bringup callback should be invoked
152 153
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
154
 *
155
 * Called from cpu hotplug and from the state register machinery.
156
 */
157
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
158 159
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
160 161
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
162
	struct cpuhp_step *step = cpuhp_get_step(state);
163 164 165 166
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

167 168 169 170 171 172 173 174 175
	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

176
	if (!step->multi_instance) {
177
		WARN_ON_ONCE(lastp && *lastp);
178
		cb = bringup ? step->startup.single : step->teardown.single;
179 180
		if (!cb)
			return 0;
181
		trace_cpuhp_enter(cpu, st->target, state, cb);
182
		ret = cb(cpu);
183
		trace_cpuhp_exit(cpu, st->state, state, ret);
184 185
		return ret;
	}
186
	cbm = bringup ? step->startup.multi : step->teardown.multi;
187 188 189 190 191
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
192
		WARN_ON_ONCE(lastp && *lastp);
193 194 195 196 197 198 199 200 201
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
202 203 204
		if (lastp && node == *lastp)
			break;

205 206 207
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
208 209 210 211 212 213 214
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
215 216
		cnt++;
	}
217 218
	if (lastp)
		*lastp = NULL;
219 220 221
	return 0;
err:
	/* Rollback the instances if one failed */
222
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
223 224 225 226 227 228
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;
229 230 231 232 233 234 235 236

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail!
		 */
		WARN_ON_ONCE(ret);
237 238 239 240
	}
	return ret;
}

241
#ifdef CONFIG_SMP
242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}

/*
 * The former STARTING/DYING states run with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

262
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
263
static DEFINE_MUTEX(cpu_add_remove_lock);
264 265
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

267
/*
268 269
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
270 271 272 273 274 275 276 277 278 279
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

281 282
/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
283 284 285 286
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

287 288
#ifdef CONFIG_HOTPLUG_CPU

289
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
290

291
void cpus_read_lock(void)
292
{
293
	percpu_down_read(&cpu_hotplug_lock);
294
}
295
EXPORT_SYMBOL_GPL(cpus_read_lock);
296

297
void cpus_read_unlock(void)
298
{
299
	percpu_up_read(&cpu_hotplug_lock);
300
}
301
EXPORT_SYMBOL_GPL(cpus_read_unlock);
302

303
void cpus_write_lock(void)
304
{
305
	percpu_down_write(&cpu_hotplug_lock);
306
}
307

308
void cpus_write_unlock(void)
309
{
310
	percpu_up_write(&cpu_hotplug_lock);
311 312
}

313
void lockdep_assert_cpus_held(void)
314
{
315
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
316
}
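/*
 * Illustrative sketch, not part of this file: a typical reader of the
 * hotplug lock brackets its loop over the online CPUs with
 * cpus_read_lock()/cpus_read_unlock() so the online mask cannot change
 * underneath it. foo_count_online() is a made-up example.
 *
 *	static unsigned int foo_count_online(void)
 *	{
 *		unsigned int cpu, cnt = 0;
 *
 *		cpus_read_lock();
 *		for_each_online_cpu(cpu)
 *			cnt++;
 *		cpus_read_unlock();
 *		return cnt;
 *	}
 */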
317

318 319 320 321 322 323 324 325 326 327
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
328
	cpu_hotplug_disabled++;
329 330
	cpu_maps_update_done();
}
331
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
332

333 334 335 336 337 338 339
static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

340 341 342
void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
343
	__cpu_hotplug_enable();
344 345
	cpu_maps_update_done();
}
346
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
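/*
 * Illustrative sketch, not part of this file: callers such as the PM
 * notifier below pair the two helpers around a phase during which no
 * CPU may come or go. foo_critical_phase() is a made-up placeholder.
 *
 *	cpu_hotplug_disable();
 *	foo_critical_phase();
 *	cpu_hotplug_enable();
 */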
347
#endif	/* CONFIG_HOTPLUG_CPU */
348

349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
398
	wait_for_ap_thread(st, st->bringup);
399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}
415

416 417 418 419
static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

420
	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
421
	wait_for_ap_thread(st, true);
422 423
	if (WARN_ON_ONCE(!cpu_online(cpu)))
		return -ECANCELED;
424 425 426 427 428

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

429 430 431 432
	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
433 434
}

435 436 437 438 439
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

440 441 442 443 444 445 446
	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

447 448
	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
449
	irq_unlock_sparse();
450
	if (ret)
451
		return ret;
452
	return bringup_wait_for_ap(cpu);
453 454
}

455 456 457 458
/*
 * Hotplug state machine related functions
 */

459
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
460 461
{
	for (st->state--; st->state > st->target; st->state--) {
462
		struct cpuhp_step *step = cpuhp_get_step(st->state);
463 464

		if (!step->skip_onerr)
465
			cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
466 467 468 469
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
470
			      enum cpuhp_state target)
471 472 473 474 475 476
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
477
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
478 479
		if (ret) {
			st->target = prev_state;
480
			undo_cpu_up(cpu, st);
481 482 483 484 485 486
			break;
		}
	}
	return ret;
}

487 488 489 490 491 492 493
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

494 495
	init_completion(&st->done_up);
	init_completion(&st->done_down);
496 497 498 499 500 501 502 503 504 505 506 507
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
508 509 510 511 512 513 514 515 516 517
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
518 519 520 521
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
522 523
	bool bringup = st->bringup;
	enum cpuhp_state state;
524 525

	/*
526 527
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
528 529 530
	 */
	smp_mb();

531 532
	if (WARN_ON_ONCE(!st->should_run))
		return;
533

534
	cpuhp_lock_acquire(bringup);
535

536
	if (st->single) {
537 538 539 540 541 542 543 544
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
545
		} else {
546 547 548 549
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
550
		}
551 552 553 554 555 556 557 558 559 560 561 562 563 564
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (st->rollback) {
		struct cpuhp_step *step = cpuhp_get_step(state);
		if (step->skip_onerr)
			goto next;
	}

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();
565

566 567 568 569
		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
570
	} else {
571 572 573 574 575 576 577 578 579 580 581
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle: no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
582
	}
583 584

next:
585
	cpuhp_lock_release(bringup);
586 587

	if (!st->should_run)
588
		complete_ap_thread(st, bringup);
589 590 591
}

/* Invoke a single callback on a remote cpu */
592
static int
593 594
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
595 596
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
597
	int ret;
598 599 600 601

	if (!cpu_online(cpu))
		return 0;

602 603 604 605 606
	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);
607

608 609 610 611 612
	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
613
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
614

615 616 617 618 619
	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
620
	st->cb_state = state;
621 622
	st->single = true;

623
	__cpuhp_kick_ap(st);
624 625

	/*
626
	 * If we failed and did a partial, do a rollback.
627
	 */
628 629 630 631 632 633 634
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

635 636 637 638 639
	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
640
	return ret;
641 642 643 644 645
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
646 647
	enum cpuhp_state prev_state = st->state;
	int ret;
648

649 650 651 652 653
	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);
654 655 656 657 658 659

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

677
#ifdef CONFIG_HOTPLUG_CPU
678 679 680 681 682 683 684 685 686 687 688 689
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
690 691 692 693 694 695 696 697 698 699 700
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
701
	WARN_ON(cpu_online(cpu));
702 703 704 705
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

706 707 708 709
		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
710 711 712 713 714 715 716 717 718
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
722 723
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
724
	int err, cpu = smp_processor_id();
725
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

732 733 734 735 736 737
	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
738
	/* Invoke the former CPU_DYING callbacks */
739 740 741 742 743 744 745
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
746

747 748
	/* Give up timekeeping duties */
	tick_handover_do_timer();
749
	/* Park the stopper thread */
750
	stop_machine_park(cpu);
	return 0;
}

754
static int takedown_cpu(unsigned int cpu)
{
756
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
757
	int err;

759
	/* Park the smpboot threads */
760
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
761
	smpboot_park_threads(cpu);
762

763
	/*
764 765
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
766
	 */
767
	irq_lock_sparse();
768

769 770 771
	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
772
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
773
	if (err) {
774
		/* CPU refused to die */
775
		irq_unlock_sparse();
776 777
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
778
		return err;
779
	}
780
	BUG_ON(cpu_online(cpu));

782
	/*
783 784
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
785
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
788
	 */
789
	wait_for_ap_thread(st, false);
790
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

792 793 794
	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

795
	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

799
	tick_cleanup_dead_cpu(cpu);
800
	rcutree_migrate_callbacks(cpu);
801 802
	return 0;
}

804 805 806 807
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

808
	complete_ap_thread(st, false);
809 810
}

811 812 813 814 815
void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
816
	rcu_report_dead(smp_processor_id());
817 818 819 820 821 822 823
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
824 825
}

826 827 828 829
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);
830

831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851
		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}
852

853
/* Requires cpu_add_remove_lock to be held */
854 855
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
856
{
857 858
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
859 860 861 862

	if (num_online_cpus() == 1)
		return -EBUSY;

863
	if (!cpu_present(cpu))
864 865
		return -EINVAL;

866
	cpus_write_lock();
867 868 869

	cpuhp_tasks_frozen = tasks_frozen;

870
	prev_state = cpuhp_set_state(st, target);
871 872 873 874
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
875
	if (st->state > CPUHP_TEARDOWN_CPU) {
876
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
877 878 879 880 881 882 883 884 885 886 887 888
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
889
		if (st->state > CPUHP_TEARDOWN_CPU)
890
			goto out;
891 892

		st->target = target;
893 894
	}
	/*
895
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
896 897
	 * to do the further cleanups.
	 */
898
	ret = cpuhp_down_callbacks(cpu, st, target);
899
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
900 901
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
902
	}
903

904
out:
905
	cpus_write_unlock();
906 907 908 909 910
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
911
	return ret;
912 913
}

914
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
915
{
916
	int err;
917

918
	cpu_maps_update_begin();
919 920

	if (cpu_hotplug_disabled) {
921
		err = -EBUSY;
922 923 924
		goto out;
	}

925
	err = _cpu_down(cpu, 0, target);
926

927
out:
928
	cpu_maps_update_done();
	return err;
}
931

932 933 934 935
int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
936
EXPORT_SYMBOL(cpu_down);
937 938 939

#else
#define takedown_cpu		NULL
#endif /*CONFIG_HOTPLUG_CPU*/

942
/**
943
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
944 945 946 947 948 949 950 951 952
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
953
	int ret;
954

955
	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
956 957
	while (st->state < target) {
		st->state++;
958 959 960 961 962
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
963 964 965
	}
}

966
/*
967 968 969
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
970
 */
971
void cpuhp_online_idle(enum cpuhp_state state)
972
{
973 974 975 976 977 978 979
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
980
	complete_ap_thread(st, true);
981 982
}

983
/* Requires cpu_add_remove_lock to be held */
984
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
986
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
987
	struct task_struct *idle;
988
	int ret = 0;

990
	cpus_write_lock();
991

992
	if (!cpu_present(cpu)) {
993 994 995 996
		ret = -EINVAL;
		goto out;
	}

997 998 999 1000 1001
	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
1002
		goto out;
1003 1004 1005 1006 1007 1008 1009 1010

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
1011
	}
1012

1013 1014
	cpuhp_tasks_frozen = tasks_frozen;

1015
	cpuhp_set_state(st, target);
1016 1017 1018 1019
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
1020
	if (st->state > CPUHP_BRINGUP_CPU) {
1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
1032
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1033 1034
	 * responsible for bringing it up to the target state.
	 */
1035
	target = min((int)target, CPUHP_BRINGUP_CPU);
1036
	ret = cpuhp_up_callbacks(cpu, st, target);
1037
out:
1038
	cpus_write_unlock();
1039 1040 1041
	return ret;
}

1042
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
1043 1044
{
	int err = 0;
1045

	if (!cpu_possible(cpu)) {
1047 1048
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
1049
#if defined(CONFIG_IA64)
1050
		pr_err("please check additional_cpus= boot parameter\n");
1051 1052 1053
#endif
		return -EINVAL;
	}
1054

1055 1056 1057
	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;
1058

1059
	cpu_maps_update_begin();
1060 1061

	if (cpu_hotplug_disabled) {
1062
		err = -EBUSY;
1063 1064 1065
		goto out;
	}

1066
	err = _cpu_up(cpu, 0, target);
1067
out:
1068
	cpu_maps_update_done();
1069 1070
	return err;
}
1071 1072 1073 1074 1075

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
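/*
 * Illustrative sketch, not part of this file: kernel-internal users
 * (e.g. processor hot-add paths) simply call the exported helpers and
 * propagate the error. foo_add_processor() is a made-up example.
 *
 *	int foo_add_processor(unsigned int cpu)
 *	{
 *		int ret = cpu_up(cpu);
 *
 *		if (ret)
 *			pr_err("foo: failed to online CPU%u: %d\n", cpu, ret);
 *		return ret;
 *	}
 */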
1077

1078
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
1080

1081
int freeze_secondary_cpus(int primary)
1082
{
1083
	int cpu, error = 0;
1084

1085
	cpu_maps_update_begin();
1086 1087
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
1088 1089
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
1090 1091
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);
1093

1094
	pr_info("Disabling non-boot CPUs ...\n");
1095
	for_each_online_cpu(cpu) {
1096
		if (cpu == primary)
1097
			continue;
1098
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1099
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1100
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1101
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
1103
		else {
1104
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
1105 1106 1107
			break;
		}
	}
1108

1109
	if (!error)
1110
		BUG_ON(num_online_cpus() > 1);
1111
	else
1112
		pr_err("Non-boot CPUs are not disabled\n");
1113 1114 1115 1116 1117 1118 1119 1120

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

1121
	cpu_maps_update_done();
1122 1123 1124
	return error;
}

1125 1126 1127 1128 1129 1130 1131 1132
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

1133
void enable_nonboot_cpus(void)
1134 1135 1136 1137
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
1138
	cpu_maps_update_begin();
1139
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
1141
		goto out;
1142

1143
	pr_info("Enabling non-boot CPUs ...\n");
1144 1145 1146

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
1148
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1149
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1150
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1151
		if (!error) {
1152
			pr_info("CPU%d is up\n", cpu);
1153 1154
			continue;
		}
1155
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1156
	}
1157 1158 1159

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
1161
out:
1162
	cpu_maps_update_done();
}

1165
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
1192
		cpu_hotplug_disable();
1193 1194 1195 1196
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
1197
		cpu_hotplug_enable();
1198 1199 1200 1201 1202 1203 1204 1205 1206 1207
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


1208
static int __init cpu_hotplug_pm_sync_init(void)
1209
{
1210 1211 1212 1213 1214
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
1215 1216 1217 1218 1219
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

1220
#endif /* CONFIG_PM_SLEEP_SMP */
1221

1222 1223
int __boot_cpu_id;

1224
#endif /* CONFIG_SMP */
1225

1226 1227 1228 1229
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
1230 1231
		.startup.single		= NULL,
		.teardown.single	= NULL,
1232 1233 1234
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS]= {
1235
		.name			= "threads:prepare",
1236 1237
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
1238
		.cant_stop		= true,
1239
	},
1240
	[CPUHP_PERF_PREPARE] = {
1241 1242 1243
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
1244
	},
1245
	[CPUHP_WORKQUEUE_PREP] = {
1246 1247 1248
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
1249
	},
1250
	[CPUHP_HRTIMERS_PREPARE] = {
1251 1252 1253
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
1254
	},
1255
	[CPUHP_SMPCFD_PREPARE] = {
1256
		.name			= "smpcfd:prepare",
1257 1258
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
1259
	},
1260 1261 1262 1263 1264
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
1265 1266 1267 1268
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
1269
	},
1270
	[CPUHP_RCUTREE_PREP] = {
1271
		.name			= "RCU/tree:prepare",
1272 1273
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
1274
	},
1275 1276 1277 1278 1279 1280
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise an RCU stall occurs.
	 */
	[CPUHP_TIMERS_DEAD] = {
1281 1282 1283
		.name			= "timers:dead",
		.startup.single		= NULL,
		.teardown.single	= timers_dead_cpu,
1284
	},
1285
	/* Kicks the plugged cpu into life */
1286 1287
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
1288 1289
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
1290
		.cant_stop		= true,
1291
	},
1292
	[CPUHP_AP_SMPCFD_DYING] = {
1293
		.name			= "smpcfd:dying",
1294 1295
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
1296
	},
1297 1298 1299 1300
	/*
	 * Handled on the control processor until the plugged processor manages
	 * this itself.
	 */
1301 1302
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
1303 1304
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
1305
		.cant_stop		= true,
1306
	},
1307 1308
#else
	[CPUHP_BRINGUP_CPU] = { },
1309 1310 1311
#endif
};

1312 1313 1314
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
1327 1328 1329
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
1330 1331
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
1332
	},
1333
	[CPUHP_AP_RCUTREE_DYING] = {
1334
		.name			= "RCU/tree:dying",
1335 1336
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
1337
	},
1338 1339 1340 1341 1342 1343
	/* Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
1344
	[CPUHP_AP_SMPBOOT_THREADS] = {
1345
		.name			= "smpboot/threads:online",
1346 1347
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= NULL,
1348
	},
1349 1350 1351 1352 1353
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
1354
	[CPUHP_AP_PERF_ONLINE] = {
1355 1356 1357
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
1358
	},
1359
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
1360 1361 1362
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
1363
	},
1364
	[CPUHP_AP_RCUTREE_ONLINE] = {
1365
		.name			= "RCU/tree:online",
1366 1367
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
1368
	},
1369
#endif
1370 1371 1372 1373
	/*
	 * The dynamically registered state space is here
	 */

1374 1375 1376 1377
#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
1378 1379
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
1380 1381 1382
	},
#endif

1383
	/* CPU is fully up and running. */
1384 1385
	[CPUHP_ONLINE] = {
		.name			= "online",
1386 1387
		.startup.single		= NULL,
		.teardown.single	= NULL,
1388 1389 1390
	},
};

1391 1392 1393 1394 1395 1396 1397 1398
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

1399 1400 1401 1402 1403 1404 1405
/*
 * Returns a free slot for dynamic state assignment. The states are
 * protected by the cpuhp_state_mutex and an empty slot is identified
 * by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
1406 1407
	enum cpuhp_state i, end;
	struct cpuhp_step *step;
1408

1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423
	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
1424 1425 1426 1427 1428 1429 1430 1431 1432 1433
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
1434 1435 1436
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
1437
	int ret = 0;
1438

1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449
	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
1450 1451
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
1452
			return ret;
1453 1454
		state = ret;
	}
1455
	sp = cpuhp_get_step(state);
1456 1457 1458
	if (name && sp->name)
		return -EBUSY;

1459 1460
	sp->startup.single = startup;
	sp->teardown.single = teardown;
1461
	sp->name = name;
1462 1463
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
1464
	return ret;
1465 1466 1467 1468
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
1469
	return cpuhp_get_step(state)->teardown.single;
1470 1471 1472 1473 1474 1475
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
1476 1477
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
1478
{
1479
	struct cpuhp_step *sp = cpuhp_get_step(state);
1480 1481
	int ret;

1482 1483 1484 1485
	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
1486 1487
	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
1488 1489 1490 1491 1492
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
1493 1494
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
1495
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1496
	else
1497
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1498
#else
1499
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1500
#endif
1501 1502 1503 1504 1505 1506 1507 1508 1509 1510
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1511
				   struct hlist_node *node)
1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
1525
			cpuhp_issue_call(cpu, state, false, node);
1526 1527 1528
	}
}

1529 1530 1531
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
1532 1533 1534 1535 1536
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

1537 1538
	lockdep_assert_cpus_held();

1539 1540 1541 1542
	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

1543
	mutex_lock(&cpuhp_state_mutex);
1544

1545
	if (!invoke || !sp->startup.multi)
1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
1561
			if (sp->teardown.multi)
1562
				cpuhp_rollback_install(cpu, state, node);
1563
			goto unlock;
1564 1565 1566 1567 1568
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
1569
unlock:
1570
	mutex_unlock(&cpuhp_state_mutex);
1571 1572 1573 1574 1575 1576 1577 1578 1579 1580
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
1581
	cpus_read_unlock();
1582 1583 1584 1585
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
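/*
 * Illustrative sketch, not part of this file: a multi-instance state
 * embeds a hlist_node in each per-device structure and registers every
 * instance against a state created with cpuhp_setup_state_multi().
 * struct foo_dev, foo_hp_state and foo_probe_one() are made up.
 *
 *	struct foo_dev {
 *		struct hlist_node node;
 *	};
 *
 *	static enum cpuhp_state foo_hp_state;
 *
 *	static int foo_probe_one(struct foo_dev *fd)
 *	{
 *		return cpuhp_state_add_instance(foo_hp_state, &fd->node);
 *	}
 */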

1586
/**
1587
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
1588 1589 1590 1591 1592 1593 1594
 * @state:		The state to setup
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
1595
 *
1596
 * The caller needs to hold cpus read locked while calling this function.
1597 1598 1599 1600 1601
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
1602
 */
1603 1604 1605 1606 1607
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
1608 1609
{
	int cpu, ret = 0;
1610
	bool dynstate;
1611

1612 1613
	lockdep_assert_cpus_held();

1614 1615 1616
	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

1617
	mutex_lock(&cpuhp_state_mutex);
1618

1619 1620
	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);
1621

1622 1623 1624 1625 1626 1627
	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

1628
	if (ret || !invoke || !startup)
1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

1642
		ret = cpuhp_issue_call(cpu, state, true, NULL);
1643
		if (ret) {
1644
			if (teardown)
1645 1646
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1647 1648 1649 1650
			goto out;
		}
	}
out:
1651
	mutex_unlock(&cpuhp_state_mutex);
1652 1653 1654 1655
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
1656
	if (!ret && dynstate)
1657 1658 1659
		return state;
	return ret;
}
1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
1676 1677
EXPORT_SYMBOL(__cpuhp_setup_state);
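/*
 * Illustrative sketch, not part of this file: drivers normally go
 * through the cpuhp_setup_state() wrapper with CPUHP_AP_ONLINE_DYN and
 * keep the returned dynamic state number for later removal. The names
 * foo_cpu_online(), foo_cpu_offline() and "subsys/foo:online" are
 * assumptions for the example.
 *
 *	static enum cpuhp_state foo_online_state;
 *
 *	static int __init foo_init(void)
 *	{
 *		int ret;
 *
 *		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 *					"subsys/foo:online",
 *					foo_cpu_online, foo_cpu_offline);
 *		if (ret < 0)
 *			return ret;
 *		foo_online_state = ret;
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		cpuhp_remove_state(foo_online_state);
 *	}
 */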

1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

1689
	cpus_read_lock();
1690 1691
	mutex_lock(&cpuhp_state_mutex);

1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709
	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
1710
	cpus_read_unlock();
1711 1712 1713 1714

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1715

1716
/**
1717
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
1718 1719 1720 1721
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
1722
 * The caller needs to hold cpus read locked while calling this function.
1723 1724 1725
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
1726
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
1727
{
1728
	struct cpuhp_step *sp = cpuhp_get_step(state);
1729 1730 1731 1732
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

1733
	lockdep_assert_cpus_held();
1734

1735
	mutex_lock(&cpuhp_state_mutex);
1736 1737 1738 1739 1740 1741 1742
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

1743
	if (!invoke || !cpuhp_get_teardown_cb(state))
1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
1756
			cpuhp_issue_call(cpu, state, false, NULL);
1757 1758
	}
remove:
1759
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1760
	mutex_unlock(&cpuhp_state_mutex);
1761 1762 1763 1764 1765 1766 1767
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
1768
	cpus_read_unlock();
1769 1770 1771
}
EXPORT_SYMBOL(__cpuhp_remove_state);

1772 1773 1774 1775 1776 1777 1778 1779 1780 1781
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810
static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
1811
		goto out;
1812 1813 1814 1815 1816

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
1817
out:
1818 1819 1820 1821
	unlock_device_hotplug();
	return ret ? ret : count;
}

1822 1823 1824 1825 1826 1827 1828
static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
1829
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
1830

1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875

static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
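/*
 * Illustrative sketch, not part of this file: "fail" is a fault
 * injection hook for exercising the rollback paths from userspace. A
 * state number is picked from the global "states" file and written to
 * the per-cpu attribute, e.g. for cpu1:
 *
 *	cat /sys/devices/system/cpu/hotplug/states
 *	echo <state> > /sys/devices/system/cpu/cpu1/hotplug/fail
 *
 * The next hotplug operation on that CPU which reaches <state> then
 * fails with -EAGAIN and triggers a rollback.
 */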

1876 1877 1878
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
1879
	&dev_attr_fail.attr,
1880 1881 1882
	NULL
};

1883
static const struct attribute_group cpuhp_cpu_attr_group = {
1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
	NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
1896
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

1915
static const struct attribute_group cpuhp_cpu_root_attr_group = {
1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
	NULL
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif

1944 1945 1946 1947
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents the NR_CPUS-bit values 1<<nr for every possible nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
1949 1950
 * mask value that has a single bit set only.
 */
1951

1952
/* cpu_bit_bitmap[0] is empty - so we can back into it */
1953
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
1954 1955 1956
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
1957

1958 1959 1960 1961 1962 1963 1964
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
1965 1966
#endif
};
1967
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
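/*
 * A sketch of how cpumask_of() indexes into the table above; this
 * mirrors get_cpu_mask() in <linux/cpumask.h> and is shown here only
 * as an illustration:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * Row 1 + (cpu % BITS_PER_LONG) has bit (cpu % BITS_PER_LONG) set in
 * its first word; stepping the pointer back by cpu / BITS_PER_LONG
 * words places that word at the right offset for @cpu, so one shared
 * table provides a constant single-bit mask for every CPU number.
 */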
1968 1969 1970

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
1971 1972

#ifdef CONFIG_INIT_ALL_POSSIBLE
1973
struct cpumask __cpu_possible_mask __read_mostly
1974
	= {CPU_BITS_ALL};
1975
#else
1976
struct cpumask __cpu_possible_mask __read_mostly;
1977
#endif
1978
EXPORT_SYMBOL(__cpu_possible_mask);
1979

1980 1981
struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);
1982

1983 1984
struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);
1985

1986 1987
struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
1988 1989 1990

void init_cpu_present(const struct cpumask *src)
{
1991
	cpumask_copy(&__cpu_present_mask, src);
1992 1993 1994 1995
}

void init_cpu_possible(const struct cpumask *src)
{
1996
	cpumask_copy(&__cpu_possible_mask, src);
1997 1998 1999 2000
}

void init_cpu_online(const struct cpumask *src)
{
2001
	cpumask_copy(&__cpu_online_mask, src);
2002
}
2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
2016 2017 2018 2019

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
2020 2021 2022 2023 2024 2025 2026 2027 2028
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}