/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @cb_state:	The state for a single callback (install/uninstall)
 * @cb:		Single callback function (install/uninstall)
 * @result:	Result of the operation
 * @done:	Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	enum cpuhp_state	cb_state;
	int			(*cb)(unsigned int cpu);
	int			result;
	struct completion	done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char	*name;
	int		(*startup)(unsigned int cpu);
	int		(*teardown)(unsigned int cpu);
	bool		skip_onerr;
	bool		cant_stop;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @step:	The step in the state machine
 * @cb:		The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
				 int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret = 0;

	if (cb) {
		trace_cpuhp_enter(cpu, st->target, step, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, step, ret);
	}
	return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
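
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * needs to initialize per-cpu state and register a notifier race-free
 * against hotplug brackets the work with the helpers above. my_nb and
 * my_init_cpu() are hypothetical names.
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		my_init_cpu(cpu);
 *	__register_cpu_notifier(&my_nb);
 *	cpu_notifier_register_done();
 */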

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);
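
/*
 * Usage sketch (illustrative only): readers that must see a stable
 * cpu_online_mask bracket the traversal; my_per_cpu_init() is a
 * hypothetical name.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		my_per_cpu_init(cpu);
 *	put_online_cpus();
 */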

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
				break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
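
/*
 * Usage sketch (illustrative only): a path that cannot tolerate cpus
 * coming or going mid-operation brackets the critical work;
 * my_critical_work() is a hypothetical name.
 *
 *	cpu_hotplug_disable();
 *	my_critical_work();
 *	cpu_hotplug_enable();
 */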
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}
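
/*
 * Usage sketch (illustrative only): a classic notifier-based client of
 * this chain. my_cpu_callback(), my_prepare_cpu() and my_cleanup_cpu()
 * are hypothetical names.
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return notifier_from_errno(my_prepare_cpu(cpu));
 *		case CPU_DEAD:
 *			my_cleanup_cpu(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_notifier);
 */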

static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;

	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}

/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}

static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

static int notify_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
	return 0;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	wait_for_completion(&st->done);
	return st->result;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	ret = bringup_wait_for_ap(cpu);
	BUG_ON(!cpu_online(cpu));
	return ret;
}

/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
			  struct cpuhp_step *steps)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = steps + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				struct cpuhp_step *steps, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = steps + st->state;

		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st, steps);
			break;
		}
	}
	return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
			struct cpuhp_step *steps)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = steps + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      struct cpuhp_step *steps, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = steps + st->state;
		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st, steps);
			break;
		}
	}
	return ret;
}
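
/*
 * Illustrative walk (not executed here): moving a cpu from state A to a
 * target B > A invokes the startup callback of every step in (A, B]; if
 * a step fails, the teardown callbacks of the steps already completed
 * are run in reverse via undo_cpu_up(), restoring the previous target.
 * The down direction mirrors this with teardown callbacks and
 * undo_cpu_down().
 */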

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

	return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target);
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	int ret = 0;

	/*
	 * Paired with the mb() in cpuhp_kick_ap_work and
	 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
	 */
	smp_mb();
	if (!st->should_run)
		return;

	st->should_run = false;

	/* Single callback invocation for [un]install ? */
	if (st->cb) {
		if (st->cb_state < CPUHP_AP_ONLINE) {
			local_irq_disable();
			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
			local_irq_enable();
		} else {
			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
		}
	} else if (st->rollback) {
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		undo_cpu_down(cpu, st, cpuhp_ap_states);
		/*
		 * This is a momentary workaround to keep the notifier users
		 * happy. Will go away once we got rid of the notifiers.
		 */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		st->rollback = false;
	} else {
		/* Cannot happen .... */
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		/* Regular hotplug work */
		if (st->state < st->target)
			ret = cpuhp_ap_online(cpu, st);
		else if (st->state > st->target)
			ret = cpuhp_ap_offline(cpu, st);
	}
	st->result = ret;
	complete(&st->done);
}

/* Invoke a single callback on a remote cpu */
static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
				    int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	if (!cpu_online(cpu))
		return 0;

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, cb);

	st->cb_state = state;
	st->cb = cb;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_completion(&st->done);
	return st->result;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
	st->result = 0;
	st->cb = NULL;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state state = st->state;

	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
	__cpuhp_kick_ap_work(st);
	wait_for_completion(&st->done);
	trace_cpuhp_exit(cpu, st->state, state, st->result);
	return st->result;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so we do not warn about a task
		 * which was running on this cpu in the past and has
		 * just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}

static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
	}
	return err;
}

static int notify_dying(unsigned int cpu)
{
	cpu_notify(CPU_DYING, cpu);
	return 0;
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = cpuhp_ap_states + st->state;

		cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_completion(&st->done);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}

static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete(&st->done);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#define notify_dying		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		st->target = prev_state;
		st->rollback = true;
		cpuhp_kick_ap_work(cpu);
	}

	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}
int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
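
/*
 * Note (editorial): the per-cpu sysfs "online" attribute ends up here,
 * so "echo 0 > /sys/devices/system/cpu/cpuN/online" is the usual way
 * this path is exercised from userspace.
 */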
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_ap_states + st->state;
		cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}
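
/*
 * Illustrative arch-side ordering (hypothetical function name, modelled
 * on a typical secondary-cpu startup path): the new cpu runs roughly
 *
 *	void my_secondary_start_kernel(void)
 *	{
 *		... low level init: MMU, per-cpu offsets ...
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 *	}
 */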

/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread and unpark the smpboot threads. If the target state is
 * beyond CPUHP_AP_ONLINE_IDLE we kick the cpuhp thread and let it bring up
 * the cpu further.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	unsigned int cpu = smp_processor_id();

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;

	/* Unpark the stopper thread and the hotplug thread of this cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE)
		__cpuhp_kick_ap_work(st);
	else
		complete(&st->done);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpu_hotplug_begin();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
out:
	cpu_hotplug_done();
	return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
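
/*
 * Note (editorial): besides the sysfs "online" attribute, cpu_up() is
 * used by boot code such as smp_init() to online all present cpus.
 */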

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup		= NULL,
		.teardown		= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:create",
		.startup		= smpboot_create_threads,
		.teardown		= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name = "perf prepare",
		.startup = perf_event_init_cpu,
		.teardown = perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name = "workqueue prepare",
		.startup = workqueue_prepare_cpu,
		.teardown = NULL,
	},
	/*
	 * Preparatory and dead notifiers. Will be replaced once the notifiers
	 * are converted to states.
	 */
	[CPUHP_NOTIFY_PREPARE] = {
		.name			= "notify:prepare",
		.startup		= notify_prepare,
		.teardown		= notify_dead,
		.skip_onerr		= true,
		.cant_stop		= true,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup		= bringup_cpu,
		.teardown		= NULL,
		.cant_stop		= true,
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup		= NULL,
		.teardown		= takedown_cpu,
		.cant_stop		= true,
	},
#else
	[CPUHP_BRINGUP_CPU] = { },
#endif
};

/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup		= sched_cpu_starting,
		.teardown		= sched_cpu_dying,
	},
	/*
	 * Low level startup/teardown notifiers. Run with interrupts
	 * disabled. Will be removed once the notifiers are converted to
	 * states.
	 */
	[CPUHP_AP_NOTIFY_STARTING] = {
		.name			= "notify:starting",
		.startup		= notify_starting,
		.teardown		= notify_dying,
		.skip_onerr		= true,
		.cant_stop		= true,
	},
	/* Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot:threads",
		.startup		= smpboot_unpark_threads,
		.teardown		= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name = "perf online",
		.startup = perf_event_init_cpu,
		.teardown = perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name = "workqueue online",
		.startup = workqueue_online_cpu,
		.teardown = workqueue_offline_cpu,
	},

	/*
	 * Online/down_prepare notifiers. Will be removed once the notifiers
	 * are converted to states.
	 */
	[CPUHP_AP_NOTIFY_ONLINE] = {
		.name			= "notify:online",
		.startup		= notify_online,
		.teardown		= notify_down_prepare,
		.skip_onerr		= true,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup		= sched_cpu_activate,
		.teardown		= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup		= NULL,
		.teardown		= NULL,
	},
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}

static void cpuhp_store_callbacks(enum cpuhp_state state,
				  const char *name,
				  int (*startup)(unsigned int cpu),
				  int (*teardown)(unsigned int cpu))
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(state);
	sp->startup = startup;
	sp->teardown = teardown;
	sp->name = name;
	mutex_unlock(&cpuhp_state_mutex);
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
			    int (*cb)(unsigned int), bool bringup)
{
	int ret;

	if (!cb)
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, cb);
	else
		ret = cpuhp_invoke_callback(cpu, state, cb);
#else
	ret = cpuhp_invoke_callback(cpu, state, cb);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   int (*teardown)(unsigned int cpu))
{
	int cpu;

	if (!teardown)
		return;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, teardown, false);
	}
}

/*
 * Returns a slot free for dynamic assignment in the Online state range. The
 * states are protected by the cpuhp_slot_states mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
		if (cpuhp_ap_states[i].name)
			continue;

		cpuhp_ap_states[i].name = "Reserved";
		mutex_unlock(&cpuhp_state_mutex);
		return i;
	}
	mutex_unlock(&cpuhp_state_mutex);
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

/**
 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
 * @state:	The state to setup
 * @invoke:	If true, the startup function is invoked for cpus where
 *		cpu state >= @state
 * @startup:	startup callback function
 * @teardown:	teardown callback function
 *
 * Returns 0 if successful, otherwise a proper error code
 */
int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu))
{
	int cpu, ret = 0;
	int dyn_state = 0;

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	get_online_cpus();

	/* currently, only assignments for the dynamic ONLINE state are possible */
1460
	if (state == CPUHP_AP_ONLINE_DYN) {
1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537
		dyn_state = 1;
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			goto out;
		state = ret;
	}

	cpuhp_store_callbacks(state, name, startup, teardown);

	if (!invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, startup, true);
		if (ret) {
			cpuhp_rollback_install(cpu, state, teardown);
			cpuhp_store_callbacks(state, NULL, NULL, NULL);
			goto out;
		}
	}
out:
	put_online_cpus();
	if (!ret && dyn_state)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
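
/*
 * Usage sketch (illustrative only): a subsystem requesting a dynamically
 * allocated online state. my_online_cpu()/my_offline_cpu() are
 * hypothetical names; on success with CPUHP_AP_ONLINE_DYN the return
 * value is the state number that was actually reserved.
 *
 *	ret = __cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mysubsys:online",
 *				  true, my_online_cpu, my_offline_cpu);
 *	if (ret < 0)
 *		return ret;
 *	my_state = ret;
 *	...
 *	__cpuhp_remove_state(my_state, true);
 */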

/**
 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	get_online_cpus();

	if (!invoke || !teardown)
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, teardown, false);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL);
	put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);

	unlock_device_hotplug();
	return ret ? ret : count;
}
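
/*
 * Usage sketch (illustrative only): with CONFIG_CPU_HOTPLUG_STATE_CONTROL
 * enabled, the "target" attribute created below lets userspace step a cpu
 * to an arbitrary state, where N is one of the numbers reported by the
 * "states" attribute:
 *
 *	cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	echo N > /sys/devices/system/cpu/cpu1/hotplug/target
 */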

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
	NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
	NULL
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}