/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
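
/*
 * Typical registration pattern for the APIs above (a sketch; the foo_*
 * names are hypothetical).  Holding cpu_notifier_register_begin() keeps
 * the online map stable, so the initial per-cpu setup and the notifier
 * registration cannot race with a hotplug operation:
 *
 *	cpu_notifier_register_begin();
 *
 *	for_each_online_cpu(cpu)
 *		foo_init_cpu(cpu);
 *
 *	__register_cpu_notifier(&foo_cpu_notifier);
 *
 *	cpu_notifier_register_done();
 */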

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)


void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
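
/*
 * Reader-side usage sketch for the pair above (foo_poke_cpu() is a
 * hypothetical per-cpu operation):
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		foo_poke_cpu(cpu);	(no CPU can come or go here)
 *	put_online_cpus();
 */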

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
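
/*
 * Usage sketch: bracket a region that must not race with CPU hotplug,
 * the way cpu_hotplug_pm_callback() further down does across
 * suspend/hibernation (the foo_* name is hypothetical):
 *
 *	cpu_hotplug_disable();
 *	foo_reconfigure_all_cpus();
 *	cpu_hotplug_enable();
 */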
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
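
/*
 * Sketch of a notifier a subsystem might hang off this chain, in the
 * same shape as smpboot_thread_call() below; all foo_* names are
 * hypothetical:
 *
 *	static int foo_cpu_callback(struct notifier_block *nfb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			foo_init_cpu(cpu);
 *			break;
 *		case CPU_DEAD:
 *			foo_cleanup_cpu(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 * registered via register_cpu_notifier(&foo_cpu_notifier).
 */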

void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		irq_unlock_sparse();
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	tick_cleanup_dead_cpu(cpu);
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
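
/*
 * For orientation: userspace normally reaches cpu_down()/cpu_up() through
 * the sysfs hotplug interface, i.e. the store handler of the per-cpu
 * "online" attribute:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online	(ends in cpu_down(1))
 *	echo 1 > /sys/devices/system/cpu/cpu1/online	(ends in cpu_up(1))
 */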
#endif /*CONFIG_HOTPLUG_CPU*/

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);

	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error) {
			cpumask_set_cpu(cpu, frozen_cpus);
		} else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
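
/*
 * Note: disable_nonboot_cpus() and enable_nonboot_cpus() are expected to
 * be driven by the suspend/hibernation core with user tasks already
 * frozen, which is why they pass tasks_frozen == 1 to _cpu_down()/_cpu_up().
 */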

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
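
/*
 * How cpumask_of() uses this table (sketch of get_cpu_mask() from
 * <linux/cpumask.h>, reproduced from memory):
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * Row 1 + (cpu % BITS_PER_LONG) has word 0 equal to 1UL << (cpu %
 * BITS_PER_LONG), and all other words zero.  Backing the pointer up by
 * cpu / BITS_PER_LONG words makes that word land at index
 * cpu / BITS_PER_LONG, which is exactly the one-bit mask for 'cpu';
 * the all-zero row 0 is what makes backing into the table safe.
 */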

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}