/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

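/*
 * get_online_cpus - take a reader reference on CPU hotplug.
 * While at least one reference is held, cpu_hotplug_begin() (and thus any
 * hotplug operation) is blocked. May sleep; the active hotplug writer may
 * call it recursively.
 */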
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

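/*
 * Non-blocking variant of get_online_cpus(): returns false instead of
 * sleeping if the hotplug lock cannot be taken right away.
 */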
bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

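/*
 * put_online_cpus - drop a reference taken by get_online_cpus() or
 * try_get_online_cpus(), waking a writer waiting in cpu_hotplug_begin()
 * once the refcount reaches zero.
 */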
void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

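/*
 * cpu_hotplug_done - release the writer state taken in cpu_hotplug_begin()
 * and let new readers through again.
 */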
void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

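/*
 * Lockless variant of register_cpu_notifier(); must be called under
 * cpu_notifier_register_begin()/cpu_notifier_register_done().
 */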
int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

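/*
 * Invoke up to @nr_to_call callbacks on the CPU notifier chain (-1 means
 * all). If @nr_calls is non-NULL it is set to the number of callbacks
 * actually invoked; an error reported by a callback is translated into a
 * negative errno.
 */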
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

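/*
 * Notifier call on paths that cannot fail or roll back (e.g. CPU_DYING,
 * CPU_DEAD): any error from the chain is treated as a fatal bug.
 */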
static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

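/*
 * Lockless variant of unregister_cpu_notifier(); like __register_cpu_notifier()
 * it must be called under cpu_notifier_register_begin()/done().
 */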
void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

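/*
 * Sanity check run after a CPU has been taken down: warn about any task
 * that is still runnable on the dead CPU.
 */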
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has
		 * just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

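/* Arguments handed to take_cpu_down() via __stop_machine(). */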
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do sync before park smpboot threads to take care of the rcu boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	tick_cleanup_dead_cpu(cpu);
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

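/*
 * cpu_down - take a CPU offline through the normal hotplug path.
 * Returns -EBUSY while cpu_hotplug_disabled is set (e.g. during suspend).
 */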
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

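/*
 * cpu_up - bring a CPU online through the normal hotplug path.
 * The CPU must be in cpu_possible_mask; its memory node is brought online
 * first, and -EBUSY is returned while cpu_hotplug_disabled is set.
 */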
int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
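
/*
 * Take every CPU except the first online (boot) CPU down for suspend or
 * hibernation, recording them in 'frozen_cpus' so enable_nonboot_cpus()
 * can bring them back afterwards.
 */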

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

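/*
 * Bring the CPUs recorded in 'frozen_cpus' back online after resume and
 * allow regular CPU hotplug again.
 */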
void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}