/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
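
/*
 * Example (illustrative sketch, not taken from this file): the registration
 * protocol described above, as a subsystem might use it to initialize the
 * CPUs that are already online and then hook into the notifier chain without
 * racing against hotplug.  my_init_cpu() and my_cpu_notifier are hypothetical
 * placeholders.
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		my_init_cpu(cpu);
 *	__register_cpu_notifier(&my_cpu_notifier);
 *	cpu_notifier_register_done();
 */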

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)


void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);
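
/*
 * Example (illustrative sketch): a typical reader-side section that must not
 * race with CPU hotplug.  my_prepare_cpu() is a hypothetical helper; the
 * point is that the set of online CPUs cannot change between the get/put
 * pair.  try_get_online_cpus() is the non-blocking variant and returns false
 * instead of sleeping when a hotplug writer is active.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		my_prepare_cpu(cpu);
 *	put_online_cpus();
 */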

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() not an api which is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
				break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
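
/*
 * Example (illustrative sketch): the usual shape of a hotplug callback that
 * would be registered with register_cpu_notifier().  The my_* names are
 * hypothetical; the action values are the ones delivered via cpu_notify()
 * above, with CPU_TASKS_FROZEN masked off so suspend/resume transitions are
 * handled like normal hotplug.
 *
 *	static int my_cpu_callback(struct notifier_block *nfb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			my_alloc_percpu_state(cpu);
 *			break;
 *		case CPU_DEAD:
 *			my_free_percpu_state(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_notifier);
 */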

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so its not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
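
/*
 * Example (illustrative sketch, hedged): the intended caller is architecture
 * teardown code, after the CPU has been marked offline.  A hypothetical
 * arch __cpu_disable() implementation might end with something like:
 *
 *	set_cpu_online(cpu, false);
 *	my_migrate_irqs_away(cpu);	(hypothetical arch helper)
 *	clear_tasks_mm_cpumask(cpu);
 */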

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading to do not warn about a task,
		 * which was running on this cpu in the past, and
		 * it's just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void __cpuinit smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
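
/*
 * Example (illustrative sketch, hedged): in a typical architecture's
 * secondary-CPU entry path this call pairs with set_cpu_online(), roughly:
 *
 *	void secondary_start_kernel(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		(arch-specific per-cpu setup, interrupts still disabled)
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_ONLINE);
 *	}
 *
 * The exact function name and setup steps are arch-specific assumptions,
 * not something defined by this file.
 */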

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
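
/*
 * Worked example (a sketch of the cpumask_of()/get_cpu_mask() logic from
 * <linux/cpumask.h>, paraphrased here for illustration):
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * E.g. with BITS_PER_LONG == 64, cpumask_of(70) selects row 1 + 6 and then
 * steps back one long, so word 1 of the returned mask is that row's first
 * long, (1UL << 6), which is exactly bit 70 of the mask; the neighbouring
 * words are the zero padding supplied by the surrounding rows.
 */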

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}