/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
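
/*
 * Illustrative sketch (not part of the original file): a subsystem that must
 * initialize the CPUs that are already online and then register a hotplug
 * notifier, without racing against hotplug, typically does the following
 * (foo_init_cpu() and foo_cpu_notifier are hypothetical):
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		foo_init_cpu(cpu);
 *	__register_cpu_notifier(&foo_cpu_notifier);
 *	cpu_notifier_register_done();
 */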

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
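
/*
 * Illustrative sketch (not part of the original file): a typical read-side
 * user pins the hotplug state while walking the online CPUs
 * (do_per_cpu_work() is a hypothetical helper):
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);
 *	put_online_cpus();
 */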

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
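
/*
 * Illustrative sketch (not part of the original file): the write side, i.e.
 * cpu_up()/cpu_down() calling _cpu_up()/_cpu_down() below, nests the two
 * levels of protection like so:
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	... actually bring the CPU up or down ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */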

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}
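
/*
 * Illustrative sketch (not part of the original file): a callback invoked
 * through this chain usually switches on the action, masking off the
 * _FROZEN variants (foo_cpu_callback(), foo_prepare() and foo_cleanup()
 * are hypothetical):
 *
 *	static int foo_cpu_callback(struct notifier_block *nfb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return notifier_from_errno(foo_prepare(cpu));
 *		case CPU_DEAD:
 *			foo_cleanup(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 */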

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
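
/*
 * Note (not part of the original file): in kernels of this vintage, writing
 * "0" or "1" to /sys/devices/system/cpu/cpuN/online reaches cpu_down(N) or
 * cpu_up(N) through the cpu subsystem's offline/online hooks in
 * drivers/base/cpu.c.
 */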
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * disabling cpu hotplug to avoid a cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each value nr, the NR_CPUS-bit binary value 1 << nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
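
/*
 * Illustrative sketch (not part of the original file): cpumask_of(cpu) in
 * <linux/cpumask.h> resolves to a pointer into the table above, roughly:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *
 * so the resulting mask has exactly the bit for 'cpu' set; the empty row 0
 * is what makes backing up by cpu / BITS_PER_LONG words safe.
 */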

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
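
/*
 * Illustrative sketch (not part of the original file): early boot code marks
 * the boot CPU in all of these masks; boot_cpu_init() in init/main.c does
 * roughly the following for the booting processor:
 *
 *	int cpu = smp_processor_id();
 *
 *	set_cpu_online(cpu, true);
 *	set_cpu_active(cpu, true);
 *	set_cpu_present(cpu, true);
 *	set_cpu_possible(cpu, true);
 *
 * Architecture code later fills in the present/possible masks for the other
 * CPUs it discovers, typically via set_cpu_present()/set_cpu_possible() or
 * the init_cpu_*() helpers above.
 */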