/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/*
	 * Synchronizes accesses to refcount.  Also blocks the new
	 * readers during an ongoing cpu hotplug operation.
	 */
	struct mutex lock;
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
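
/*
 * A sketch of typical reader-side usage (illustrative only;
 * do_per_cpu_work() is a hypothetical helper, not part of this file).
 * Code that must see a stable cpu_online_mask brackets the region with
 * the get/put pair above:
 *
 *	unsigned int cpu;
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);
 *	put_online_cpus();
 *
 * No CPU can be unplugged between the two calls; a writer in
 * cpu_hotplug_begin() sleeps until the refcount drops to zero.
 */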

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero, and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 *
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
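
/*
 * Writer-side pairing, as used by _cpu_up()/_cpu_down() below (a sketch
 * of the calling convention, not additional locking):
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	... update cpu maps, invoke notifiers ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */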

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
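
/*
 * A sketch of how a subsystem typically uses this chain (my_cpu_callback,
 * my_cpu_notifier and the per-cpu setup/teardown helpers are hypothetical).
 * The low bits of 'action' carry the event; CPU_TASKS_FROZEN may be OR'ed
 * in on the suspend/resume paths:
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			setup_my_percpu_state(cpu);
 *			break;
 *		case CPU_DEAD:
 *			teardown_my_percpu_state(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_notifier);
 */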

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (p->utime || p->stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk(KERN_WARNING "%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	ret = smpboot_prepare(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, NULL);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef	CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t	*pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef	CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

void __weak arch_disable_nonboot_cpus_begin(void)
{
}

void __weak arch_disable_nonboot_cpus_end(void)
{
}

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);
	arch_disable_nonboot_cpus_begin();

	printk(KERN_INFO "Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	arch_disable_nonboot_cpus_end();

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 * hotplug when tasks are about to be frozen. Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete. Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}


/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable_before_freeze();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable_after_thaw();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
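
/*
 * An illustrative call site (a sketch; the exact sequence is
 * arch-specific): secondary startup code typically runs, with
 * interrupts still disabled,
 *
 *	notify_cpu_starting(cpu);
 *	set_cpu_online(cpu, true);
 *
 * before the boot CPU returns from __cpu_up().
 */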

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for every bit number nr, the NR_CPUS-bit value 1 << nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
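
/*
 * Worked example (a sketch of the arithmetic done by cpumask_of() via
 * get_cpu_mask() in <linux/cpumask.h>):
 *
 *	p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *
 * For cpu = 68 on a 64-bit machine this picks row 5 (bit 4 set in its
 * first word) and backs up one long, so the set bit is seen at position
 * 64 + 4 = 68.  Backing up is safe because row 0 is all zeroes.
 */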

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}