smp.c 27.3 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
M
Martin Schwidefsky 已提交
2
 *  SMP related functions
L
Linus Torvalds 已提交
3
 *
M
Martin Schwidefsky 已提交
4 5 6 7
 *    Copyright IBM Corp. 1999,2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
L
Linus Torvalds 已提交
8
 *
9
 *  based on other smp stuff by
L
Linus Torvalds 已提交
10 11 12
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
M
Martin Schwidefsky 已提交
13 14 15
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
L
Linus Torvalds 已提交
16 17
 */

18 19 20
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

21
#include <linux/workqueue.h>
L
Linus Torvalds 已提交
22 23 24
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
A
Alexey Dobriyan 已提交
25
#include <linux/err.h>
L
Linus Torvalds 已提交
26 27 28 29
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
30
#include <linux/irqflags.h>
L
Linus Torvalds 已提交
31
#include <linux/cpu.h>
32
#include <linux/slab.h>
M
Michael Holzheu 已提交
33
#include <linux/crash_dump.h>
34
#include <asm/asm-offsets.h>
M
Michael Holzheu 已提交
35
#include <asm/ipl.h>
36
#include <asm/setup.h>
L
Linus Torvalds 已提交
37 38
#include <asm/irq.h>
#include <asm/tlbflush.h>
39
#include <asm/timer.h>
M
Michael Holzheu 已提交
40
#include <asm/lowcore.h>
41
#include <asm/sclp.h>
42
#include <asm/vdso.h>
43
#include "entry.h"
L
Linus Torvalds 已提交
44

M
Martin Schwidefsky 已提交
45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61
enum {
	sigp_sense = 1,
	sigp_external_call = 2,
	sigp_emergency_signal = 3,
	sigp_start = 4,
	sigp_stop = 5,
	sigp_restart = 6,
	sigp_stop_and_store_status = 9,
	sigp_initial_cpu_reset = 11,
	sigp_cpu_reset = 12,
	sigp_set_prefix = 13,
	sigp_store_status_at_address = 14,
	sigp_store_extended_status_at_address = 15,
	sigp_set_architecture = 18,
	sigp_conditional_emergency_signal = 19,
	sigp_sense_running = 21,
};
62

M
Martin Schwidefsky 已提交
63 64 65 66 67 68
enum {
	sigp_order_code_accepted = 0,
	sigp_status_stored = 1,
	sigp_busy = 2,
	sigp_not_operational = 3,
};
L
Linus Torvalds 已提交
69

M
Martin Schwidefsky 已提交
70 71 72 73 74 75
enum {
	ec_schedule = 0,
	ec_call_function,
	ec_call_function_single,
	ec_stop_cpu,
};
76

M
Martin Schwidefsky 已提交
77
enum {
78 79 80 81
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

M
Martin Schwidefsky 已提交
82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97
struct pcpu {
	struct cpu cpu;
	struct task_struct *idle;	/* idle process for the cpu */
	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long async_stack;	/* async stack for the cpu */
	unsigned long panic_stack;	/* panic stack for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	int state;			/* physical cpu state */
	u32 status;			/* last status received via sigp */
	u16 address;			/* physical cpu address */
};

static u8 boot_cpu_type;
static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];

98
DEFINE_MUTEX(smp_cpu_state_mutex);
99

M
Martin Schwidefsky 已提交
100 101 102 103 104 105 106
/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
{
	register unsigned int reg1 asm ("1") = parm;
	int cc;
107

M
Martin Schwidefsky 已提交
108 109 110 111 112 113 114 115 116
	asm volatile(
		"	sigp	%1,%2,0(%3)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
	if (status && cc == 1)
		*status = reg1;
	return cc;
}
L
Linus Torvalds 已提交
117

M
Martin Schwidefsky 已提交
118
static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
119
{
M
Martin Schwidefsky 已提交
120
	int cc;
121

M
Martin Schwidefsky 已提交
122 123 124 125 126
	while (1) {
		cc = __pcpu_sigp(addr, order, parm, status);
		if (cc != sigp_busy)
			return cc;
		cpu_relax();
127 128 129
	}
}

M
Martin Schwidefsky 已提交
130
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
H
Heiko Carstens 已提交
131
{
M
Martin Schwidefsky 已提交
132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
		if (cc != sigp_busy)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, sigp_sense,
			0, &pcpu->status) != sigp_status_stored)
		return 0;
	/* Check for stopped and check stop state */
	return !!(pcpu->status & 0x50);
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, sigp_sense_running,
			0, &pcpu->status) != sigp_status_stored)
		return 1;
	/* Check for running status */
	return !(pcpu->status & 0x400);
H
Heiko Carstens 已提交
160 161
}

162
/*
M
Martin Schwidefsky 已提交
163
 * Find struct pcpu by cpu address.
164
 */
M
Martin Schwidefsky 已提交
165
static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
166 167 168
{
	int cpu;

M
Martin Schwidefsky 已提交
169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195
	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	set_bit(ec_bit, &pcpu->ec_mask);
	order = pcpu_running(pcpu) ?
		sigp_external_call : sigp_emergency_signal;
	pcpu_sigp_retry(pcpu, order, 0);
}

static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore =	(struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		pcpu->panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
			goto out;
196
	}
M
Martin Schwidefsky 已提交
197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
	lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
	lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
		if (!lc->extended_save_area_addr)
			goto out;
	}
#else
	if (vdso_alloc_per_cpu(lc))
		goto out;
#endif
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
223 224
}

M
Martin Schwidefsky 已提交
225
static void pcpu_free_lowcore(struct pcpu *pcpu)
226
{
M
Martin Schwidefsky 已提交
227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346
	pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		struct _lowcore *lc = pcpu->lowcore;

		free_page((unsigned long) lc->extended_save_area_addr);
		lc->extended_save_area_addr = 0;
	}
#else
	vdso_free_per_cpu(pcpu->lowcore);
#endif
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
}

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc = pcpu->lowcore;

	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->ftrace_func = S390_lowcore.ftrace_func;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct _lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct _lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, sigp_restart, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct _lowcore *lc = pcpu->lowcore;
	unsigned short this_cpu;

	__load_psw_mask(psw_kernel_bits);
	this_cpu = stap();
	if (pcpu->address == this_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, sigp_stop, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	lc->restart_stack = stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = (unsigned long) this_cpu;
	asm volatile(
		"0:	sigp	0,%0,6	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,5	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc");
	for (;;) ;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data, pcpu_devices->panic_stack);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
347 348
}

M
Martin Schwidefsky 已提交
349
int smp_vcpu_scheduled(int cpu)
350
{
M
Martin Schwidefsky 已提交
351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392
	return pcpu_running(pcpu_devices + cpu);
}

void smp_yield(void)
{
	if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	else if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, sigp_emergency_signal,
				   0, NULL) == sigp_busy &&
		       get_clock() < end)
			cpu_relax();
	}
	while (get_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
393
		cpu_relax();
M
Martin Schwidefsky 已提交
394
	}
395 396
}

M
Martin Schwidefsky 已提交
397 398 399
/*
 * Stop all cpus but the current one.
 */
400
void smp_send_stop(void)
L
Linus Torvalds 已提交
401
{
402 403
	cpumask_t cpumask;
	int cpu;
L
Linus Torvalds 已提交
404

405
	/* Disable all interrupts/machine checks */
406
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
407
	trace_hardirqs_off();
L
Linus Torvalds 已提交
408

409 410 411
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

M
Martin Schwidefsky 已提交
412 413
	if (oops_in_progress)
		smp_emergency_stop(&cpumask);
L
Linus Torvalds 已提交
414

415 416
	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
M
Martin Schwidefsky 已提交
417 418 419
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, sigp_stop, 0);
		while (!pcpu_stopped(pcpu))
H
Heiko Carstens 已提交
420 421 422 423
			cpu_relax();
	}
}

M
Martin Schwidefsky 已提交
424 425 426 427 428 429 430 431 432
/*
 * Stop the current cpu.
 */
void smp_stop_cpu(void)
{
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
	for (;;) ;
}

L
Linus Torvalds 已提交
433 434 435 436
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
437 438
static void do_ext_call_interrupt(unsigned int ext_int_code,
				  unsigned int param32, unsigned long param64)
L
Linus Torvalds 已提交
439
{
440
	unsigned long bits;
M
Martin Schwidefsky 已提交
441
	int cpu;
L
Linus Torvalds 已提交
442

M
Martin Schwidefsky 已提交
443
	cpu = smp_processor_id();
444
	if ((ext_int_code & 0xffff) == 0x1202)
M
Martin Schwidefsky 已提交
445
		kstat_cpu(cpu).irqs[EXTINT_EXC]++;
446
	else
M
Martin Schwidefsky 已提交
447
		kstat_cpu(cpu).irqs[EXTINT_EMS]++;
448 449 450
	/*
	 * handle bit signal external calls
	 */
M
Martin Schwidefsky 已提交
451
	bits = xchg(&pcpu_devices[cpu].ec_mask, 0);
L
Linus Torvalds 已提交
452

453 454 455
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();

456 457 458
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();

459
	if (test_bit(ec_call_function, &bits))
460 461 462 463
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
464

L
Linus Torvalds 已提交
465 466
}

467
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
468 469 470
{
	int cpu;

471
	for_each_cpu(cpu, mask)
M
Martin Schwidefsky 已提交
472
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
473 474 475 476
}

void arch_send_call_function_single_ipi(int cpu)
{
M
Martin Schwidefsky 已提交
477
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
478 479
}

480
#ifndef CONFIG_64BIT
L
Linus Torvalds 已提交
481 482 483
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
484
static void smp_ptlb_callback(void *info)
L
Linus Torvalds 已提交
485
{
M
Martin Schwidefsky 已提交
486
	__tlb_flush_local();
L
Linus Torvalds 已提交
487 488 489 490
}

void smp_ptlb_all(void)
{
491
	on_each_cpu(smp_ptlb_callback, NULL, 1);
L
Linus Torvalds 已提交
492 493
}
EXPORT_SYMBOL(smp_ptlb_all);
494
#endif /* ! CONFIG_64BIT */
L
Linus Torvalds 已提交
495 496 497 498 499 500 501 502

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
M
Martin Schwidefsky 已提交
503
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
L
Linus Torvalds 已提交
504 505 506 507 508
}

/*
 * parameter area for the set/clear control bit callbacks
 */
509
struct ec_creg_mask_parms {
M
Martin Schwidefsky 已提交
510 511 512
	unsigned long orval;
	unsigned long andval;
	int cr;
513
};
L
Linus Torvalds 已提交
514 515 516 517

/*
 * callback for setting/clearing control bits
 */
518 519
static void smp_ctl_bit_callback(void *info)
{
520
	struct ec_creg_mask_parms *pp = info;
L
Linus Torvalds 已提交
521
	unsigned long cregs[16];
522

523
	__ctl_store(cregs, 0, 15);
M
Martin Schwidefsky 已提交
524
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
525
	__ctl_load(cregs, 0, 15);
L
Linus Torvalds 已提交
526 527 528 529 530
}

/*
 * Set a bit in a control register of all cpus
 */
531 532
void smp_ctl_set_bit(int cr, int bit)
{
M
Martin Schwidefsky 已提交
533
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
L
Linus Torvalds 已提交
534

535
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
L
Linus Torvalds 已提交
536
}
537
EXPORT_SYMBOL(smp_ctl_set_bit);
L
Linus Torvalds 已提交
538 539 540 541

/*
 * Clear a bit in a control register of all cpus
 */
542 543
void smp_ctl_clear_bit(int cr, int bit)
{
M
Martin Schwidefsky 已提交
544
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
L
Linus Torvalds 已提交
545

546
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
L
Linus Torvalds 已提交
547
}
548
EXPORT_SYMBOL(smp_ctl_clear_bit);
L
Linus Torvalds 已提交
549

M
Michael Holzheu 已提交
550
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
M
Michael Holzheu 已提交
551

M
Martin Schwidefsky 已提交
552 553 554 555
struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

static void __init smp_get_save_area(int cpu, u16 address)
M
Michael Holzheu 已提交
556
{
M
Martin Schwidefsky 已提交
557 558 559
	void *lc = pcpu_devices[0].lowcore;
	struct save_area *save_area;

M
Michael Holzheu 已提交
560
	if (is_kdump_kernel())
M
Michael Holzheu 已提交
561
		return;
M
Martin Schwidefsky 已提交
562 563 564
	if (!OLDMEM_BASE && (address == boot_cpu_address ||
			     ipl_info.type != IPL_TYPE_FCP_DUMP))
		return;
565
	if (cpu >= NR_CPUS) {
M
Martin Schwidefsky 已提交
566 567
		pr_warning("CPU %i exceeds the maximum %i and is excluded "
			   "from the dump\n", cpu, NR_CPUS - 1);
568
		return;
M
Michael Holzheu 已提交
569
	}
M
Martin Schwidefsky 已提交
570 571 572 573 574 575 576 577 578 579 580 581 582 583 584
	save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
	if (!save_area)
		panic("could not allocate memory for save area\n");
	zfcpdump_save_areas[cpu] = save_area;
#ifdef CONFIG_CRASH_DUMP
	if (address == boot_cpu_address) {
		/* Copy the registers of the boot cpu. */
		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
		return;
	}
#endif
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL);
	memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
M
Michael Holzheu 已提交
585 586
}

M
Martin Schwidefsky 已提交
587
int smp_store_status(int cpu)
588
{
M
Martin Schwidefsky 已提交
589
	struct pcpu *pcpu;
590

M
Martin Schwidefsky 已提交
591 592 593 594
	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status,
			      0, NULL) != sigp_order_code_accepted)
		return -EIO;
595 596 597
	return 0;
}

M
Martin Schwidefsky 已提交
598
#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
599

M
Martin Schwidefsky 已提交
600 601 602
static inline void smp_get_save_area(int cpu, u16 address) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
603

M
Martin Schwidefsky 已提交
604
static struct sclp_cpu_info *smp_get_cpu_info(void)
605
{
M
Martin Schwidefsky 已提交
606
	static int use_sigp_detection;
607
	struct sclp_cpu_info *info;
M
Martin Schwidefsky 已提交
608 609 610 611 612 613 614 615 616 617 618 619 620
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
		use_sigp_detection = 1;
		for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
			if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) ==
			    sigp_not_operational)
				continue;
			info->cpu[info->configured].address = address;
			info->configured++;
		}
		info->combined = info->configured;
621
	}
M
Martin Schwidefsky 已提交
622
	return info;
623 624
}

M
Martin Schwidefsky 已提交
625 626 627 628
static int __devinit smp_add_present_cpu(int cpu);

static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
				       int sysfs_add)
629
{
M
Martin Schwidefsky 已提交
630
	struct pcpu *pcpu;
631
	cpumask_t avail;
M
Martin Schwidefsky 已提交
632
	int cpu, nr, i;
633

M
Martin Schwidefsky 已提交
634
	nr = 0;
635
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
M
Martin Schwidefsky 已提交
636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
			continue;
		if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = info->cpu[i].address;
		pcpu->state = (cpu >= info->configured) ?
			CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (sysfs_add && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpu = cpumask_next(cpu, &avail);
	}
	return nr;
L
Linus Torvalds 已提交
655 656
}

657 658 659 660 661
static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;

M
Martin Schwidefsky 已提交
662
	info = smp_get_cpu_info();
663 664 665 666
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
M
Martin Schwidefsky 已提交
667 668 669 670 671
			if (info->cpu[cpu].address != boot_cpu_address)
				continue;
			/* The boot cpu dictates the cpu type. */
			boot_cpu_type = info->cpu[cpu].type;
			break;
672 673
		}
	}
M
Martin Schwidefsky 已提交
674
	c_cpus = s_cpus = 0;
675
	for (cpu = 0; cpu < info->combined; cpu++) {
M
Martin Schwidefsky 已提交
676
		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
677
			continue;
M
Martin Schwidefsky 已提交
678 679 680 681
		if (cpu < info->configured) {
			smp_get_save_area(c_cpus, info->cpu[cpu].address);
			c_cpus++;
		} else
682 683
			s_cpus++;
	}
684
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
685
	get_online_cpus();
M
Martin Schwidefsky 已提交
686
	__smp_rescan_cpus(info, 0);
687
	put_online_cpus();
M
Martin Schwidefsky 已提交
688
	kfree(info);
689 690
}

L
Linus Torvalds 已提交
691
/*
692
 *	Activate a secondary processor.
L
Linus Torvalds 已提交
693
 */
M
Martin Schwidefsky 已提交
694
static void __cpuinit smp_start_secondary(void *cpuvoid)
L
Linus Torvalds 已提交
695
{
M
Martin Schwidefsky 已提交
696 697 698 699 700 701 702 703
	S390_lowcore.last_update_clock = get_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
704
	cpu_init();
705
	preempt_disable();
706 707
	init_cpu_timer();
	init_cpu_vtimer();
H
Heiko Carstens 已提交
708
	pfault_init();
709
	notify_cpu_starting(smp_processor_id());
710
	ipi_call_lock();
711
	set_cpu_online(smp_processor_id(), true);
712
	ipi_call_unlock();
713 714 715 716 717 718
	/*
	 * Wait until the cpu which brought this one up marked it
	 * active before enabling interrupts.
	 */
	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
		cpu_relax();
L
Linus Torvalds 已提交
719
	local_irq_enable();
720 721
	/* cpu_idle will call schedule for us */
	cpu_idle();
L
Linus Torvalds 已提交
722 723
}

724 725 726 727 728 729 730 731
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit smp_fork_idle(struct work_struct *work)
L
Linus Torvalds 已提交
732
{
733
	struct create_idle *c_idle;
L
Linus Torvalds 已提交
734

735 736 737
	c_idle = container_of(work, struct create_idle, work);
	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
L
Linus Torvalds 已提交
738 739 740
}

/* Upping and downing of CPUs */
741
int __cpuinit __cpu_up(unsigned int cpu)
L
Linus Torvalds 已提交
742
{
743
	struct create_idle c_idle;
M
Martin Schwidefsky 已提交
744 745
	struct pcpu *pcpu;
	int rc;
L
Linus Torvalds 已提交
746

M
Martin Schwidefsky 已提交
747 748
	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
749
		return -EIO;
M
Martin Schwidefsky 已提交
750 751 752 753
	if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
	    sigp_order_code_accepted)
		return -EIO;
	if (!pcpu->idle) {
754 755 756 757 758 759 760
		c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
		INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
		c_idle.cpu = cpu;
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
		if (IS_ERR(c_idle.idle))
			return PTR_ERR(c_idle.idle);
M
Martin Schwidefsky 已提交
761
		pcpu->idle = c_idle.idle;
762
	}
M
Martin Schwidefsky 已提交
763 764 765 766 767 768 769
	init_idle(pcpu->idle, cpu);
	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, pcpu->idle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
L
Linus Torvalds 已提交
770 771 772 773 774
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

775
static int __init setup_possible_cpus(char *s)
776
{
M
Martin Schwidefsky 已提交
777
	int max, cpu;
778

M
Martin Schwidefsky 已提交
779 780
	if (kstrtoint(s, 0, &max) < 0)
		return 0;
781
	init_cpu_possible(cpumask_of(0));
M
Martin Schwidefsky 已提交
782
	for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
783
		set_cpu_possible(cpu, true);
784 785 786 787
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

788 789
#ifdef CONFIG_HOTPLUG_CPU

790
int __cpu_disable(void)
L
Linus Torvalds 已提交
791
{
M
Martin Schwidefsky 已提交
792
	unsigned long cregs[16];
L
Linus Torvalds 已提交
793

M
Martin Schwidefsky 已提交
794 795
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
H
Heiko Carstens 已提交
796
	pfault_fini();
M
Martin Schwidefsky 已提交
797 798 799 800 801 802
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0]  &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6]  &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
L
Linus Torvalds 已提交
803 804 805
	return 0;
}

806
void __cpu_die(unsigned int cpu)
L
Linus Torvalds 已提交
807
{
M
Martin Schwidefsky 已提交
808 809
	struct pcpu *pcpu;

L
Linus Torvalds 已提交
810
	/* Wait until target cpu is down */
M
Martin Schwidefsky 已提交
811 812
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
L
Linus Torvalds 已提交
813
		cpu_relax();
M
Martin Schwidefsky 已提交
814
	pcpu_free_lowcore(pcpu);
815
	atomic_dec(&init_mm.context.attach_count);
L
Linus Torvalds 已提交
816 817
}

818
void __noreturn cpu_die(void)
L
Linus Torvalds 已提交
819 820
{
	idle_task_exit();
M
Martin Schwidefsky 已提交
821 822
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
	for (;;) ;
L
Linus Torvalds 已提交
823 824
}

825 826
#endif /* CONFIG_HOTPLUG_CPU */

L
Linus Torvalds 已提交
827 828
void __init smp_prepare_cpus(unsigned int max_cpus)
{
829 830 831
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
832 833 834
	/* request the 0x1202 external call external interrupt */
	if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1202");
M
Martin Schwidefsky 已提交
835
	smp_detect_cpus();
L
Linus Torvalds 已提交
836 837
}

H
Heiko Carstens 已提交
838
void __init smp_prepare_boot_cpu(void)
L
Linus Torvalds 已提交
839
{
M
Martin Schwidefsky 已提交
840 841 842 843 844 845 846 847 848
	struct pcpu *pcpu = pcpu_devices;

	boot_cpu_address = stap();
	pcpu->idle = current;
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = boot_cpu_address;
	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
	pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
L
Linus Torvalds 已提交
849
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
850
	cpu_set_polarization(0, POLARIZATION_UNKNOWN);
M
Martin Schwidefsky 已提交
851 852
	set_cpu_present(0, true);
	set_cpu_online(0, true);
L
Linus Torvalds 已提交
853 854
}

H
Heiko Carstens 已提交
855
void __init smp_cpus_done(unsigned int max_cpus)
L
Linus Torvalds 已提交
856 857 858
{
}

859 860 861 862 863
void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
}

L
Linus Torvalds 已提交
864 865 866 867 868 869 870 871
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
872
	return 0;
L
Linus Torvalds 已提交
873 874
}

875
#ifdef CONFIG_HOTPLUG_CPU
876
static ssize_t cpu_configure_show(struct device *dev,
M
Martin Schwidefsky 已提交
877
				  struct device_attribute *attr, char *buf)
878 879 880 881
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
M
Martin Schwidefsky 已提交
882
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
883 884 885 886
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

887
static ssize_t cpu_configure_store(struct device *dev,
M
Martin Schwidefsky 已提交
888 889
				   struct device_attribute *attr,
				   const char *buf, size_t count)
890
{
M
Martin Schwidefsky 已提交
891 892
	struct pcpu *pcpu;
	int cpu, val, rc;
893 894 895 896 897 898
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
899
	get_online_cpus();
H
Heiko Carstens 已提交
900
	mutex_lock(&smp_cpu_state_mutex);
901
	rc = -EBUSY;
902
	/* disallow configuration changes of online cpus and cpu 0 */
M
Martin Schwidefsky 已提交
903
	cpu = dev->id;
904
	if (cpu_online(cpu) || cpu == 0)
905
		goto out;
M
Martin Schwidefsky 已提交
906
	pcpu = pcpu_devices + cpu;
907 908 909
	rc = 0;
	switch (val) {
	case 0:
M
Martin Schwidefsky 已提交
910 911 912 913 914 915 916 917
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_cpu_deconfigure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_STANDBY;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
918 919
		break;
	case 1:
M
Martin Schwidefsky 已提交
920 921 922 923 924 925 926 927
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_cpu_configure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_CONFIGURED;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
928 929 930 931 932 933
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
H
Heiko Carstens 已提交
934
	put_online_cpus();
935 936
	return rc ? rc : count;
}
937
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
938 939
#endif /* CONFIG_HOTPLUG_CPU */

940 941
static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
942
{
M
Martin Schwidefsky 已提交
943
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
944
}
945
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
946 947 948

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
949
	&dev_attr_configure.attr,
950
#endif
951
	&dev_attr_address.attr,
952 953 954 955 956 957
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};
L
Linus Torvalds 已提交
958

959 960
static ssize_t show_capability(struct device *dev,
				struct device_attribute *attr, char *buf)
961 962 963 964 965 966 967 968 969
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
970
static DEVICE_ATTR(capability, 0444, show_capability, NULL);
971

972 973
static ssize_t show_idle_count(struct device *dev,
				struct device_attribute *attr, char *buf)
974 975 976
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;
977
	unsigned int sequence;
978 979

	idle = &per_cpu(s390_idle, dev->id);
980 981 982 983 984
repeat:
	sequence = idle->sequence;
	smp_rmb();
	if (sequence & 1)
		goto repeat;
985
	idle_count = idle->idle_count;
986 987
	if (idle->idle_enter)
		idle_count++;
988 989 990
	smp_rmb();
	if (idle->sequence != sequence)
		goto repeat;
991 992
	return sprintf(buf, "%llu\n", idle_count);
}
993
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
994

995 996
static ssize_t show_idle_time(struct device *dev,
				struct device_attribute *attr, char *buf)
997 998
{
	struct s390_idle_data *idle;
999
	unsigned long long now, idle_time, idle_enter;
1000
	unsigned int sequence;
1001 1002

	idle = &per_cpu(s390_idle, dev->id);
1003
	now = get_clock();
1004 1005 1006 1007 1008
repeat:
	sequence = idle->sequence;
	smp_rmb();
	if (sequence & 1)
		goto repeat;
1009 1010 1011 1012
	idle_time = idle->idle_time;
	idle_enter = idle->idle_enter;
	if (idle_enter != 0ULL && idle_enter < now)
		idle_time += now - idle_enter;
1013 1014 1015
	smp_rmb();
	if (idle->sequence != sequence)
		goto repeat;
1016
	return sprintf(buf, "%llu\n", idle_time >> 12);
1017
}
1018
static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
1019

1020
static struct attribute *cpu_online_attrs[] = {
1021 1022 1023
	&dev_attr_capability.attr,
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
1024 1025 1026
	NULL,
};

1027 1028
static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
1029 1030
};

1031 1032 1033 1034
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
M
Martin Schwidefsky 已提交
1035
	struct cpu *c = &pcpu_devices[cpu].cpu;
1036
	struct device *s = &c->dev;
1037
	struct s390_idle_data *idle;
1038
	int err = 0;
1039 1040 1041

	switch (action) {
	case CPU_ONLINE:
1042
	case CPU_ONLINE_FROZEN:
1043
		idle = &per_cpu(s390_idle, cpu);
1044
		memset(idle, 0, sizeof(struct s390_idle_data));
1045
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
1046 1047
		break;
	case CPU_DEAD:
1048
	case CPU_DEAD_FROZEN:
1049
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
1050 1051
		break;
	}
1052
	return notifier_from_errno(err);
1053 1054 1055
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
1056
	.notifier_call = smp_cpu_notify,
1057 1058
};

1059
static int __devinit smp_add_present_cpu(int cpu)
1060
{
M
Martin Schwidefsky 已提交
1061
	struct cpu *c = &pcpu_devices[cpu].cpu;
1062
	struct device *s = &c->dev;
1063 1064 1065 1066 1067 1068 1069 1070 1071
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
1086 1087 1088 1089 1090 1091 1092 1093 1094 1095
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU
1096

1097
int __ref smp_rescan_cpus(void)
1098
{
M
Martin Schwidefsky 已提交
1099 1100
	struct sclp_cpu_info *info;
	int nr;
1101

M
Martin Schwidefsky 已提交
1102 1103 1104
	info = smp_get_cpu_info();
	if (!info)
		return -ENOMEM;
1105
	get_online_cpus();
H
Heiko Carstens 已提交
1106
	mutex_lock(&smp_cpu_state_mutex);
M
Martin Schwidefsky 已提交
1107
	nr = __smp_rescan_cpus(info, 1);
1108
	mutex_unlock(&smp_cpu_state_mutex);
H
Heiko Carstens 已提交
1109
	put_online_cpus();
M
Martin Schwidefsky 已提交
1110 1111
	kfree(info);
	if (nr)
H
Heiko Carstens 已提交
1112
		topology_schedule_update();
M
Martin Schwidefsky 已提交
1113
	return 0;
1114 1115
}

1116 1117
static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
1118
				  const char *buf,
1119 1120 1121 1122 1123
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
1124 1125
	return rc ? rc : count;
}
1126
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
1127 1128
#endif /* CONFIG_HOTPLUG_CPU */

1129
static int __init s390_smp_init(void)
L
Linus Torvalds 已提交
1130
{
1131
	int cpu, rc;
1132 1133

	register_cpu_notifier(&smp_cpu_nb);
1134
#ifdef CONFIG_HOTPLUG_CPU
1135
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
1136 1137 1138 1139 1140
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
1141 1142
		if (rc)
			return rc;
L
Linus Torvalds 已提交
1143 1144 1145
	}
	return 0;
}
1146
subsys_initcall(s390_smp_init);