/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999, 2009
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cputime.h>
#include <asm/vdso.h>
#include <asm/cpu.h>
#include "entry.h"

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;

enum s390_cpu_state {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, ec_bit_sig);
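/* Return non-zero if the given logical cpu is stopped or in check stop state. */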
static int cpu_stopped(int cpu)
{
	__u32 status;

	switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
	case sigp_order_code_accepted:
	case sigp_status_stored:
		/* Check for stopped and check stop state */
		if (status & 0x50)
			return 1;
		break;
	default:
		break;
	}
	return 0;
}

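/* Stop all cpus but the current one. */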
void smp_send_stop(void)
{
	int cpu, rc;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
	trace_hardirqs_off();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);

		while (!cpu_stopped(cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		smp_ext_bitcall(cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ext_bitcall(cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

/*
 * In early ipl state a temporary logical cpu number is needed, so the sigp
 * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
 * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
 */
#define CPU_INIT_NO	1

#ifdef CONFIG_ZFCPDUMP

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost, when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
			   "the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
	       sigp_busy)
		cpu_relax();
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP */
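/* Check whether the physical cpu address is already assigned to a present cpu. */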
static int cpu_known(int cpu_id)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
			return 1;
	}
	return 0;
}

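/*
 * Find new cpus by sensing all possible cpu addresses with sigp and add
 * the stopped ones to the present mask in configured state.
 */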
static int smp_rescan_cpus_sigp(cpumask_t avail)
{
	int cpu_id, logical_cpu;

	logical_cpu = cpumask_first(&avail);
	if (logical_cpu >= nr_cpu_ids)
		return 0;
	for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		if (!cpu_stopped(logical_cpu))
			continue;
		cpu_set(logical_cpu, cpu_present_map);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = cpumask_next(logical_cpu, &avail);
		if (logical_cpu >= nr_cpu_ids)
			break;
	}
	return 0;
}

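/*
 * Find new cpus from the sclp cpu info and add them to the present mask
 * in configured or standby state.
 */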
static int smp_rescan_cpus_sclp(cpumask_t avail)
{
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;
	int rc;

	logical_cpu = cpumask_first(&avail);
	if (logical_cpu >= nr_cpu_ids)
		return 0;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	rc = sclp_get_cpu_info(info);
	if (rc)
		goto out;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		cpu_set(logical_cpu, cpu_present_map);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = cpumask_next(logical_cpu, &avail);
		if (logical_cpu >= nr_cpu_ids)
			break;
	}
out:
	kfree(info);
	return rc;
}

static int __smp_rescan_cpus(void)
{
	cpumask_t avail;

	cpus_xor(avail, cpu_possible_map, cpu_present_map);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
}

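/*
 * Boot time cpu detection: use the sclp cpu info if available, otherwise
 * fall back to sensing all cpu addresses with sigp.
 */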
static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = __cpu_logical_map[0];
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			__cpu_logical_map[CPU_INIT_NO] = cpu;
			if (!cpu_stopped(CPU_INIT_NO))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
		if (!cpu_stopped(CPU_INIT_NO)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus();
	put_online_cpus();
}

/*
 *	Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* call cpu notifiers */
	notify_cpu_starting(smp_processor_id());
	/* Mark this cpu as online */
	ipi_call_lock();
	cpu_set(smp_processor_id(), cpu_online_map);
	ipi_call_unlock();
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info();
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 *  don't care about the psw and regs settings since we'll never
	 *  reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

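/* Allocate the lowcore, async stack and panic stack for a new cpu. */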
static int __cpuinit smp_alloc_lowcore(int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;

	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	if (!lowcore)
		return -ENOMEM;
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	if (!panic_stack || !async_stack)
		goto out;
	memcpy(lowcore, &S390_lowcore, 512);
	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;

#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		unsigned long save_area;

		save_area = get_zeroed_page(GFP_KERNEL);
		if (!save_area)
			goto out;
		lowcore->extended_save_area_addr = (u32) save_area;
	}
#else
	if (vdso_alloc_per_cpu(cpu, lowcore))
		goto out;
#endif
	lowcore_ptr[cpu] = lowcore;
	return 0;

out:
	free_page(panic_stack);
	free_pages(async_stack, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, LC_ORDER);
	return -ENOMEM;
}

static void smp_free_lowcore(int cpu)
{
	struct _lowcore *lowcore;

	lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		free_page((unsigned long) lowcore->extended_save_area_addr);
#else
	vdso_free_per_cpu(cpu, lowcore);
#endif
	free_page(lowcore->panic_stack - PAGE_SIZE);
	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, LC_ORDER);
	lowcore_ptr[cpu] = NULL;
}

/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	u32 lowcore;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
		return -EIO;
	if (smp_alloc_lowcore(cpu))
		return -ENOMEM;
	do {
		ccode = signal_processor(cpu, sigp_initial_cpu_reset);
		if (ccode == sigp_busy)
			udelay(10);
		if (ccode == sigp_not_operational)
			goto err_out;
	} while (ccode == sigp_busy);

	lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
	while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
		udelay(10);

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_nr = cpu;
	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
	cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
	cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;

err_out:
	smp_free_lowcore(cpu);
	return -EIO;
}

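/* Parse the "possible_cpus=" kernel parameter. */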
static int __init setup_possible_cpus(char *s)
{
	int pcpus, cpu;

	pcpus = simple_strtoul(s, NULL, 0);
	init_cpu_possible(cpumask_of(0));
	for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

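/* Remove the current cpu from the online mask and disable its interrupts. */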
int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 <<  6 | 1 <<  4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!cpu_stopped(cpu))
		cpu_relax();
	smp_free_lowcore(cpu);
	pr_info("Processor %d stopped\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
#ifndef CONFIG_64BIT
	unsigned long save_area = 0;
#endif
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	unsigned int cpu;

	smp_detect_cpus();

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	print_cpu_info();

	/* Reallocate current lowcore, but keep its contents. */
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	BUG_ON(!lowcore || !panic_stack || !async_stack);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		save_area = get_zeroed_page(GFP_KERNEL);
#endif
	local_irq_disable();
	local_mcck_disable();
	lowcore_ptr[smp_processor_id()] = lowcore;
	*lowcore = S390_lowcore;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->async_stack = async_stack + ASYNC_SIZE;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		lowcore->extended_save_area_addr = (u32) save_area;
#endif
	set_prefix((u32)(unsigned long) lowcore);
	local_mcck_enable();
	local_irq_enable();
#ifdef CONFIG_64BIT
	if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
		BUG();
#endif
	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	current_thread_info()->cpu = 0;
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  const char *buf, size_t count)
{
	int cpu = dev->id;
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	if (cpu_online(cpu))
		goto out;
	rc = 0;
	switch (val) {
	case 0:
		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	case 1:
		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t cpu_polarization_show(struct sys_device *dev,
				     struct sysdev_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_polarization[cpu]) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static ssize_t show_cpu_address(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);


static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&attr_configure.attr,
#endif
	&attr_address.attr,
	&attr_polarization.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static ssize_t show_capability(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

static ssize_t show_idle_count(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, dev->id);
repeat:
	sequence = idle->sequence;
	smp_rmb();
	if (sequence & 1)
		goto repeat;
	idle_count = idle->idle_count;
	if (idle->idle_enter)
		idle_count++;
	smp_rmb();
	if (idle->sequence != sequence)
		goto repeat;
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long now, idle_time, idle_enter;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, dev->id);
	now = get_clock();
repeat:
	sequence = idle->sequence;
	smp_rmb();
	if (sequence & 1)
		goto repeat;
	idle_time = idle->idle_time;
	idle_enter = idle->idle_enter;
	if (idle_enter != 0ULL && idle_enter < now)
		idle_time += now - idle_enter;
	smp_rmb();
	if (idle->sequence != sequence)
		goto repeat;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);

static struct attribute *cpu_online_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

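/* Add or remove the per-cpu online sysfs attributes as cpus come and go. */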
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		memset(idle, 0, sizeof(struct s390_idle_data));
		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

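/* Register a present cpu with sysfs and create its attribute groups. */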
static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (!cpu_online(cpu))
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	if (!rc)
		return 0;
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	cpumask_t newcpus;
	int cpu;
	int rc;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	newcpus = cpu_present_map;
	rc = __smp_rescan_cpus();
	if (rc)
		goto out;
	cpus_andnot(newcpus, cpu_present_map, newcpus);
	for_each_cpu_mask(cpu, newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			cpu_clear(cpu, cpu_present_map);
	}
	rc = 0;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	if (!cpus_empty(newcpus))
		topology_schedule_update();
	return rc;
}

static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (!rc)
		cpu_management = val;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
			 dispatching_store);

static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
	if (rc)
		return rc;
#endif
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);