/*
 *  drivers/s390/s390mach.c
 *   S/390 machine check handler
 *
 *  S390 version
 *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/kthread.h>

#include <asm/lowcore.h>

#include "s390mach.h"

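/*
 * Signalled by the machine check handler when a channel report is
 * pending; the CRW collector thread sleeps on this semaphore.
 */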
static struct semaphore m_sem;

extern int css_process_crw(int, int);
extern int chsc_process_crw(void);
extern int chp_process_crw(int, int);
extern void css_reiterate_subchannels(void);

extern struct workqueue_struct *slow_path_wq;
extern struct work_struct slow_path_work;

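/*
 * Unrecoverable damage: stop the other cpus and put this cpu into a
 * disabled wait state.
 */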
static NORET_TYPE void
s390_handle_damage(char *msg)
{
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	disabled_wait((unsigned long) __builtin_return_address(0));
	for(;;);
}

/*
 * Retrieve CRWs and call function to handle event.
 *
 * Note: we currently process CRWs for io and chsc subchannels only
 */
static int
s390_collect_crw_info(void *param)
{
	struct crw crw[2];
	int ccode, ret, slow;
	struct semaphore *sem;
	unsigned int chain;

	sem = (struct semaphore *)param;
repeat:
	down_interruptible(sem);
	slow = 0;
	chain = 0;
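	/*
	 * Drain all pending CRWs. crw[0] holds the current CRW and crw[1]
	 * a possibly chained second one; longer chains are not supported
	 * and are merely reported.
	 */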
	while (1) {
		if (unlikely(chain > 1)) {
			struct crw tmp_crw;

			printk(KERN_WARNING"%s: Code does not support more "
			       "than two chained crws; please report to "
			       "linux390@de.ibm.com!\n", __FUNCTION__);
			ccode = stcrw(&tmp_crw);
			printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
			       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			       __FUNCTION__, tmp_crw.slct, tmp_crw.oflw,
			       tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
			       tmp_crw.erc, tmp_crw.rsid);
			printk(KERN_WARNING"%s: This was crw number %x in the "
			       "chain\n", __FUNCTION__, chain);
			if (ccode != 0)
				break;
			chain = tmp_crw.chn ? chain + 1 : 0;
			continue;
		}
		ccode = stcrw(&crw[chain]);
		if (ccode != 0)
			break;
		printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
		       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		       crw[chain].slct, crw[chain].oflw, crw[chain].chn,
		       crw[chain].rsc, crw[chain].anc, crw[chain].erc,
		       crw[chain].rsid);
		/* Check for overflows. */
		if (crw[chain].oflw) {
			pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
			css_reiterate_subchannels();
			chain = 0;
			slow = 1;
			continue;
		}
		switch (crw[chain].rsc) {
		case CRW_RSC_SCH:
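			/*
			 * A chained pair of CRWs is processed only after the
			 * second CRW of the pair has been read.
			 */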
			if (crw[0].chn && !chain)
				break;
			pr_debug("source is subchannel %04X\n", crw[0].rsid);
			ret = css_process_crw (crw[0].rsid,
					       chain ? crw[1].rsid : 0);
			if (ret == -EAGAIN)
				slow = 1;
			break;
		case CRW_RSC_MONITOR:
			pr_debug("source is monitoring facility\n");
			break;
		case CRW_RSC_CPATH:
			pr_debug("source is channel path %02X\n", crw[0].rsid);
			/*
			 * Check for solicited machine checks. These are
			 * created by reset channel path and need not be
			 * reported to the common I/O layer.
			 */
			if (crw[chain].slct) {
				pr_debug("solicited machine check for "
					 "channel path %02X\n", crw[0].rsid);
				break;
			}
			switch (crw[0].erc) {
			case CRW_ERC_IPARM: /* Path has come. */
				ret = chp_process_crw(crw[0].rsid, 1);
				break;
			case CRW_ERC_PERRI: /* Path has gone. */
			case CRW_ERC_PERRN:
				ret = chp_process_crw(crw[0].rsid, 0);
				break;
			default:
				pr_debug("Don't know how to handle erc=%x\n",
					 crw[0].erc);
				ret = 0;
			}
			if (ret == -EAGAIN)
				slow = 1;
			break;
		case CRW_RSC_CONFIG:
			pr_debug("source is configuration-alert facility\n");
			break;
		case CRW_RSC_CSS:
			pr_debug("source is channel subsystem\n");
			ret = chsc_process_crw();
			if (ret == -EAGAIN)
				slow = 1;
			break;
		default:
			pr_debug("unknown source\n");
			break;
		}
		/* chain is always 0 or 1 here. */
		chain = crw[chain].chn ? chain + 1 : 0;
	}
	if (slow)
		queue_work(slow_path_wq, &slow_path_work);
	goto repeat;
	return 0;
}

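/*
 * Per-cpu record of pending machine check conditions, filled in by
 * s390_do_machine_check() and evaluated later by s390_handle_mcck().
 */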
struct mcck_struct {
	int kill_task;
	int channel_report;
	int warning;
	unsigned long long mcck_code;
};

static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);

/*
 * Main machine check handler function. Will be called with interrupts enabled
 * or disabled and machine checks enabled or disabled.
 */
void
s390_handle_mcck(void)
{
	unsigned long flags;
	struct mcck_struct mcck;

	/*
	 * Disable machine checks and get the current state of accumulated
	 * machine checks. Afterwards delete the old state and enable machine
	 * checks again.
	 */
	local_irq_save(flags);
	local_mcck_disable();
	mcck = __get_cpu_var(cpu_mcck);
	memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct));
	clear_thread_flag(TIF_MCCK_PENDING);
	local_mcck_enable();
	local_irq_restore(flags);

	if (mcck.channel_report)
		up(&m_sem);

#ifdef CONFIG_MACHCHK_WARNING
/*
 * The warning may remain for a prolonged period on the bare iron.
 * (actually till the machine is powered off, or until the problem is gone)
 * So we just stop listening for the WARNING MCH and prevent continuously
 * being interrupted.  One caveat is, however, that we must do this per
 * processor and cannot use the smp version of ctl_clear_bit().
 * On VM we only get one interrupt per virtually presented machine check.
 * Though one suffices, we may get one interrupt per (virtual) processor.
 */
	if (mcck.warning) {	/* WARNING pending ? */
		static int mchchk_wng_posted = 0;
		/*
		 * Use single machine clear, as we cannot handle smp right now
		 */
		__ctl_clear_bit(14, 24);	/* Disable WARNING MCH */
		if (xchg(&mchchk_wng_posted, 1) == 0)
			kill_proc(1, SIGPWR, 1);
	}
#endif

	if (mcck.kill_task) {
		local_irq_enable();
		printk(KERN_EMERG "mcck: Terminating task because of machine "
		       "malfunction (code 0x%016llx).\n", mcck.mcck_code);
		printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
		       current->comm, current->pid);
		do_exit(SIGSEGV);
	}
}

/*
 * returns 0 if all registers could be validated
 * returns 1 otherwise
 */
static int
s390_revalidate_registers(struct mci *mci)
{
	int kill_task;
	u64 tmpclock;
	u64 zero;
	void *fpt_save_area, *fpt_creg_save_area;

	kill_task = 0;
	zero = 0;
	/* General purpose registers */
	if (!mci->gr)
		/*
		 * General purpose registers couldn't be restored and have
		 * unknown contents. Process needs to be terminated.
		 */
		kill_task = 1;

	/* Revalidate floating point registers */
	if (!mci->fp)
		/*
		 * Floating point registers can't be restored and
		 * therefore the process needs to be terminated.
		 */
		kill_task = 1;

#ifndef CONFIG_64BIT
	asm volatile(
		"	ld	0,0(%0)\n"
		"	ld	2,8(%0)\n"
		"	ld	4,16(%0)\n"
		"	ld	6,24(%0)"
		: : "a" (&S390_lowcore.floating_pt_save_area));
#endif

	if (MACHINE_HAS_IEEE) {
#ifdef CONFIG_64BIT
		fpt_save_area = &S390_lowcore.floating_pt_save_area;
		fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
#else
		fpt_save_area = (void *) S390_lowcore.extended_save_area_addr;
		fpt_creg_save_area = fpt_save_area+128;
#endif
		/* Floating point control register */
		if (!mci->fc) {
			/*
			 * Floating point control register can't be restored.
			 * Task will be terminated.
			 */
			asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
			kill_task = 1;

		} else
			asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));

		asm volatile(
			"	ld	0,0(%0)\n"
			"	ld	1,8(%0)\n"
			"	ld	2,16(%0)\n"
			"	ld	3,24(%0)\n"
			"	ld	4,32(%0)\n"
			"	ld	5,40(%0)\n"
			"	ld	6,48(%0)\n"
			"	ld	7,56(%0)\n"
			"	ld	8,64(%0)\n"
			"	ld	9,72(%0)\n"
			"	ld	10,80(%0)\n"
			"	ld	11,88(%0)\n"
			"	ld	12,96(%0)\n"
			"	ld	13,104(%0)\n"
			"	ld	14,112(%0)\n"
			"	ld	15,120(%0)\n"
			: : "a" (fpt_save_area));
	}

	/* Revalidate access registers */
	asm volatile(
		"	lam	0,15,0(%0)"
		: : "a" (&S390_lowcore.access_regs_save_area));
	if (!mci->ar)
		/*
		 * Access registers have unknown contents.
		 * Terminating task.
		 */
		kill_task = 1;

	/* Revalidate control registers */
	if (!mci->cr)
		/*
		 * Control registers have unknown contents.
		 * Can't recover and therefore stopping machine.
		 */
		s390_handle_damage("invalid control registers.");
	else
#ifdef CONFIG_64BIT
		asm volatile(
			"	lctlg	0,15,0(%0)"
			: : "a" (&S390_lowcore.cregs_save_area));
#else
		asm volatile(
			"	lctl	0,15,0(%0)"
			: : "a" (&S390_lowcore.cregs_save_area));
#endif

	/*
	 * We don't even try to revalidate the TOD register, since we simply
	 * can't write something sensible into that register.
	 */

#ifdef CONFIG_64BIT
	/*
	 * See if we can revalidate the TOD programmable register with its
	 * old contents (should be zero) otherwise set it to zero.
	 */
	if (!mci->pr)
		asm volatile(
			"	sr	0,0\n"
			"	sckpf"
			: : : "0", "cc");
	else
		asm volatile(
			"	l	0,0(%0)\n"
			"	sckpf"
			: : "a" (&S390_lowcore.tod_progreg_save_area)
			: "0", "cc");
#endif

	/* Revalidate clock comparator register */
	asm volatile(
		"	stck	0(%1)\n"
		"	sckc	0(%1)"
		: "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");

	/* Check if old PSW is valid */
	if (!mci->wp)
		/*
		 * Can't tell if we come from user or kernel mode
		 * -> stopping machine.
		 */
		s390_handle_damage("old psw invalid.");

	if (!mci->ms || !mci->pm || !mci->ia)
		kill_task = 1;

	return kill_task;
}

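/*
 * Throttle retries after instruction-processing-damage (IPD) machine
 * checks: if MAX_IPD_COUNT of them occur with less than MAX_IPD_TIME
 * between consecutive ones, the machine is stopped.
 */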
#define MAX_IPD_COUNT	29
#define MAX_IPD_TIME	(5 * 60 * USEC_PER_SEC) /* 5 minutes */

/*
 * machine check handler.
 */
void
s390_do_machine_check(struct pt_regs *regs)
{
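	/* Throttling state for instruction-processing-damage retries. */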
	static DEFINE_SPINLOCK(ipd_lock);
	static unsigned long long last_ipd;
	static int ipd_count;
	unsigned long long tmp;
	struct mci *mci;
	struct mcck_struct *mcck;
	int umode;

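	/*
	 * A machine check, much like an NMI, can interrupt almost any
	 * context, so lockdep tracking is switched off while it is handled.
	 */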
	lockdep_off();

	mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
	mcck = &__get_cpu_var(cpu_mcck);
	umode = user_mode(regs);

	if (mci->sd)
		/* System damage -> stopping machine */
		s390_handle_damage("received system damage machine check.");

	if (mci->pd) {
		if (mci->b) {
			/* Processing backup -> verify if we can survive this */
			u64 z_mcic, o_mcic, t_mcic;
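			/*
			 * z_mcic: MCIC bits that must be zero,
			 * o_mcic: MCIC bits that must be one for the
			 * machine check to be survivable.
			 */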
#ifdef CONFIG_64BIT
			z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
			o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
				  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
				  1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
				  1ULL<<16);
#else
			z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 |
				  1ULL<<29);
			o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
				  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
				  1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16);
#endif
			t_mcic = *(u64 *)mci;

			if (((t_mcic & z_mcic) != 0) ||
			    ((t_mcic & o_mcic) != o_mcic)) {
				s390_handle_damage("processing backup machine "
						   "check with damage.");
			}

			/*
			 * Nullifying exigent condition, therefore we might
			 * retry this instruction.
			 */

			spin_lock(&ipd_lock);

			tmp = get_clock();

			if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
				ipd_count++;
			else
				ipd_count = 1;

			last_ipd = tmp;

			if (ipd_count == MAX_IPD_COUNT)
				s390_handle_damage("too many ipd retries.");

			spin_unlock(&ipd_lock);
		}
		else {
			/* Processing damage -> stopping machine */
			s390_handle_damage("received instruction processing "
					   "damage machine check.");
		}
	}
	if (s390_revalidate_registers(mci)) {
		if (umode) {
			/*
			 * Couldn't restore all register contents while in
			 * user mode -> mark task for termination.
			 */
			mcck->kill_task = 1;
			mcck->mcck_code = *(unsigned long long *) mci;
			set_thread_flag(TIF_MCCK_PENDING);
		}
		else
			/*
			 * Couldn't restore all register contents while in
			 * kernel mode -> stopping machine.
			 */
			s390_handle_damage("unable to revalidate registers.");
	}

	if (mci->se)
		/* Storage error uncorrected */
		s390_handle_damage("received storage error uncorrected "
				   "machine check.");

	if (mci->ke)
		/* Storage key-error uncorrected */
		s390_handle_damage("received storage key-error uncorrected "
				   "machine check.");

	if (mci->ds && mci->fa)
		/* Storage degradation */
		s390_handle_damage("received storage degradation machine "
				   "check.");

	if (mci->cp) {
		/* Channel report word pending */
		mcck->channel_report = 1;
		set_thread_flag(TIF_MCCK_PENDING);
	}

	if (mci->w) {
		/* Warning pending */
		mcck->warning = 1;
		set_thread_flag(TIF_MCCK_PENDING);
	}
	lockdep_on();
}

/*
 * machine_check_init
 *
 * initialize machine check handling
 */
static int
machine_check_init(void)
{
	init_MUTEX_LOCKED(&m_sem);
	ctl_clear_bit(14, 25);	/* disable external damage MCH */
	ctl_set_bit(14, 27);    /* enable system recovery MCH */
#ifdef CONFIG_MACHCHK_WARNING
	ctl_set_bit(14, 24);	/* enable warning MCH */
#endif
	return 0;
}

/*
 * Initialize the machine check handler really early to be able to
 * catch all machine checks that happen during boot
 */
arch_initcall(machine_check_init);

/*
 * Machine checks for the channel subsystem must be enabled
 * after the channel subsystem is initialized
 */
static int __init
machine_check_crw_init (void)
{
	kthread_run(s390_collect_crw_info, &m_sem, "kmcheck");
	ctl_set_bit(14, 28);	/* enable channel report MCH */
	return 0;
}

device_initcall (machine_check_crw_init);