chsc.c 23.1 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7
/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
8
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
L
Linus Torvalds 已提交
9 10 11 12 13 14 15 16 17
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
18
#include <asm/chpid.h>
L
Linus Torvalds 已提交
19 20 21 22 23

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
24
#include "chp.h"
L
Linus Torvalds 已提交
25 26 27 28
#include "chsc.h"

static void *sei_page;

29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47
/*
 * Translate a CHSC command-response code into a Linux error value.
 * 0x0001 is success; 0x0004 means the command is not installed on
 * this machine; a set of request-format errors maps to -EINVAL and
 * anything else is reported as a generic I/O error.
 */
static int chsc_error_from_response(int response)
{
	if (response == 0x0001)
		return 0;
	if (response == 0x0004)
		return -EOPNOTSUPP;
	if (response == 0x0002 || response == 0x0003 ||
	    response == 0x0006 || response == 0x0007 ||
	    response == 0x0008 || response == 0x000a)
		return -EINVAL;
	return -EIO;
}

48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72
/*
 * Request/response block for the CHSC store-subchannel-description
 * command (filled in and issued by chsc_get_ssd_info(): request
 * length 0x0010, command code 0x0004).
 */
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;	  /* subchannel-set id of the queried subchannel */
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1; /* response describes a valid subchannel */
	u8 dev_valid : 1; /* devno field is valid */
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;	  /* one bit per installed entry in chpid[] */
	u8 fla_valid_mask; /* one bit per valid entry in fla[] */
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

/*
 * Retrieve the subchannel description for @schid via the CHSC ssd
 * command and copy path mask, chpids and link addresses into @ssd.
 * Only I/O and message subchannels are copied; for other types @ssd
 * is left zeroed and 0 is returned.
 * Returns 0 on success, -ENOMEM if no request page could be
 * allocated, -ENODEV/-EBUSY on adverse condition codes, or an
 * error derived from the chsc response code.
 */
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	/* Request area must be below 2G, hence GFP_DMA. */
	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	/* Query exactly one subchannel. */
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}

130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146
/*
 * Report whether the subchannel is currently doing I/O on the path
 * selected by @mask. Returns 1 if so, 0 otherwise (also when the
 * subchannel information block cannot be stored).
 */
static int check_for_io_on_path(struct subchannel *sch, int mask)
{
	if (stsch(sch->schid, &sch->schib) != 0)
		return 0;
	return (sch->schib.scsw.actl != 0) &&
	       (sch->schib.pmcw.lpum == mask);
}

/*
 * Abort an internally generated I/O on @sch with clear subchannel.
 * If the clear fails, the device state is unknown: drop all paths
 * and, if verification cannot be triggered, queue the subchannel for
 * slow-path evaluation. On success, mark the internal operation for
 * retry and notify the driver via its termination callback.
 */
static void terminate_internal_io(struct subchannel *sch)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		sch->lpm = 0;
		if (device_trigger_verify(sch) != 0)
			css_schedule_eval(sch->schid);
		return;
	}
	/* Request retry of internal operation. */
	device_set_intretry(sch);
	/* Call handler. */
	if (sch->driver && sch->driver->termination)
		sch->driver->termination(sch);
}

158
/*
 * Per-subchannel callback for chsc_chp_offline(): react to channel
 * path @data (a struct chp_id *) going away. If the subchannel does
 * not use that chpid, do nothing. Otherwise re-store the schib; if
 * the subchannel became invalid, it was the last path, or internal
 * I/O cannot be recovered, drop all paths and queue the subchannel
 * for slow-path evaluation; otherwise terminate any I/O running on
 * the lost path and restart path verification.
 * Always returns 0 so iteration continues.
 */
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	int j;
	int mask;
	struct chp_id *chpid = data;
	struct schib schib;

	/* Find the position of the chpid in the pmcw, if installed. */
	for (j = 0; j < 8; j++) {
		mask = 0x80 >> j;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->id))
			break;
	}
	if (j >= 8)
		return 0;	/* subchannel not affected */

	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!css_sch_is_valid(&schib))
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	if (check_for_io_on_path(sch, mask)) {
		if (device_is_online(sch))
			device_kill_io(sch);
		else {
			terminate_internal_io(sch);
			/* Re-start path verification. */
			if (sch->driver && sch->driver->verify)
				sch->driver->verify(sch);
		}
	} else {
		/* trigger path verification. */
		if (sch->driver && sch->driver->verify)
			sch->driver->verify(sch);
		else if (sch->lpm == mask)
			/* Last path gone and no way to verify. */
			goto out_unreg;
	}

	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

211
/*
 * Handle channel path @chpid going offline: if the path is known to
 * be in use, walk all registered subchannels and remove the path
 * from each of them.
 */
void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
}

225
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
226 227 228 229 230 231 232 233 234 235
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
236
	if (stsch_err(schid, &schib))
237
		/* We're through */
238
		return -ENXIO;
239 240

	/* Put it on the slow path. */
241
	css_schedule_eval(schid);
242 243 244
	return 0;
}

245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270
/* Parameters of a resource-accessibility event being processed. */
struct res_acc_data {
	struct chp_id chpid;	/* channel path the event refers to */
	u32 fla_mask;		/* 0xffff: full link address valid,
				 * 0xff00: only link address valid */
	u16 fla;		/* (full) link address from the event */
};

/*
 * Find the first path in @ssd that uses the chpid from @data and,
 * where a link address is valid, also matches the (masked) link
 * address. Returns that path's mask bit (0x80 >> position) or 0 if
 * no path matches.
 */
static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
			      struct res_acc_data *data)
{
	int pos, bit;

	for (pos = 0, bit = 0x80; pos < 8; pos++, bit >>= 1) {
		if ((ssd->path_mask & bit) &&
		    chp_id_is_equal(&ssd->chpid[pos], &data->chpid) &&
		    (!(ssd->fla_valid_mask & bit) ||
		     (ssd->fla[pos] & data->fla_mask) == data->fla))
			return bit;
	}
	return 0;
}

271
/*
 * Per-subchannel callback for s390_process_res_acc(): if the newly
 * accessible path (described by @data, a struct res_acc_data *)
 * belongs to this subchannel, recompute the logical path mask and
 * either reprobe the device (no path was available before) or
 * trigger path verification.
 * Always returns 0 so iteration continues.
 */
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data = data;

	spin_lock_irq(sch->lock);
	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
	if (chp_mask == 0)
		goto out;	/* path not used by this subchannel */
	if (stsch(sch->schid, &sch->schib))
		goto out;
	old_lpm = sch->lpm;
	/* Add the new path to the logical path mask. */
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		/* Device was unreachable before - redo recognition. */
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(sch);
out:
	spin_unlock_irq(sch->lock);

	return 0;
}

297
/*
 * Process a resource-accessibility event described by @res_data:
 * walk all subchannels (registered and not yet registered) and let
 * each one react to the path that became available.
 */
static void s390_process_res_acc (struct res_acc_data *res_data)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
		res_data->chpid.id);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, res_data);
}

/*
 * Extract the chpid from a link-incident record pointed to by @data.
 * Returns the chpid (non-negative) or -EINVAL if the record is NULL,
 * the node descriptor is invalid, or the node is not an
 * interface-type node.
 */
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;		/* incident qualification */
		u8  ic;		/* incident code */
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}

351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369
/*
 * Request/response block for the CHSC store-event-information (sei)
 * command issued by chsc_process_crw() (request length 0x0010,
 * command code 0x000e).
 */
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;	/* 0x80: more events pending, 0x40: overflow */
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

370
/*
 * Handle a link-incident event (content code 1): take the affected
 * channel path offline. Events from reporting sources other than 4
 * are ignored.
 */
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

389
/*
 * Handle a resource-accessibility event (content code 2): make sure
 * a channel path object exists for the reported chpid, build the
 * res_acc_data (including link address and mask when valid) and
 * propagate the event to all subchannels. Events from reporting
 * sources other than 4 are ignored.
 */
static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct res_acc_data res_data;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;	/* path not operational - nothing to do */
	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		res_data.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			res_data.fla_mask = 0xffff;
		else
			/* link address */
			res_data.fla_mask = 0xff00;
	}
	s390_process_res_acc(&res_data);
}

421 422 423 424 425 426
/* Content-code dependent data of a channel-path-configuration event. */
struct chp_config_data {
	u8 map[32];	/* bitmap of affected chpids */
	u8 op;		/* operation: 0=configure, 1=deconfigure, 2=cancel */
	u8 pc;
};

427
/*
 * Handle a channel-path-configuration notification (content code 8):
 * for every chpid set in the event's bitmap, schedule the requested
 * configure/deconfigure operation or cancel a pending deconfigure.
 * Events from reporting sources other than 0 are ignored.
 */
static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		printk(KERN_WARNING "cio: processing configure event %d for "
		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

458
/*
 * Dispatch one store-event-information result to the handler for its
 * content code. On event overflow (events were lost), additionally
 * schedule a re-evaluation of all subchannels.
 */
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

483
/*
 * Channel-report-word handler for chsc events: repeatedly issue the
 * store-event-information command into the statically allocated
 * sei_page and process each returned event, until the response
 * indicates that no further events are pending (flag 0x80 clear) or
 * an error occurs.
 */
void chsc_process_crw(void)
{
	struct chsc_sei_area *sei_area;

	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT( 2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;	/* adverse condition code */

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);	/* more events pending */
}

512
static int __chp_add_new_sch(struct subchannel_id schid, void *data)
513 514 515
{
	struct schib schib;

516
	if (stsch_err(schid, &schib))
517
		/* We're through */
518
		return -ENXIO;
519 520

	/* Put it on the slow path. */
521
	css_schedule_eval(schid);
522 523 524 525
	return 0;
}


526
/*
 * Per-subchannel callback for chsc_chp_online(): if the subchannel
 * uses the channel path @data (a struct chp_id *), refresh its schib,
 * add the path to the logical path mask and trigger path
 * verification. If the schib cannot be refreshed, queue the
 * subchannel for slow-path evaluation instead.
 * Always returns 0 so iteration continues.
 */
static int __chp_add(struct subchannel *sch, void *data)
{
	int i, mask;
	struct chp_id *chpid = data;

	spin_lock_irq(sch->lock);
	/* Find the position of the chpid in the pmcw, if installed. */
	for (i=0; i<8; i++) {
		mask = 0x80 >> i;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chpid->id))
			break;
	}
	if (i==8) {
		/* Subchannel does not use this chpid. */
		spin_unlock_irq(sch->lock);
		return 0;
	}
	if (stsch(sch->schid, &sch->schib)) {
		spin_unlock_irq(sch->lock);
		css_schedule_eval(sch->schid);
		return 0;
	}
	/* Add the path to the logical path mask. */
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | mask) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(sch);

	spin_unlock_irq(sch->lock);

	return 0;
}

560
/*
 * Handle channel path @chpid coming online: if the path is usable,
 * walk all subchannels (registered and not yet registered) and add
 * the path where applicable.
 */
void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
					   &chpid);
	}
}

575 576
/*
 * Apply a vary operation for channel path @chpid to subchannel @sch.
 * @on != 0: add the path to the operational and logical path masks
 * and reprobe or verify the device. @on == 0: remove the path,
 * terminate any I/O running on it, and verify the remaining paths;
 * if no path is left and verification cannot be triggered, queue the
 * subchannel for slow-path evaluation.
 * Subchannels that do not use @chpid are left untouched.
 */
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	int chp, old_lpm;
	int mask;
	unsigned long flags;

	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (!(sch->ssd_info.path_mask & mask))
			continue;
		if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
			continue;

		if (on) {
			sch->opm |= mask;
			sch->lpm |= mask;
			if (!old_lpm)
				/* No path was available before. */
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(sch);
			break;
		}
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (check_for_io_on_path(sch, mask)) {
			if (device_is_online(sch))
				/* Path verification is done after killing. */
				device_kill_io(sch);
			else {
				/* Kill and retry internal I/O. */
				terminate_internal_io(sch);
				/* Re-start path verification. */
				if (sch->driver && sch->driver->verify)
					sch->driver->verify(sch);
			}
		} else if (!sch->lpm) {
			/* No path left. */
			if (device_trigger_verify(sch) != 0)
				css_schedule_eval(sch->schid);
		} else if (sch->driver && sch->driver->verify)
			sch->driver->verify(sch);
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
}

623
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
624
{
625
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
626 627 628 629 630

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

631
static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
632
{
633
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
634 635 636 637 638

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

639 640 641 642 643
/*
 * Callback for not-yet-registered subchannels during a vary-online:
 * probe the subchannel id via stsch and, if it exists, queue it for
 * slow-path evaluation. Returns -ENXIO to stop the iteration once
 * no further subchannel exists, 0 otherwise.
 */
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

652 653 654 655
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 *
 * Returns 0 (the iteration callbacks handle all errors internally).
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */

	if (on)
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}

675 676 677 678 679 680 681 682
/*
 * Remove the channel-measurement attributes from every known
 * channel path of @css.
 */
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++)
		if (css->chps[i])
			chp_remove_cmg_attr(css->chps[i]);
}

/*
 * Add channel-measurement attributes to every known channel path of
 * @css. On failure, roll back the attributes added so far and return
 * the error; returns 0 on success.
 */
static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	/* Undo the attributes added before the failing chp. */
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

/*
 * Issue the CHSC set-channel-measurement command to activate or
 * deactivate channel measurement for @css.
 * @css: channel subsystem to operate on
 * @enable: non-zero to activate, zero to deactivate measurement
 * @page: caller-provided zeroed page used as the chsc request area
 * Returns 0 on success, -ENODEV/-EBUSY on adverse condition codes,
 * -EINVAL for the secm-specific error responses 0x0102/0x0103, or an
 * error derived from the chsc response code.
 */
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		/* Fix: break was missing, so the fall-through into the
		 * default case overwrote ret with -EIO. */
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
	return ret;
}

/*
 * Enable or disable channel measurement for @css. Allocates the two
 * channel-utilization blocks (cub_addr1/2) on first enable, creates
 * or removes the measurement sysfs attributes, and rolls back (secm
 * off) if attribute creation fails. The cub pages are freed whenever
 * measurement ends up disabled. Returns 0 on success or a negative
 * errno.
 */
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void  *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL |  GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			/* free_page() handles NULL (treated as address 0). */
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				/* Roll back: turn measurement off again. */
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	free_page((unsigned long)secm_area);
	return ret;
}

800 801
/*
 * Retrieve the channel-path description for @chpid via the CHSC
 * store-channel-path-description command (code 0x0002) and copy it
 * into @desc. Returns 0 on success, -ENOMEM if no request page could
 * be allocated, -ENODEV/-EBUSY on adverse condition codes, or an
 * error derived from the chsc response code.
 */
int chsc_determine_channel_path_description(struct chp_id chpid,
					    struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	/* Query exactly one channel path. */
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret == 0)
		/* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
	else
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874
/*
 * Store the measurement characteristics @chars for channel path @chp,
 * keeping only the values whose bits are set in the validity mask
 * @cmcv. Only cmg modes 2 and 3 carry such data; other modes need no
 * characteristics. NOTE(review): a kmalloc failure silently leaves
 * chp->cmg_chars NULL - callers apparently tolerate that.
 */
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}

875
/*
 * Query the channel-measurement characteristics of @chp via the CHSC
 * store-channel-measurement-characteristics command (code 0x0022)
 * and store cmg, shared state and the cmg-dependent values in @chp
 * (cmg/shared are set to -1 when the response is marked not valid).
 * Returns 0 on success, -ENOMEM if no request page could be
 * allocated, -ENODEV/-EBUSY on adverse condition codes, or an error
 * derived from the chsc response code.
 */
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;	/* characteristics validity mask */
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	/* Query exactly one channel path. */
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret == 0) {
		/* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
	} else {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

938
/*
 * Allocate the static page used by chsc_process_crw() for
 * store-event-information requests. Must be below 2G (GFP_DMA).
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int __init chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
			      "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}

947 948 949 950 951
/*
 * Release the store-event-information page again.
 * Fix: sei_page is allocated with get_zeroed_page() in
 * chsc_alloc_sei_area(), i.e. it comes from the page allocator, so
 * it must be released with free_page() - freeing it with kfree()
 * (slab allocator) is an allocator mismatch.
 */
void __init chsc_free_sei_area(void)
{
	free_page((unsigned long)sei_page);
}

952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968
/*
 * Enable a CHSC facility via the set-domain-attributes command
 * (code 0x0031) with the given @operation_code.
 * Returns 0 on success, -ENOMEM if no request page could be
 * allocated, -ENODEV/-EBUSY on adverse condition codes,
 * -EOPNOTSUPP if the facility is not provided (response 0x0101), or
 * an error derived from the chsc response code.
 */
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		/* Facility not provided. */
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
 out:
	free_page((unsigned long)sda_area);
	return ret;
}

L
Linus Torvalds 已提交
999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014
/* Cached channel-subsystem characteristics, filled in once at init. */
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

/*
 * Query the general and chsc characteristics of the channel
 * subsystem via the CHSC store-channel-subsystem-characteristics
 * command (code 0x0010) and cache them in the globals above.
 * Returns 0 on success, -ENOMEM if no request page could be
 * allocated, -ENODEV/-EBUSY on adverse condition codes, or an error
 * derived from the chsc response code.
 */
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area)
		return -ENOMEM;

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	free_page ((unsigned long) scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);