chsc.c 20.8 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4
/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
C
Cornelia Huck 已提交
5
 *    Copyright IBM Corp. 1999,2008
L
Linus Torvalds 已提交
6
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
7
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
L
Linus Torvalds 已提交
8 9 10 11 12 13 14 15 16
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
17
#include <asm/chpid.h>
L
Linus Torvalds 已提交
18

19
#include "../s390mach.h"
L
Linus Torvalds 已提交
20 21 22 23
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
24
#include "chp.h"
L
Linus Torvalds 已提交
25 26 27 28
#include "chsc.h"

static void *sei_page;

29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47
/*
 * Map a CHSC response code to a negative errno value.
 * 0x0001 is the success code; 0x0004 means the command is not
 * supported; a small set of codes indicate an invalid request.
 * Anything else is reported as a generic I/O error.
 */
static int chsc_error_from_response(int response)
{
	if (response == 0x0001)
		return 0;
	if (response == 0x0004)
		return -EOPNOTSUPP;
	if (response == 0x0002 || response == 0x0003 ||
	    response == 0x0006 || response == 0x0007 ||
	    response == 0x0008 || response == 0x000a)
		return -EINVAL;
	return -EIO;
}

48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72
/*
 * Request/response block for the CHSC store-subchannel-description
 * (SSD) command, code 0x0004.  Filled in by chsc_get_ssd_info() and
 * evaluated there; unnamed bit-fields are reserved areas.
 */
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;		/* subchannel-set id of the queried range */
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;	/* subchannel data below is valid */
	u8 dev_valid : 1;	/* devno/unit_addr are valid */
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;		/* installed paths, bit 0x80 = chpid[0] */
	u8 fla_valid_mask;	/* which fla[] entries are valid */
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

/*
 * chsc_get_ssd_info - fetch subchannel-description data for one subchannel
 * @schid: subchannel to query
 * @ssd: caller-provided buffer that receives path mask, link addresses
 *       and chpids for the subchannel
 *
 * Issues the CHSC SSD command (0x0004) for exactly one subchannel
 * (f_sch == l_sch) and copies the relevant parts of the hardware
 * response into @ssd.  Returns 0 on success, -ENOMEM if no control
 * block could be allocated, -ENODEV/-EBUSY on chsc condition codes
 * 3/1-2, or an errno derived from the CHSC response code.
 */
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	/* CHSC control blocks must reside below 2G, hence GFP_DMA. */
	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		/* cc 3: command not installed; cc 1/2: busy, retry later. */
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	/* Only I/O and message subchannels carry path information. */
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}

130
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
131
{
C
Cornelia Huck 已提交
132
	spin_lock_irq(sch->lock);
C
Cornelia Huck 已提交
133 134
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
L
Linus Torvalds 已提交
135
			goto out_unreg;
C
Cornelia Huck 已提交
136
	spin_unlock_irq(sch->lock);
L
Linus Torvalds 已提交
137
	return 0;
138

L
Linus Torvalds 已提交
139 140
out_unreg:
	sch->lpm = 0;
141
	spin_unlock_irq(sch->lock);
142
	css_schedule_eval(sch->schid);
L
Linus Torvalds 已提交
143 144 145
	return 0;
}

146
void chsc_chp_offline(struct chp_id chpid)
L
Linus Torvalds 已提交
147 148 149
{
	char dbf_txt[15];

150
	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
L
Linus Torvalds 已提交
151 152
	CIO_TRACE_EVENT(2, dbf_txt);

153
	if (chp_get_status(chpid) <= 0)
L
Linus Torvalds 已提交
154
		return;
155 156
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
157
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
L
Linus Torvalds 已提交
158 159
}

160
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
161 162 163 164 165 166 167 168 169 170
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
171
	if (stsch_err(schid, &schib))
172
		/* We're through */
173
		return -ENXIO;
174 175

	/* Put it on the slow path. */
176
	css_schedule_eval(schid);
177 178 179
	return 0;
}

180
static int __s390_process_res_acc(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
181
{
C
Cornelia Huck 已提交
182
	spin_lock_irq(sch->lock);
C
Cornelia Huck 已提交
183 184
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
C
Cornelia Huck 已提交
185
	spin_unlock_irq(sch->lock);
186

187
	return 0;
188 189
}

190
/*
 * s390_process_res_acc - react to an I/O-resource-accessibility event
 * @res_data: affected chpid plus optional (full) link address and mask
 *
 * Notifies existing subchannels (__s390_process_res_acc) and queues
 * unknown subchannel ids for recognition (s390_process_res_acc_new_sch).
 */
static void s390_process_res_acc (struct res_acc_data *res_data)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
		res_data->chpid.id);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, res_data);
}

/*
 * Extract the chpid from a link-incident record (LIR).
 * Returns the chpid (>= 0) on success or -EINVAL when the record is
 * null, its node descriptor is invalid, or it describes a device-type
 * node (not handled - FIXME).
 */
static int __get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		u32 indesc[28];		/* incident-node descriptor */
		u32 andesc[28];		/* attached-node descriptor */
		u32 isinfo[28];		/* incident-specific information */
	} __attribute__ ((packed));
	struct lir *lir = data;

	if ((lir->iq & 0x80) == 0)
		/* NULL link incident record */
		return -EINVAL;
	if ((lir->indesc[0] & 0xc0000000) == 0)
		/* node descriptor not valid */
		return -EINVAL;
	if ((lir->indesc[0] & 0x10000000) == 0)
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
	return (u16) (lir->indesc[0] & 0x000000ff);
}

244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262
/*
 * Request/response block for the CHSC store-event-information (SEI)
 * command, code 0x000e.  One such block lives in the statically
 * allocated sei_page and is reused for every event.
 */
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;	/* 0x80: more events pending, 0x40: overflow */
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

263
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
264
{
265 266
	struct chp_id chpid;
	int id;
267 268 269 270

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
271
		return;
272 273
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
274
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
275 276 277
	else {
		chp_id_init(&chpid);
		chpid.id = id;
278
		chsc_chp_offline(chpid);
279
	}
280 281
}

282
/*
 * Handle an SEI resource-accessibility event (content code 2): make
 * sure a channel-path object exists for the reported chpid, build a
 * res_acc_data from the validity flags / link address and hand it to
 * s390_process_res_acc().  Ignored unless the reporting source is 4
 * or the path is registered but not in a usable state.
 */
static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct res_acc_data res_data;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chpid = chpid;
	/* vf bits 0xc0 tell how much of the link address is valid. */
	if ((sei_area->vf & 0xc0) != 0) {
		res_data.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			res_data.fla_mask = 0xffff;
		else
			/* link address */
			res_data.fla_mask = 0xff00;
	}
	s390_process_res_acc(&res_data);
}

314 315 316 317 318 319
/* ccdf layout of an SEI channel-path-configuration event (cc 8). */
struct chp_config_data {
	u8 map[32];	/* bitmap of affected chpids */
	u8 op;		/* 0: configure, 1: deconfigure, 2: cancel */
	u8 pc;
};

320
static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
321 322 323 324 325 326 327
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
328
		return;
329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		printk(KERN_WARNING "cio: processing configure event %d for "
		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

351
static void chsc_process_sei(struct chsc_sei_area *sei_area)
352 353
{
	/* Check if we might have lost some information. */
354
	if (sei_area->flags & 0x40) {
355
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
356 357
		css_schedule_eval_all();
	}
358 359 360
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident*/
361
		chsc_process_sei_link_incident(sei_area);
362 363
		break;
	case 2: /* i/o resource accessibiliy */
364
		chsc_process_sei_res_acc(sei_area);
365
		break;
366
	case 8: /* channel-path-configuration notification */
367
		chsc_process_sei_chp_config(sei_area);
368
		break;
369 370 371 372 373 374 375
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

376
/*
 * CRW handler for the channel subsystem (CRW_RSC_CSS): repeatedly
 * issue the CHSC store-event-information command (0x000e) into
 * sei_page and process each returned event until the hardware reports
 * no more pending events (flag 0x80 clear) or an error occurs.
 * @crw1 is unused; a CRW-overflow simply re-evaluates everything.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei_area *sei_area;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

413
void chsc_chp_online(struct chp_id chpid)
414
{
L
Linus Torvalds 已提交
415
	char dbf_txt[15];
C
Cornelia Huck 已提交
416
	struct res_acc_data res_data;
L
Linus Torvalds 已提交
417

418
	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
L
Linus Torvalds 已提交
419 420
	CIO_TRACE_EVENT(2, dbf_txt);

421
	if (chp_get_status(chpid) != 0) {
C
Cornelia Huck 已提交
422 423
		memset(&res_data, 0, sizeof(struct res_acc_data));
		res_data.chpid = chpid;
424 425
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
C
Cornelia Huck 已提交
426 427
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &res_data);
428
	}
L
Linus Torvalds 已提交
429 430
}

431 432
/*
 * Deliver a CHP_VARY_ON/CHP_VARY_OFF event for @chpid to one
 * subchannel's driver, under the subchannel lock with interrupts
 * saved.  The chpid is wrapped in a res_acc_data (fla/fla_mask zero)
 * as expected by the driver's chp_event callback.
 */
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct res_acc_data res_data;

	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &res_data,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

446
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
447
{
448
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
449 450 451 452 453

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

454
static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
455
{
456
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
457 458 459 460 461

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

462 463 464 465 466
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

467
	if (stsch_err(schid, &schib))
468 469 470
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
471
	css_schedule_eval(schid);
472 473 474
	return 0;
}

475 476 477 478
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channl-path ID
 * @on: non-zero for vary online, zero for vary offline
L
Linus Torvalds 已提交
479
 */
480
int chsc_chp_vary(struct chp_id chpid, int on)
L
Linus Torvalds 已提交
481
{
482 483
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
L
Linus Torvalds 已提交
484 485 486 487
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */

488
	if (on)
489 490 491 492 493 494
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

L
Linus Torvalds 已提交
495 496 497
	return 0;
}

498 499 500 501 502 503 504 505
/* Remove the measurement sysfs attributes from every known chp. */
static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++)
		if (css->chps[i])
			chp_remove_cmg_attr(css->chps[i]);
}

/*
 * Add the measurement sysfs attributes to every known chp.  On
 * failure, roll back the attributes added so far and return the
 * error from chp_add_cmg_attr().
 */
static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret = 0;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	/* Undo the attributes added before the failing chpid. */
	while (--i >= 0)
		if (css->chps[i])
			chp_remove_cmg_attr(css->chps[i]);
	return ret;
}

static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
552
	} __attribute__ ((packed)) *secm_area;
553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
570 571
	case 0x0102:
	case 0x0103:
572 573
		ret = -EINVAL;
	default:
574
		ret = chsc_error_from_response(secm_area->response.code);
575
	}
576 577 578
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614
	return ret;
}

/*
 * chsc_secm - switch channel measurement on or off for a css
 * @css: the channel subsystem
 * @enable: non-zero to enable, zero to disable measurement
 *
 * Allocates the two channel-utilization blocks on first enable, issues
 * the secm command and keeps css->cm_enabled plus the per-chp sysfs
 * measurement attributes in sync with the hardware state.  If adding
 * the attributes fails, measurement is switched off again.
 */
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void  *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL |  GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			/* free_page(0) is a no-op, so partial failure is fine */
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				/* roll back: disable measurement again */
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	free_page((unsigned long)secm_area);
	return ret;
}

623 624
/*
 * chsc_determine_channel_path_description - read a chp's description
 * @chpid: the channel path to query
 * @desc: caller buffer receiving the channel-path description
 *
 * Issues the CHSC store-channel-path-description command (0x0002) for
 * exactly one chpid and copies the result into @desc.  Returns 0 on
 * success or a negative errno (allocation failure, chsc condition
 * code, or mapped response code).
 */
int chsc_determine_channel_path_description(struct chp_id chpid,
					    struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	/* Query a single chpid only. */
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret == 0)
		/* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
	else
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697
/*
 * Populate chp->cmg_chars from a scmc response: for cmg modes 2 and 3
 * allocate a cmg_chars structure and copy only the characteristic
 * values whose cmcv bit is set, zeroing the rest.  Other cmg modes
 * carry no cmg-dependent data.  Allocation failure leaves
 * chp->cmg_chars NULL, as before.
 */
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i;

	if (chp->cmg != 2 && chp->cmg != 3)
		return;		/* no cmg-dependent data */

	chp->cmg_chars = kmalloc(sizeof(struct cmg_chars), GFP_KERNEL);
	cmg_chars = chp->cmg_chars;
	if (!cmg_chars)
		return;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		if (cmcv & (0x80 >> (i + 3)))
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}

698
/*
 * chsc_get_channel_measurement_chars - read measurement data for a chp
 * @chp: channel path whose cmg/shared/cmg_chars fields are filled in
 *
 * Issues the CHSC store-channel-measurement-characteristics command
 * (0x0022) for a single chpid.  On success with valid data, stores the
 * cmg mode and shared flag and initializes chp->cmg_chars; if the
 * response is marked not valid, cmg and shared are set to -1.
 * Returns 0 on success or a negative errno.
 */
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	/* Query a single chpid only. */
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret == 0) {
		/* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
	} else {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

761
/*
 * chsc_alloc_sei_area - init-time setup for chsc machine-check handling
 *
 * Allocates the page used for store-event-information control blocks
 * and registers chsc_process_crw() as CRW handler for the channel
 * subsystem.  Returns 0 on success, -ENOMEM if the page cannot be
 * allocated, or the error from s390_register_crw_handler().
 */
int __init chsc_alloc_sei_area(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page) {
		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
			      "chsc machine checks!\n");
		return -ENOMEM;
	}
	ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret) {
		/* Fix: memory from get_zeroed_page() must be released
		 * with free_page(), not kfree(). */
		free_page((unsigned long)sei_page);
		sei_page = NULL;
	}
	return ret;
}

777 778
/*
 * chsc_free_sei_area - undo chsc_alloc_sei_area()
 *
 * Unregisters the CRW handler and releases the sei page.
 */
void __init chsc_free_sei_area(void)
{
	s390_unregister_crw_handler(CRW_RSC_CSS);
	/* Fix: sei_page is allocated with get_zeroed_page(), so it must
	 * be released with free_page(), not kfree(). */
	free_page((unsigned long)sei_page);
	sei_page = NULL;
}

783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799
/*
 * chsc_enable_facility - enable a facility via set-dynamic-attributes
 * @operation_code: the sda operation to perform
 *
 * Issues the CHSC sda command (0x0031).  Response code 0x0101 means
 * the requested facility is not available (-EOPNOTSUPP); other codes
 * are mapped through chsc_error_from_response().  Returns 0 on
 * success or a negative errno.
 */
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		/* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
 out:
	free_page((unsigned long)sda_area);
	return ret;
}

L
Linus Torvalds 已提交
830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

/*
 * chsc_determine_css_characteristics - read css characteristics
 *
 * Issues the CHSC store-channel-subsystem-characteristics command
 * (0x0010) and copies the general and chsc characteristic words into
 * the global css_general_characteristics / css_chsc_characteristics.
 * Returns 0 on success or a negative errno.
 */
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area)
		return -ENOMEM;

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	free_page ((unsigned long) scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
M
Martin Schwidefsky 已提交
877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925

/*
 * chsc_sstpc - set the STP clock-control parameters
 * @page: caller-provided page used as the chsc control block
 * @op: operation code placed in the request
 * @ctrl: control word placed in the request
 *
 * Returns 0 on success, -EIO on a chsc condition code or a response
 * code other than 0x0001.
 */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	if (chsc(rr))
		return -EIO;
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

/*
 * chsc_sstpi - store STP timing information
 * @page: caller-provided page used as the chsc control block
 * @result: buffer receiving @size bytes of response data
 * @size: amount of response data to copy (runtime value; the struct
 *        uses a GNU variable-length-array member sized by it)
 *
 * Returns 0 on success, -EIO on a chsc condition code or a response
 * code other than 0x0001.  Note that @result is filled in even when
 * the response code indicates failure.
 */
int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}