chsc.c 23.2 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4
/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright IBM Corp. 1999,2008
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

11 12 13
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

L
Linus Torvalds 已提交
14 15 16 17 18 19
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
20
#include <asm/chpid.h>
21
#include <asm/chsc.h>
22
#include <asm/crw.h>
L
Linus Torvalds 已提交
23 24 25 26 27

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
28
#include "chp.h"
L
Linus Torvalds 已提交
29 30 31
#include "chsc.h"

static void *sei_page;
M
Michael Ernst 已提交
32
static DEFINE_SPINLOCK(siosl_lock);
33
static DEFINE_SPINLOCK(sda_lock);
L
Linus Torvalds 已提交
34

35 36 37 38 39 40 41
/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	/* 0x0001 is the only success code; 0x0004 marks an unsupported
	 * command; a fixed set of codes indicates a malformed request. */
	if (response == 0x0001)
		return 0;
	if (response == 0x0004)
		return -EOPNOTSUPP;
	if (response == 0x0002 || response == 0x0003 ||
	    response == 0x0006 || response == 0x0007 ||
	    response == 0x0008 || response == 0x000a ||
	    response == 0x0104)
		return -EINVAL;
	return -EIO;
}
60
EXPORT_SYMBOL_GPL(chsc_error_from_response);
61

62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86
/*
 * Command/response block for the CHSC store-subchannel-description
 * command (request code 0x0004).  The layout mirrors the machine
 * interface exactly; unnamed bit-fields are reserved areas, hence
 * the packed attribute must stay.
 */
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
L
Linus Torvalds 已提交
87
{
88 89 90 91 92 93
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;
L
Linus Torvalds 已提交
94

95 96 97 98
	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
99 100
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
101 102 103
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;
L
Linus Torvalds 已提交
104 105

	ccode = chsc(ssd_area);
106
	/* Check response. */
L
Linus Torvalds 已提交
107
	if (ccode > 0) {
108 109
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
L
Linus Torvalds 已提交
110
	}
111 112
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
113 114
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
L
Linus Torvalds 已提交
115
			      ssd_area->response.code);
116
		goto out_free;
L
Linus Torvalds 已提交
117
	}
118 119 120
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
L
Linus Torvalds 已提交
121
	}
122 123 124
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
125 126
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
127 128 129 130 131 132 133 134
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
L
Linus Torvalds 已提交
135
		}
136 137
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
L
Linus Torvalds 已提交
138
	}
139 140
out_free:
	free_page(page);
L
Linus Torvalds 已提交
141 142 143
	return ret;
}

144
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
145
{
C
Cornelia Huck 已提交
146
	spin_lock_irq(sch->lock);
C
Cornelia Huck 已提交
147 148
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
L
Linus Torvalds 已提交
149
			goto out_unreg;
C
Cornelia Huck 已提交
150
	spin_unlock_irq(sch->lock);
L
Linus Torvalds 已提交
151
	return 0;
152

L
Linus Torvalds 已提交
153 154
out_unreg:
	sch->lpm = 0;
155
	spin_unlock_irq(sch->lock);
156
	css_schedule_eval(sch->schid);
L
Linus Torvalds 已提交
157 158 159
	return 0;
}

160
void chsc_chp_offline(struct chp_id chpid)
L
Linus Torvalds 已提交
161 162
{
	char dbf_txt[15];
163
	struct chp_link link;
L
Linus Torvalds 已提交
164

165
	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
L
Linus Torvalds 已提交
166 167
	CIO_TRACE_EVENT(2, dbf_txt);

168
	if (chp_get_status(chpid) <= 0)
L
Linus Torvalds 已提交
169
		return;
170 171
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
172 173
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
174
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
L
Linus Torvalds 已提交
175 176
}

177
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
178 179 180 181 182 183 184 185 186 187
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
188
	if (stsch_err(schid, &schib))
189
		/* We're through */
190
		return -ENXIO;
191 192

	/* Put it on the slow path. */
193
	css_schedule_eval(schid);
194 195 196
	return 0;
}

197
static int __s390_process_res_acc(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
198
{
C
Cornelia Huck 已提交
199
	spin_lock_irq(sch->lock);
C
Cornelia Huck 已提交
200 201
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
C
Cornelia Huck 已提交
202
	spin_unlock_irq(sch->lock);
203

204
	return 0;
205 206
}

207
static void s390_process_res_acc(struct chp_link *link)
208
{
L
Linus Torvalds 已提交
209 210
	char dbf_txt[15];

211 212
	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
L
Linus Torvalds 已提交
213
	CIO_TRACE_EVENT( 2, dbf_txt);
214 215
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
L
Linus Torvalds 已提交
216 217
		CIO_TRACE_EVENT( 2, dbf_txt);
	}
218 219
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
L
Linus Torvalds 已提交
220 221 222 223 224 225 226
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
227
	for_each_subchannel_staged(__s390_process_res_acc,
228
				   s390_process_res_acc_new_sch, link);
L
Linus Torvalds 已提交
229 230 231 232 233 234 235 236 237 238 239 240 241 242 243
}

/*
 * Extract the channel-path ID from a link-incident record (LIR).
 * Returns the chpid (0-255) or -EINVAL if the record does not describe
 * a valid channel-path node.
 */
static int __get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir = data;

	if (!(lir->iq & 0x80))
		return -EINVAL;	/* NULL link incident record */
	if (!(lir->indesc[0] & 0xc0000000))
		return -EINVAL;	/* node descriptor not valid */
	if (!(lir->indesc[0] & 0x10000000))
		return -EINVAL;	/* device-type node - not handled (FIXME) */
	/* Byte 3 contains the chpid.  Could also be CTCA, but we don't care. */
	return (u16) (lir->indesc[0] & 0x000000ff);
}

261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279
/*
 * Command/response block for the store-event-information command
 * (request code 0x000e).  Layout mirrors the machine interface and
 * must stay packed.
 */
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

280
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
281
{
282 283
	struct chp_id chpid;
	int id;
284 285 286 287

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
288
		return;
289 290
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
291
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
292 293 294
	else {
		chp_id_init(&chpid);
		chpid.id = id;
295
		chsc_chp_offline(chpid);
296
	}
297 298
}

299
static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
L
Linus Torvalds 已提交
300
{
301
	struct chp_link link;
302
	struct chp_id chpid;
303 304 305 306 307
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
308
		return;
309 310
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
311
	/* allocate a new channel path structure, if needed */
312
	status = chp_get_status(chpid);
313
	if (status < 0)
314
		chp_new(chpid);
315
	else if (!status)
316
		return;
317 318
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
319
	if ((sei_area->vf & 0xc0) != 0) {
320
		link.fla = sei_area->fla;
321 322
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
323
			link.fla_mask = 0xffff;
324 325
		else
			/* link address */
326
			link.fla_mask = 0xff00;
327
	}
328
	s390_process_res_acc(&link);
329 330
}

331 332 333 334 335 336
/*
 * Content-code dependent data of a channel-path-configuration event
 * (content code 8), as delivered in chsc_sei_area.ccdf.
 */
struct chp_config_data {
	u8 map[32];	/* bitmap of affected chpids */
	u8 op;		/* requested operation (0/1/2, see handler) */
	u8 pc;
};

337
static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
338 339 340 341
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
342
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
343 344 345

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
346
		return;
347 348 349 350 351 352
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
353 354
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
355 356 357 358 359 360 361 362 363 364 365 366 367 368
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

369
static void chsc_process_sei(struct chsc_sei_area *sei_area)
370 371
{
	/* Check if we might have lost some information. */
372
	if (sei_area->flags & 0x40) {
373
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
374 375
		css_schedule_eval_all();
	}
376 377 378
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident*/
379
		chsc_process_sei_link_incident(sei_area);
380 381
		break;
	case 2: /* i/o resource accessibiliy */
382
		chsc_process_sei_res_acc(sei_area);
383
		break;
384
	case 8: /* channel-path-configuration notification */
385
		chsc_process_sei_chp_config(sei_area);
386
		break;
387 388 389 390 391 392 393
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

394
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
395 396
{
	struct chsc_sei_area *sei_area;
L
Linus Torvalds 已提交
397

398 399 400 401 402 403 404 405
	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
L
Linus Torvalds 已提交
406
	if (!sei_page)
407
		return;
408 409
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
L
Linus Torvalds 已提交
410 411
	sei_area = sei_page;

412
	CIO_TRACE_EVENT(2, "prcss");
L
Linus Torvalds 已提交
413 414
	do {
		memset(sei_area, 0, sizeof(*sei_area));
415 416
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
417 418
		if (chsc(sei_area))
			break;
L
Linus Torvalds 已提交
419

420 421
		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
422
			chsc_process_sei(sei_area);
423 424
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
L
Linus Torvalds 已提交
425 426 427 428 429 430
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

431
void chsc_chp_online(struct chp_id chpid)
432
{
L
Linus Torvalds 已提交
433
	char dbf_txt[15];
434
	struct chp_link link;
L
Linus Torvalds 已提交
435

436
	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
L
Linus Torvalds 已提交
437 438
	CIO_TRACE_EVENT(2, dbf_txt);

439
	if (chp_get_status(chpid) != 0) {
440 441
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
442 443
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
C
Cornelia Huck 已提交
444
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
445
					   &link);
446
	}
L
Linus Torvalds 已提交
447 448
}

449 450
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
L
Linus Torvalds 已提交
451 452
{
	unsigned long flags;
453
	struct chp_link link;
L
Linus Torvalds 已提交
454

455 456
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
C
Cornelia Huck 已提交
457
	spin_lock_irqsave(sch->lock, flags);
C
Cornelia Huck 已提交
458
	if (sch->driver && sch->driver->chp_event)
459
		sch->driver->chp_event(sch, &link,
C
Cornelia Huck 已提交
460
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
C
Cornelia Huck 已提交
461
	spin_unlock_irqrestore(sch->lock, flags);
L
Linus Torvalds 已提交
462 463
}

464
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
465
{
466
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
467 468 469 470 471

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

472
static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
473
{
474
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
475 476 477 478 479

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

480 481 482 483 484
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

485
	if (stsch_err(schid, &schib))
486 487 488
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
489
	css_schedule_eval(schid);
490 491 492
	return 0;
}

493 494 495 496
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channl-path ID
 * @on: non-zero for vary online, zero for vary offline
L
Linus Torvalds 已提交
497
 */
498
int chsc_chp_vary(struct chp_id chpid, int on)
L
Linus Torvalds 已提交
499
{
500 501 502 503
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
504 505
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
L
Linus Torvalds 已提交
506 507 508 509
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */

510
	if (on)
511
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
512
					   __s390_vary_chpid_on, &link);
513 514
	else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
515
					   NULL, &link);
516

L
Linus Torvalds 已提交
517 518 519
	return 0;
}

520 521 522 523 524 525 526 527
/* Remove the measurement attributes from every known channel path. */
static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int id;

	for (id = 0; id <= __MAX_CHPID; id++)
		if (css->chps[id])
			chp_remove_cmg_attr(css->chps[id]);
}

/*
 * Add the measurement attributes to every known channel path; on
 * failure strip the attributes added so far and return the error.
 */
static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int id, ret = 0;

	for (id = 0; id <= __MAX_CHPID; id++) {
		if (!css->chps[id])
			continue;
		ret = chp_add_cmg_attr(css->chps[id]);
		if (ret)
			goto undo;
	}
	return ret;
undo:
	/* Unwind the paths already processed. */
	while (--id >= 0)
		if (css->chps[id])
			chp_remove_cmg_attr(css->chps[id]);
	return ret;
}

555
int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
573
	} __attribute__ ((packed)) *secm_area;
574 575 576 577 578 579
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

580
	secm_area->key = PAGE_DEFAULT_KEY >> 4;
581 582 583 584 585 586 587 588 589 590
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
591 592
	case 0x0102:
	case 0x0103:
593
		ret = -EINVAL;
594
		break;
595
	default:
596
		ret = chsc_error_from_response(secm_area->response.code);
597
	}
598 599 600
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void  *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL |  GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
637
	if (!css->cm_enabled) {
638 639 640 641 642 643 644
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	free_page((unsigned long)secm_area);
	return ret;
}

645 646 647
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m,
				     struct chsc_response_struct *resp)
L
Linus Torvalds 已提交
648 649 650 651 652
{
	int ccode, ret;

	struct {
		struct chsc_header request;
653 654 655 656 657 658 659
		u32 : 2;
		u32 m : 1;
		u32 c : 1;
		u32 fmt : 4;
		u32 cssid : 8;
		u32 : 4;
		u32 rfmt : 4;
L
Linus Torvalds 已提交
660 661 662 663 664
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
665
		u8 data[PAGE_SIZE - 20];
666
	} __attribute__ ((packed)) *scpd_area;
L
Linus Torvalds 已提交
667

668 669 670 671
	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;
L
Linus Torvalds 已提交
672 673 674 675
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

676 677
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
L
Linus Torvalds 已提交
678

679
	scpd_area->cssid = chpid.cssid;
680 681
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
682 683 684 685
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;
L
Linus Torvalds 已提交
686 687 688 689 690 691 692

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

693 694 695
	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret == 0)
		/* Success. */
696
		memcpy(resp, &scpd_area->response, scpd_area->response.length);
697 698
	else
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
L
Linus Torvalds 已提交
699 700 701 702 703
			      scpd_area->response.code);
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
704 705 706 707 708 709 710 711 712 713 714 715 716 717
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	int ret;

	chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
	if (!chsc_resp)
		return -ENOMEM;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
	if (ret)
		goto out_free;
718
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
719 720 721 722
out_free:
	kfree(chsc_resp);
	return ret;
}
L
Linus Torvalds 已提交
723

724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752
/*
 * Copy the measurement characteristics flagged valid in @cmcv into a
 * freshly allocated chp->cmg_chars buffer; invalid slots are zeroed.
 * Only cmg classes 2 and 3 carry such data.  An allocation failure
 * silently leaves chp->cmg_chars NULL.
 */
static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
				      struct cmg_chars *chars)
{
	struct cmg_chars *dst;
	int i;

	if (chp->cmg != 2 && chp->cmg != 3)
		return;	/* no cmg-dependent data */
	dst = kmalloc(sizeof(struct cmg_chars), GFP_KERNEL);
	chp->cmg_chars = dst;
	if (!dst)
		return;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		if (cmcv & (0x80 >> (i + 3)))
			dst->values[i] = chars->values[i];
		else
			dst->values[i] = 0;
	}
}

753
int chsc_get_channel_measurement_chars(struct channel_path *chp)
754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
776
	} __attribute__ ((packed)) *scmc_area;
777 778 779 780 781 782 783 784

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

785 786
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;
787 788 789 790 791 792 793

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

794 795 796
	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret == 0) {
		/* Success. */
797 798 799 800 801 802 803 804 805 806
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
807 808
	} else {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
809 810 811 812 813 814 815
			      scmc_area->response.code);
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

816
/*
 * Allocate the page used by chsc_process_crw() for store-event-
 * information requests and register the CRW handler for the
 * channel-subsystem reporting source.
 */
int __init chsc_alloc_sei_area(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page) {
		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
			      "chsc machine checks!\n");
		return -ENOMEM;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret) {
		/*
		 * sei_page comes from the page allocator and must be
		 * released with free_page(), not kfree() (kfree is only
		 * valid for slab allocations).
		 */
		free_page((unsigned long)sei_page);
		sei_page = NULL;
	}
	return ret;
}

832 833
/*
 * Counterpart of chsc_alloc_sei_area(): unregister the CRW handler and
 * release the event-information page.
 */
void __init chsc_free_sei_area(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	/* Page-allocator memory must be freed with free_page(), not kfree(). */
	free_page((unsigned long)sei_page);
	sei_page = NULL;
}

838
int chsc_enable_facility(int operation_code)
839 840
{
	int ret;
841
	static struct {
842 843 844 845 846 847 848 849 850 851 852 853
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
854
	} __attribute__ ((packed, aligned(4096))) sda_area;
855

856 857 858 859 860
	spin_lock(&sda_lock);
	memset(&sda_area, 0, sizeof(sda_area));
	sda_area.request.length = 0x0400;
	sda_area.request.code = 0x0031;
	sda_area.operation_code = operation_code;
861

862
	ret = chsc(&sda_area);
863 864 865 866
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
867

868
	switch (sda_area.response.code) {
869
	case 0x0101:
870 871
		ret = -EOPNOTSUPP;
		break;
872
	default:
873
		ret = chsc_error_from_response(sda_area.response.code);
874
	}
875 876
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
877
			      operation_code, sda_area.response.code);
878
 out:
879
	spin_unlock(&sda_lock);
880 881 882
	return ret;
}

L
Linus Torvalds 已提交
883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898
/* Capability bits, filled in once at boot by the function below. */
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

/*
 * Run the store-channel-subsystem-characteristics command (code
 * 0x0010) and cache the general and chsc characteristic bits in the
 * globals above.
 */
int __init
chsc_determine_css_characteristics(void)
{
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;
	int result;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area)
		return -ENOMEM;

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}
	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
M
Martin Schwidefsky 已提交
930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978

/*
 * chsc_sstpc - issue the set-STP-controls command (code 0x0033).
 * @page: caller-provided page-sized scratch buffer
 * @op: operation code for the request
 * @ctrl: control value for the operation
 *
 * Returns 0 when the command completes with response code 0x0001,
 * -EIO otherwise.
 */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	if (chsc(rr))
		return -EIO;
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

/*
 * chsc_sstpi - store STP information (command code 0x0038).
 * @page: caller-provided page-sized scratch buffer for the command
 * @result: buffer that receives @size bytes of response data
 * @size: number of data bytes to copy from the response
 *
 * Returns 0 on success, -EIO if the chsc instruction or the command
 * failed.  NOTE(review): the response layout uses a variably-sized
 * data[size] member, so callers must pass the exact size of the
 * record they expect.
 */
int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	/* Data is copied unconditionally; the return value tells the
	 * caller whether the command actually succeeded. */
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

M
Michael Ernst 已提交
979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024
/*
 * Command/response block for the siosl command (code 0x0046).
 * Statically allocated, page aligned, and serialized by siosl_lock
 * (see chsc_siosl()).
 */
static struct {
	struct chsc_header request;
	u32 word1;
	struct subchannel_id sid;
	u32 word3;
	struct chsc_header response;
	u32 word[11];
} __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE)));

/**
 * chsc_siosl - issue the siosl command (code 0x0046) for @schid
 * @schid: subchannel the command operates on
 *
 * Serialized through siosl_lock because the command area is a single
 * static buffer.  Returns 0 on success or a negative error code.
 */
int chsc_siosl(struct subchannel_id schid)
{
	unsigned long flags;
	int cc, rc;

	spin_lock_irqsave(&siosl_lock, flags);
	memset(&siosl_area, 0, sizeof(siosl_area));
	siosl_area.request.length = 0x0010;
	siosl_area.request.code = 0x0046;
	siosl_area.word1 = 0x80000000;
	siosl_area.sid = schid;

	cc = chsc(&siosl_area);
	if (cc > 0) {
		rc = (cc == 3) ? -ENODEV : -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, cc);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area.response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area.response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&siosl_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);