/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright IBM Corp. 1999,2010
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

/* Buffer for store-event-information responses (machine check context). */
static void *sei_page;
/* Shared request/response page for all other CHSC commands. */
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

35 36 37 38 39 40 41
/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	/* 0x0001 is the only success indication. */
	if (response == 0x0001)
		return 0;
	/* Facility/operand not supported by this machine. */
	if (response == 0x0004)
		return -EOPNOTSUPP;
	/* Request-block format or content errors. */
	switch (response) {
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	default:
		return -EIO;
	}
}
60
EXPORT_SYMBOL_GPL(chsc_error_from_response);
61

62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
L
Linus Torvalds 已提交
87
{
88 89 90 91 92
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;
L
Linus Torvalds 已提交
93

94 95 96
	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
97 98
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
99 100 101
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;
L
Linus Torvalds 已提交
102 103

	ccode = chsc(ssd_area);
104
	/* Check response. */
L
Linus Torvalds 已提交
105
	if (ccode > 0) {
106
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
107
		goto out;
L
Linus Torvalds 已提交
108
	}
109 110
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
111 112
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
L
Linus Torvalds 已提交
113
			      ssd_area->response.code);
114
		goto out;
L
Linus Torvalds 已提交
115
	}
116 117
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
118
		goto out;
L
Linus Torvalds 已提交
119
	}
120 121 122
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
123 124
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
125
		goto out;
126 127 128 129 130 131 132
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
L
Linus Torvalds 已提交
133
		}
134 135
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
L
Linus Torvalds 已提交
136
	}
137 138
out:
	spin_unlock_irq(&chsc_page_lock);
L
Linus Torvalds 已提交
139 140 141
	return ret;
}

142
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
143
{
C
Cornelia Huck 已提交
144
	spin_lock_irq(sch->lock);
C
Cornelia Huck 已提交
145 146
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
L
Linus Torvalds 已提交
147
			goto out_unreg;
C
Cornelia Huck 已提交
148
	spin_unlock_irq(sch->lock);
L
Linus Torvalds 已提交
149
	return 0;
150

L
Linus Torvalds 已提交
151 152
out_unreg:
	sch->lpm = 0;
153
	spin_unlock_irq(sch->lock);
154
	css_schedule_eval(sch->schid);
L
Linus Torvalds 已提交
155 156 157
	return 0;
}

158
void chsc_chp_offline(struct chp_id chpid)
L
Linus Torvalds 已提交
159 160
{
	char dbf_txt[15];
161
	struct chp_link link;
L
Linus Torvalds 已提交
162

163
	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
L
Linus Torvalds 已提交
164 165
	CIO_TRACE_EVENT(2, dbf_txt);

166
	if (chp_get_status(chpid) <= 0)
L
Linus Torvalds 已提交
167
		return;
168 169
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
170 171
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
172
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
L
Linus Torvalds 已提交
173 174
}

175
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
176 177 178 179 180 181 182 183 184 185
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
186
	if (stsch_err(schid, &schib))
187
		/* We're through */
188
		return -ENXIO;
189 190

	/* Put it on the slow path. */
191
	css_schedule_eval(schid);
192 193 194
	return 0;
}

195
static int __s390_process_res_acc(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
196
{
C
Cornelia Huck 已提交
197
	spin_lock_irq(sch->lock);
C
Cornelia Huck 已提交
198 199
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
C
Cornelia Huck 已提交
200
	spin_unlock_irq(sch->lock);
201

202
	return 0;
203 204
}

205
static void s390_process_res_acc(struct chp_link *link)
206
{
L
Linus Torvalds 已提交
207 208
	char dbf_txt[15];

209 210
	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
L
Linus Torvalds 已提交
211
	CIO_TRACE_EVENT( 2, dbf_txt);
212 213
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
L
Linus Torvalds 已提交
214 215
		CIO_TRACE_EVENT( 2, dbf_txt);
	}
216 217
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
L
Linus Torvalds 已提交
218 219 220 221 222 223 224
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
225
	for_each_subchannel_staged(__s390_process_res_acc,
226
				   s390_process_res_acc_new_sch, link);
L
Linus Torvalds 已提交
227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
}

/*
 * Extract the channel-path id from a link-incident record, or a
 * negative error code if the record does not describe a usable
 * channel path.
 */
static int __get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir = data;

	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
	return (u16) (lir->indesc[0] & 0x000000ff);
}

259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

278
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
279
{
280 281
	struct chp_id chpid;
	int id;
282 283 284 285

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
286
		return;
287 288
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
289
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
290 291 292
	else {
		chp_id_init(&chpid);
		chpid.id = id;
293
		chsc_chp_offline(chpid);
294
	}
295 296
}

297
static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
L
Linus Torvalds 已提交
298
{
299
	struct chp_link link;
300
	struct chp_id chpid;
301 302 303 304 305
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
306
		return;
307 308
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
309
	/* allocate a new channel path structure, if needed */
310
	status = chp_get_status(chpid);
311
	if (status < 0)
312
		chp_new(chpid);
313
	else if (!status)
314
		return;
315 316
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
317
	if ((sei_area->vf & 0xc0) != 0) {
318
		link.fla = sei_area->fla;
319 320
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
321
			link.fla_mask = 0xffff;
322 323
		else
			/* link address */
324
			link.fla_mask = 0xff00;
325
	}
326
	s390_process_res_acc(&link);
327 328
}

329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358
/*
 * Handle a channel-path availability event: for each channel path set in
 * the reported bitmap, either register a new channel path or refresh the
 * descriptor of an already known one.
 */
static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	/* Only reporting source 0 carries a chpid bitmap in ccdf. */
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		/* Known path: re-read its descriptor under the chp lock. */
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);
	}
}

359 360 361 362 363 364
struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

365
static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
366 367 368 369
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
370
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
371 372 373

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
374
		return;
375 376 377 378 379 380
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
381 382
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
383 384 385 386 387 388 389 390 391 392 393 394 395 396
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

397
static void chsc_process_sei(struct chsc_sei_area *sei_area)
398 399
{
	/* Check if we might have lost some information. */
400
	if (sei_area->flags & 0x40) {
401
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
402 403
		css_schedule_eval_all();
	}
404 405 406
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident*/
407
		chsc_process_sei_link_incident(sei_area);
408
		break;
409
	case 2: /* i/o resource accessibility */
410
		chsc_process_sei_res_acc(sei_area);
411
		break;
412 413 414
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
415
	case 8: /* channel-path-configuration notification */
416
		chsc_process_sei_chp_config(sei_area);
417
		break;
418 419 420 421 422 423 424
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

425
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
426 427
{
	struct chsc_sei_area *sei_area;
L
Linus Torvalds 已提交
428

429 430 431 432 433 434 435 436
	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
L
Linus Torvalds 已提交
437
	if (!sei_page)
438
		return;
439 440
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
L
Linus Torvalds 已提交
441 442
	sei_area = sei_page;

443
	CIO_TRACE_EVENT(2, "prcss");
L
Linus Torvalds 已提交
444 445
	do {
		memset(sei_area, 0, sizeof(*sei_area));
446 447
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
448 449
		if (chsc(sei_area))
			break;
L
Linus Torvalds 已提交
450

451 452
		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
453
			chsc_process_sei(sei_area);
454 455
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
L
Linus Torvalds 已提交
456 457 458 459 460 461
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

462
void chsc_chp_online(struct chp_id chpid)
463
{
L
Linus Torvalds 已提交
464
	char dbf_txt[15];
465
	struct chp_link link;
L
Linus Torvalds 已提交
466

467
	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
L
Linus Torvalds 已提交
468 469
	CIO_TRACE_EVENT(2, dbf_txt);

470
	if (chp_get_status(chpid) != 0) {
471 472
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
473 474
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
C
Cornelia Huck 已提交
475
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
476
					   &link);
477
	}
L
Linus Torvalds 已提交
478 479
}

480 481
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
L
Linus Torvalds 已提交
482 483
{
	unsigned long flags;
484
	struct chp_link link;
L
Linus Torvalds 已提交
485

486 487
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
C
Cornelia Huck 已提交
488
	spin_lock_irqsave(sch->lock, flags);
C
Cornelia Huck 已提交
489
	if (sch->driver && sch->driver->chp_event)
490
		sch->driver->chp_event(sch, &link,
C
Cornelia Huck 已提交
491
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
C
Cornelia Huck 已提交
492
	spin_unlock_irqrestore(sch->lock, flags);
L
Linus Torvalds 已提交
493 494
}

495
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
496
{
497
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
498 499 500 501 502

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

503
static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
504
{
505
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
506 507 508 509 510

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

511 512 513 514 515
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

516
	if (stsch_err(schid, &schib))
517 518 519
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
520
	css_schedule_eval(schid);
521 522 523
	return 0;
}

524 525 526 527
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channl-path ID
 * @on: non-zero for vary online, zero for vary offline
L
Linus Torvalds 已提交
528
 */
529
int chsc_chp_vary(struct chp_id chpid, int on)
L
Linus Torvalds 已提交
530
{
531
	struct channel_path *chp = chpid_to_chp(chpid);
532 533 534 535
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
536 537
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
L
Linus Torvalds 已提交
538 539 540
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
541 542 543
	if (on) {
		/* Try to update the channel path descritor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
544
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
545
					   __s390_vary_chpid_on, &link);
546
	} else
547
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
548
					   NULL, &link);
549

L
Linus Torvalds 已提交
550 551 552
	return 0;
}

553 554 555 556 557 558 559 560
/* Remove measurement-related sysfs attributes from all channel paths. */
static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

/*
 * Add measurement-related sysfs attributes to all channel paths.
 * On failure, attributes added so far are rolled back.
 */
static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret = 0;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return 0;
cleanup:
	/* Undo everything added before the failing channel path. */
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

588
int __chsc_do_secm(struct channel_subsystem *css, int enable)
589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
606
	} __attribute__ ((packed)) *secm_area;
607 608
	int ret, ccode;

609 610 611
	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
612 613 614
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

615
	secm_area->key = PAGE_DEFAULT_KEY >> 4;
616 617 618 619 620 621
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
622 623 624 625
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
626 627

	switch (secm_area->response.code) {
628 629
	case 0x0102:
	case 0x0103:
630
		ret = -EINVAL;
631
		break;
632
	default:
633
		ret = chsc_error_from_response(secm_area->response.code);
634
	}
635 636 637
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
638 639
out:
	spin_unlock_irq(&chsc_page_lock);
640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
657
	ret = __chsc_do_secm(css, enable);
658 659 660 661 662
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
663
				__chsc_do_secm(css, 0);
664 665 666 667 668
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
669
	if (!css->cm_enabled) {
670 671 672 673 674 675
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}

676
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
677
				     int c, int m, void *page)
L
Linus Torvalds 已提交
678
{
679
	struct chsc_scpd *scpd_area;
L
Linus Torvalds 已提交
680 681
	int ccode, ret;

682 683 684 685
	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;
L
Linus Torvalds 已提交
686

687 688
	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
689 690
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
691
	scpd_area->cssid = chpid.cssid;
692 693
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
694 695 696 697
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;
L
Linus Torvalds 已提交
698 699

	ccode = chsc(scpd_area);
700 701
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;
L
Linus Torvalds 已提交
702

703
	ret = chsc_error_from_response(scpd_area->response.code);
704
	if (ret)
705
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
L
Linus Torvalds 已提交
706 707 708
			      scpd_area->response.code);
	return ret;
}
709 710 711 712 713 714
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
715
	struct chsc_scpd *scpd_area;
716
	unsigned long flags;
717 718
	int ret;

719
	spin_lock_irqsave(&chsc_page_lock, flags);
720 721
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
722
	if (ret)
723 724
		goto out;
	chsc_resp = (void *)&scpd_area->response;
725
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
726
out:
727
	spin_unlock_irqrestore(&chsc_page_lock, flags);
728 729
	return ret;
}
L
Linus Torvalds 已提交
730

731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749
/* Fetch the format-1 channel-path descriptor for @chpid into @desc. */
int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	int ret;

	/* chsc_page is shared between all CHSC users; take the lock. */
	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	/* c=1 requests the format-1 description. */
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

750 751 752 753
/*
 * Copy the measurement characteristics flagged as valid in @cmcv from
 * @chars into chp->cmg_chars; invalid entries are zeroed.
 */
static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
				      struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars = chp->cmg_chars;
	int idx;

	for (idx = 0; idx < NR_MEASUREMENT_CHARS; idx++) {
		/* Validity bits for the characteristics start at bit 3. */
		int mask = 0x80 >> (idx + 3);

		cmg_chars->values[idx] = (cmcv & mask) ?
			chars->values[idx] : 0;
	}
}

767
int chsc_get_channel_measurement_chars(struct channel_path *chp)
768
{
769
	struct cmg_chars *cmg_chars;
770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
791
	} __attribute__ ((packed)) *scmc_area;
792

793 794 795
	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
796 797
		return -ENOMEM;

798 799 800
	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
801 802
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
803 804
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;
805 806 807 808 809 810 811

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

812
	ret = chsc_error_from_response(scmc_area->response.code);
813
	if (ret) {
814
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
815
			      scmc_area->response.code);
816 817 818 819 820 821 822 823 824 825 826 827
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
828
	}
829 830 831
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
832
out:
833 834 835 836
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

837 838 839
	return ret;
}

S
Sebastian Ott 已提交
840
/* Allocate the CHSC work pages and register the CRW handler. */
int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return 0;
out_err:
	/* free_page(0) is a no-op, so partial allocation is handled too. */
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

S
Sebastian Ott 已提交
860
/* Undo chsc_init(): unregister the CRW handler and release the pages. */
void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

867
int chsc_enable_facility(int operation_code)
868
{
869
	unsigned long flags;
870
	int ret;
871
	struct {
872 873 874 875 876 877 878 879 880 881 882 883
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
884
	} __attribute__ ((packed)) *sda_area;
885

886 887 888 889 890 891
	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;
892

893
	ret = chsc(sda_area);
894 895 896 897
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
898

899
	switch (sda_area->response.code) {
900
	case 0x0101:
901 902
		ret = -EOPNOTSUPP;
		break;
903
	default:
904
		ret = chsc_error_from_response(sda_area->response.code);
905
	}
906 907
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
908 909 910
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
911 912 913
	return ret;
}

L
Linus Torvalds 已提交
914 915 916 917 918 919 920 921 922 923 924 925 926 927 928
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

/* Read the general and CHSC characteristics of the channel subsystem. */
int __init chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
M
Martin Schwidefsky 已提交
960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008

/*
 * Issue an STP (Server Time Protocol) control request (CHSC command
 * 0x0033).  @page must be a zeroed, page-sized buffer owned by the
 * caller; @op and @ctrl are passed through to the request block.
 * Returns 0 on success, -EIO on any failure.
 */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	/* 0x0001 is the only success response code. */
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

/*
 * Retrieve STP information (CHSC command 0x0038) into @result.
 * @page is a caller-owned, page-sized work buffer; @size bytes of
 * response data are copied out.  Returns 0 on success, -EIO otherwise.
 */
int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		/* NOTE: VLA in a struct is a GNU extension. */
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

M
Michael Ernst 已提交
1009 1010
int chsc_siosl(struct subchannel_id schid)
{
1011 1012 1013 1014 1015 1016 1017 1018
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
M
Michael Ernst 已提交
1019 1020 1021 1022
	unsigned long flags;
	int ccode;
	int rc;

1023 1024 1025 1026 1027 1028 1029
	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;
M
Michael Ernst 已提交
1030

1031
	ccode = chsc(siosl_area);
M
Michael Ernst 已提交
1032 1033 1034 1035 1036 1037 1038 1039 1040
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
1041
	rc = chsc_error_from_response(siosl_area->response.code);
M
Michael Ernst 已提交
1042 1043 1044
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
1045
			      siosl_area->response.code);
M
Michael Ernst 已提交
1046 1047 1048 1049
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
1050
	spin_unlock_irqrestore(&chsc_page_lock, flags);
M
Michael Ernst 已提交
1051 1052 1053
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);