chsc.c 24.1 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3
/*
 *   S/390 common I/O routines -- channel subsystem call
 *
4
 *    Copyright IBM Corp. 1999, 2010
L
Linus Torvalds 已提交
5
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
6
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
L
Linus Torvalds 已提交
7 8 9
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

10 11 12
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

L
Linus Torvalds 已提交
13 14 15 16 17 18
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
19
#include <asm/chpid.h>
20
#include <asm/chsc.h>
21
#include <asm/crw.h>
L
Linus Torvalds 已提交
22 23 24 25 26

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
27
#include "chp.h"
L
Linus Torvalds 已提交
28 29 30
#include "chsc.h"

static void *sei_page;
31 32
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);
L
Linus Torvalds 已提交
33

34 35 36 37 38 39 40
/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		/* Command executed successfully. */
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		/* Request rejected as malformed/invalid. */
		return -EINVAL;
	case 0x0004:
		/* Command not available on this machine. */
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
59
EXPORT_SYMBOL_GPL(chsc_error_from_response);
60

61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85
/*
 * Command/response block for CHSC store-subchannel-description
 * (request code 0x0004, issued from chsc_get_ssd_info()).
 */
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
L
Linus Torvalds 已提交
86
{
87 88 89 90 91
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;
L
Linus Torvalds 已提交
92

93 94 95
	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
96 97
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
98 99 100
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;
L
Linus Torvalds 已提交
101 102

	ccode = chsc(ssd_area);
103
	/* Check response. */
L
Linus Torvalds 已提交
104
	if (ccode > 0) {
105
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
106
		goto out;
L
Linus Torvalds 已提交
107
	}
108 109
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
110 111
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
L
Linus Torvalds 已提交
112
			      ssd_area->response.code);
113
		goto out;
L
Linus Torvalds 已提交
114
	}
115 116
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
117
		goto out;
L
Linus Torvalds 已提交
118
	}
119 120 121
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
122 123
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
124
		goto out;
125 126 127 128 129 130 131
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
L
Linus Torvalds 已提交
132
		}
133 134
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
L
Linus Torvalds 已提交
135
	}
136 137
out:
	spin_unlock_irq(&chsc_page_lock);
L
Linus Torvalds 已提交
138 139 140
	return ret;
}

141
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
142
{
C
Cornelia Huck 已提交
143
	spin_lock_irq(sch->lock);
C
Cornelia Huck 已提交
144 145
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
L
Linus Torvalds 已提交
146
			goto out_unreg;
C
Cornelia Huck 已提交
147
	spin_unlock_irq(sch->lock);
L
Linus Torvalds 已提交
148
	return 0;
149

L
Linus Torvalds 已提交
150 151
out_unreg:
	sch->lpm = 0;
152
	spin_unlock_irq(sch->lock);
153
	css_schedule_eval(sch->schid);
L
Linus Torvalds 已提交
154 155 156
	return 0;
}

157
void chsc_chp_offline(struct chp_id chpid)
L
Linus Torvalds 已提交
158 159
{
	char dbf_txt[15];
160
	struct chp_link link;
L
Linus Torvalds 已提交
161

162
	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
L
Linus Torvalds 已提交
163 164
	CIO_TRACE_EVENT(2, dbf_txt);

165
	if (chp_get_status(chpid) <= 0)
L
Linus Torvalds 已提交
166
		return;
167 168
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
169 170
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
171
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
L
Linus Torvalds 已提交
172 173
}

174
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
175 176 177 178 179 180 181 182 183 184
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
185
	if (stsch_err(schid, &schib))
186
		/* We're through */
187
		return -ENXIO;
188 189

	/* Put it on the slow path. */
190
	css_schedule_eval(schid);
191 192 193
	return 0;
}

194
static int __s390_process_res_acc(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
195
{
C
Cornelia Huck 已提交
196
	spin_lock_irq(sch->lock);
C
Cornelia Huck 已提交
197 198
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
C
Cornelia Huck 已提交
199
	spin_unlock_irq(sch->lock);
200

201
	return 0;
202 203
}

204
static void s390_process_res_acc(struct chp_link *link)
205
{
L
Linus Torvalds 已提交
206 207
	char dbf_txt[15];

208 209
	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
L
Linus Torvalds 已提交
210
	CIO_TRACE_EVENT( 2, dbf_txt);
211 212
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
L
Linus Torvalds 已提交
213 214
		CIO_TRACE_EVENT( 2, dbf_txt);
	}
215 216
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
L
Linus Torvalds 已提交
217 218 219 220 221 222 223
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
224
	for_each_subchannel_staged(__s390_process_res_acc,
225
				   s390_process_res_acc_new_sch, link);
L
Linus Torvalds 已提交
226 227 228 229 230 231 232 233 234 235 236 237 238 239 240
}

/*
 * Extract the CHPID from a link-incident record (LIR).
 * Returns the CHPID (>= 0) or -EINVAL if the record is not usable.
 */
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}

258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276
/*
 * Command/response block for CHSC store-event-information
 * (request code 0x000e, issued from chsc_process_crw()).
 */
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

277
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
278
{
279 280
	struct chp_id chpid;
	int id;
281 282 283 284

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
285
		return;
286 287
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
288
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
289 290 291
	else {
		chp_id_init(&chpid);
		chpid.id = id;
292
		chsc_chp_offline(chpid);
293
	}
294 295
}

296
static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
L
Linus Torvalds 已提交
297
{
298
	struct chp_link link;
299
	struct chp_id chpid;
300 301 302 303 304
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
305
		return;
306 307
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
308
	/* allocate a new channel path structure, if needed */
309
	status = chp_get_status(chpid);
310
	if (status < 0)
311
		chp_new(chpid);
312
	else if (!status)
313
		return;
314 315
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
316
	if ((sei_area->vf & 0xc0) != 0) {
317
		link.fla = sei_area->fla;
318 319
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
320
			link.fla_mask = 0xffff;
321 322
		else
			/* link address */
323
			link.fla_mask = 0xff00;
324
	}
325
	s390_process_res_acc(&link);
326 327
}

328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357
/* Handle a channel-path-availability event (SEI content code 7). */
static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	/* ccdf holds a bitmap of affected CHPIDs. */
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			/* Unknown channel path - register it. */
			chp_new(chpid);
			continue;
		}
		/* Known channel path - refresh its descriptor. */
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);
	}
}

358 359 360 361 362 363
/* CCDF layout for a channel-path-configuration event (SEI content code 8). */
struct chp_config_data {
	u8 map[32];	/* bitmap of affected CHPIDs */
	u8 op;		/* 0=configure, 1=deconfigure, 2=cancel deconfigure */
	u8 pc;
};

364
static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
365 366 367 368
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
369
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
370 371 372

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
373
		return;
374 375 376 377 378 379
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
380 381
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
382 383 384 385 386 387 388 389 390 391 392 393 394 395
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

396
static void chsc_process_sei(struct chsc_sei_area *sei_area)
397 398
{
	/* Check if we might have lost some information. */
399
	if (sei_area->flags & 0x40) {
400
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
401 402
		css_schedule_eval_all();
	}
403 404 405
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident*/
406
		chsc_process_sei_link_incident(sei_area);
407
		break;
408
	case 2: /* i/o resource accessibility */
409
		chsc_process_sei_res_acc(sei_area);
410
		break;
411 412 413
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
414
	case 8: /* channel-path-configuration notification */
415
		chsc_process_sei_chp_config(sei_area);
416
		break;
417 418 419 420 421 422 423
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

424
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
425 426
{
	struct chsc_sei_area *sei_area;
L
Linus Torvalds 已提交
427

428 429 430 431 432 433 434 435
	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
L
Linus Torvalds 已提交
436
	if (!sei_page)
437
		return;
438 439
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
L
Linus Torvalds 已提交
440 441
	sei_area = sei_page;

442
	CIO_TRACE_EVENT(2, "prcss");
L
Linus Torvalds 已提交
443 444
	do {
		memset(sei_area, 0, sizeof(*sei_area));
445 446
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
447 448
		if (chsc(sei_area))
			break;
L
Linus Torvalds 已提交
449

450 451
		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
452
			chsc_process_sei(sei_area);
453 454
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
L
Linus Torvalds 已提交
455 456 457 458 459 460
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

461
void chsc_chp_online(struct chp_id chpid)
462
{
L
Linus Torvalds 已提交
463
	char dbf_txt[15];
464
	struct chp_link link;
L
Linus Torvalds 已提交
465

466
	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
L
Linus Torvalds 已提交
467 468
	CIO_TRACE_EVENT(2, dbf_txt);

469
	if (chp_get_status(chpid) != 0) {
470 471
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
472 473
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
C
Cornelia Huck 已提交
474
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
475
					   &link);
476
	}
L
Linus Torvalds 已提交
477 478
}

479 480
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
L
Linus Torvalds 已提交
481 482
{
	unsigned long flags;
483
	struct chp_link link;
L
Linus Torvalds 已提交
484

485 486
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
C
Cornelia Huck 已提交
487
	spin_lock_irqsave(sch->lock, flags);
C
Cornelia Huck 已提交
488
	if (sch->driver && sch->driver->chp_event)
489
		sch->driver->chp_event(sch, &link,
C
Cornelia Huck 已提交
490
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
C
Cornelia Huck 已提交
491
	spin_unlock_irqrestore(sch->lock, flags);
L
Linus Torvalds 已提交
492 493
}

494
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
495
{
496
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
497 498 499 500 501

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

502
static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
503
{
504
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
505 506 507 508 509

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

510 511 512 513 514
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

515
	if (stsch_err(schid, &schib))
516 517 518
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
519
	css_schedule_eval(schid);
520 521 522
	return 0;
}

523 524 525 526
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channl-path ID
 * @on: non-zero for vary online, zero for vary offline
L
Linus Torvalds 已提交
527
 */
528
int chsc_chp_vary(struct chp_id chpid, int on)
L
Linus Torvalds 已提交
529
{
530
	struct channel_path *chp = chpid_to_chp(chpid);
531

532 533
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
L
Linus Torvalds 已提交
534 535 536
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
537 538 539
	if (on) {
		/* Try to update the channel path descritor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
540
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
S
Sebastian Ott 已提交
541
					   __s390_vary_chpid_on, &chpid);
542
	} else
543
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
S
Sebastian Ott 已提交
544
					   NULL, &chpid);
545

L
Linus Torvalds 已提交
546 547 548
	return 0;
}

549 550 551 552 553 554 555 556
/* Remove the measurement sysfs attributes from all known channel paths. */
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

/*
 * Add the measurement sysfs attributes to all known channel paths;
 * on failure, roll back the attributes added so far.
 */
static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

584
int __chsc_do_secm(struct channel_subsystem *css, int enable)
585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
602
	} __attribute__ ((packed)) *secm_area;
603 604
	int ret, ccode;

605 606 607
	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
608 609 610
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

611
	secm_area->key = PAGE_DEFAULT_KEY >> 4;
612 613 614 615 616 617
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
618 619 620 621
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
622 623

	switch (secm_area->response.code) {
624 625
	case 0x0102:
	case 0x0103:
626
		ret = -EINVAL;
627
		break;
628
	default:
629
		ret = chsc_error_from_response(secm_area->response.code);
630
	}
631 632 633
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
634 635
out:
	spin_unlock_irq(&chsc_page_lock);
636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652
	return ret;
}

/*
 * Enable or disable channel measurement for @css, managing the
 * channel-utilization blocks (cub pages) and measurement attributes.
 */
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				/* Attribute setup failed - turn secm back off. */
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}

672
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
673
				     int c, int m, void *page)
L
Linus Torvalds 已提交
674
{
675
	struct chsc_scpd *scpd_area;
L
Linus Torvalds 已提交
676 677
	int ccode, ret;

678 679 680 681
	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;
L
Linus Torvalds 已提交
682

683 684
	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
685 686
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
687
	scpd_area->cssid = chpid.cssid;
688 689
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
690 691 692 693
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;
L
Linus Torvalds 已提交
694 695

	ccode = chsc(scpd_area);
696 697
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;
L
Linus Torvalds 已提交
698

699
	ret = chsc_error_from_response(scpd_area->response.code);
700
	if (ret)
701
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
L
Linus Torvalds 已提交
702 703 704
			      scpd_area->response.code);
	return ret;
}
705 706 707 708 709 710
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
711
	struct chsc_scpd *scpd_area;
712
	unsigned long flags;
713 714
	int ret;

715
	spin_lock_irqsave(&chsc_page_lock, flags);
716 717
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
718
	if (ret)
719 720
		goto out;
	chsc_resp = (void *)&scpd_area->response;
721
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
722
out:
723
	spin_unlock_irqrestore(&chsc_page_lock, flags);
724 725
	return ret;
}
L
Linus Torvalds 已提交
726

727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745
/* Fetch the format-1 channel path description for @chpid into @desc. */
int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	/* c=1 requests the format-1 description. */
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

746 747 748 749
/*
 * Copy the measurement characteristics flagged as valid in @cmcv from
 * @chars into chp->cmg_chars; invalid entries are zeroed.
 */
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}

763
int chsc_get_channel_measurement_chars(struct channel_path *chp)
764
{
765
	struct cmg_chars *cmg_chars;
766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
787
	} __attribute__ ((packed)) *scmc_area;
788

789 790 791
	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
792 793
		return -ENOMEM;

794 795 796
	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
797 798
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
799 800
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;
801 802 803 804 805 806 807

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

808
	ret = chsc_error_from_response(scmc_area->response.code);
809
	if (ret) {
810
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
811
			      scmc_area->response.code);
812 813 814 815 816 817 818 819 820 821 822 823
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
824
	}
825 826 827
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
828
out:
829 830 831 832
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

833 834 835
	return ret;
}

S
Sebastian Ott 已提交
836
/* Allocate the shared CHSC pages and register the CSS CRW handler. */
int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	/* free_page() tolerates a zero address, so no NULL checks needed. */
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

S
Sebastian Ott 已提交
856
/* Undo chsc_init(): unregister the CRW handler and free both pages. */
void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

863
int chsc_enable_facility(int operation_code)
864
{
865
	unsigned long flags;
866
	int ret;
867
	struct {
868 869 870 871 872 873 874 875 876 877 878 879
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
880
	} __attribute__ ((packed)) *sda_area;
881

882 883 884 885 886 887
	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;
888

889
	ret = chsc(sda_area);
890 891 892 893
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
894

895
	switch (sda_area->response.code) {
896
	case 0x0101:
897 898
		ret = -EOPNOTSUPP;
		break;
899
	default:
900
		ret = chsc_error_from_response(sda_area->response.code);
901
	}
902 903
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
904 905 906
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
907 908 909
	return ret;
}

L
Linus Torvalds 已提交
910 911 912 913 914 915 916 917 918 919 920 921 922 923 924
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

/*
 * Read the general and CHSC characteristics of the channel subsystem
 * (CHSC store-channel-subsystem-characteristics, code 0x0010) into the
 * global css_general_characteristics / css_chsc_characteristics.
 */
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
M
Martin Schwidefsky 已提交
956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004

/*
 * Issue CHSC command 0x0033 with the given @op and @ctrl values using
 * the caller-provided 4K @page as command/response block.
 * Returns 0 if the response code is 0x0001, -EIO otherwise.
 */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

/*
 * Issue CHSC command 0x0038 using the caller-provided 4K @page and copy
 * @size bytes of response data into @result.  Returns 0 if the response
 * code is 0x0001, -EIO otherwise.
 */
int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];	/* variably-sized response data area */
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	/* Data is copied even if the response code indicates failure. */
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

M
Michael Ernst 已提交
1005 1006
int chsc_siosl(struct subchannel_id schid)
{
1007 1008 1009 1010 1011 1012 1013 1014
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
M
Michael Ernst 已提交
1015 1016 1017 1018
	unsigned long flags;
	int ccode;
	int rc;

1019 1020 1021 1022 1023 1024 1025
	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;
M
Michael Ernst 已提交
1026

1027
	ccode = chsc(siosl_area);
M
Michael Ernst 已提交
1028 1029 1030 1031 1032 1033 1034 1035 1036
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
1037
	rc = chsc_error_from_response(siosl_area->response.code);
M
Michael Ernst 已提交
1038 1039 1040
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
1041
			      siosl_area->response.code);
M
Michael Ernst 已提交
1042 1043 1044 1045
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
1046
	spin_unlock_irqrestore(&chsc_page_lock, flags);
M
Michael Ernst 已提交
1047 1048 1049
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);