chsc.c 25.3 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3
/*
 *   S/390 common I/O routines -- channel subsystem call
 *
4
 *    Copyright IBM Corp. 1999, 2010
L
Linus Torvalds 已提交
5
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
6
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
L
Linus Torvalds 已提交
7 8 9
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

10 11 12
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

L
Linus Torvalds 已提交
13 14 15 16 17 18
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
19
#include <asm/chpid.h>
20
#include <asm/chsc.h>
21
#include <asm/crw.h>
L
Linus Torvalds 已提交
22 23 24 25 26

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
27
#include "chp.h"
L
Linus Torvalds 已提交
28 29 30
#include "chsc.h"

static void *sei_page;
31 32
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);
L
Linus Torvalds 已提交
33

34 35 36 37 38 39 40
/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	/* Success. */
	if (response == 0x0001)
		return 0;
	/* Command not installed. */
	if (response == 0x0004)
		return -EOPNOTSUPP;
	/* Busy condition. */
	if (response == 0x000b)
		return -EBUSY;
	/* Resource shortage conditions. */
	if (response == 0x0100 || response == 0x0102)
		return -ENOMEM;
	/* Invalid request/operand conditions. */
	if (response == 0x0002 || response == 0x0003 ||
	    response == 0x0006 || response == 0x0007 ||
	    response == 0x0008 || response == 0x000a ||
	    response == 0x0104)
		return -EINVAL;
	/* Anything else is treated as a generic I/O error. */
	return -EIO;
}
64
EXPORT_SYMBOL_GPL(chsc_error_from_response);
65

66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90
/*
 * Request/response block for the store-subchannel-description CHSC
 * (request code 0x0004), used by chsc_get_ssd_info() below.  Layout is
 * hardware-defined; unnamed bitfields are reserved bits.
 */
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

/*
 * chsc_get_ssd_info() - fetch the subchannel description for @schid via
 * CHSC command 0x0004 and copy the path/link data into @ssd.
 *
 * Returns 0 on success, -ENODEV if the subchannel is not provided or not
 * of a supported type, -EBUSY on a busy condition code, or a negative
 * error derived from the CHSC response code.  Serialized on
 * chsc_page_lock since the shared chsc_page buffer is used.
 */
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	/* Query exactly one subchannel: first == last == sch_no. */
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		/* Condition code 3 means "not operational". */
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	/* Only I/O and message subchannels carry path information. */
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

146
/*
 * Per-subchannel callback for chsc_chp_offline(): notify the subchannel
 * driver that a channel path went away.  If the driver's chp_event
 * handler reports failure, clear the path mask and schedule the
 * subchannel for reevaluation.  Always returns 0 so iteration continues.
 */
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

162
/*
 * chsc_chp_offline() - handle a channel path going offline.
 *
 * Does nothing if the channel path is not known/configured
 * (chp_get_status() <= 0); otherwise notifies every staged subchannel
 * via s390_subchannel_remove_chpid().
 */
void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

179
/*
 * Callback for subchannels that are not yet registered: if the
 * subchannel exists (stsch succeeds), queue it for slow-path
 * evaluation so device recognition runs again.
 */
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

199
/*
 * Per-subchannel callback: forward a CHP_ONLINE event (resource became
 * accessible) to the subchannel driver, under the subchannel lock.
 * Always returns 0 so iteration continues.
 */
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

209
/*
 * s390_process_res_acc() - handle a resource-accessibility event for the
 * channel path / link address described by @link: notify registered
 * subchannels and schedule recognition for unregistered ones.
 */
static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}

/*
 * Extract the channel-path id from a link incident record.
 * Returns the chpid (byte 3 of the first incident-node descriptor
 * word) on success, or -EINVAL if the record is null or its node
 * descriptor is invalid or of an unhandled type.
 */
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *rec = data;

	if (!(rec->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(rec->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(rec->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
	return (u16) (rec->indesc[0] & 0x000000ff);
}

263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281
/*
 * Request/response block for the store-event-information CHSC
 * (request code 0x000e), processed by chsc_process_crw()/chsc_process_sei().
 */
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

282
/*
 * Handle a link-incident event (content code 1): parse the link
 * incident record from the ccdf and take the affected channel path
 * offline.  Only reporting source 4 is handled.
 */
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

301
/*
 * Handle a resource-accessibility event (content code 2): build a
 * chp_link from the event data and hand it to s390_process_res_acc().
 * Only reporting source 4 is handled; a previously unknown channel
 * path is registered first, a known-unconfigured one is ignored.
 */
static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* vf bits 0-1 indicate how much of the link address is valid. */
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362
/*
 * Handle a channel-path-availability event (content code 7): for every
 * channel path flagged in the event's bitmap, either register it (if
 * unknown) or refresh its cached channel-path descriptor.
 */
static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		/* chp->lock protects the cached descriptor. */
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);
	}
}

363 364 365 366 367 368
/* Content-code-dependent data for channel-path-configuration events. */
struct chp_config_data {
	u8 map[32];	/* bitmap of affected channel paths */
	u8 op;		/* operation: 0=configure, 1=deconfigure,
			 * 2=cancel deconfigure (see
			 * chsc_process_sei_chp_config()) */
	u8 pc;
};

369
static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
370 371 372 373
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
374
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
375 376 377

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
378
		return;
379 380 381 382 383 384
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
385 386
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
387 388 389 390 391 392 393 394 395 396 397 398 399 400
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

S
Sebastian Ott 已提交
401 402 403 404 405 406 407 408 409 410 411 412 413 414
/*
 * Handle an SCM (storage-class memory) change notification (content
 * code 12, reporting source 7): refresh the SCM information.
 */
static void chsc_process_sei_scm_change(struct chsc_sei_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}

415
/*
 * Dispatch one store-event-information record to the handler matching
 * its content code.  If the overflow flag is set, events may have been
 * lost, so all subchannels are scheduled for reevaluation first.
 */
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident*/
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

446
/*
 * CRW handler for the channel subsystem (registered in chsc_init()):
 * repeatedly issue store-event-information (CHSC 0x000e) and process
 * each returned record until the "more pending" flag (0x80) is clear.
 * On CRW overflow, all subchannels are scheduled for reevaluation
 * instead.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei_area *sei_area;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

483
/*
 * chsc_chp_online() - handle a channel path coming online.
 *
 * If the channel path is configured (chp_get_status() != 0), notify all
 * staged subchannels via __s390_process_res_acc().
 */
void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}

501 502
/*
 * Notify one subchannel's driver of a vary operation on @chpid.
 * @on: non-zero delivers CHP_VARY_ON, zero delivers CHP_VARY_OFF.
 * The event is delivered under the subchannel lock with interrupts
 * disabled.
 */
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

516
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
517
{
518
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
519 520 521 522 523

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

524
static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
525
{
526
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
527 528 529 530 531

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

532 533 534 535 536
/*
 * Callback for unregistered subchannels during vary-on: if the
 * subchannel exists, queue it for slow-path evaluation so it gets
 * (re)recognized.
 */
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

545 546 547 548
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 *
 * Always returns 0.
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on) {
		/* Try to update the channel path descriptor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}

571 572 573 574 575 576 577 578
/* Remove measurement-related sysfs attributes from every known
 * channel path of @css. */
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

/*
 * Add measurement-related sysfs attributes to every known channel path
 * of @css.  On failure, previously added attributes are rolled back and
 * the error is returned.
 */
static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	/* Undo the attributes added before the failing index. */
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

606
/*
 * __chsc_do_secm() - issue the set-channel-monitor CHSC (code 0x0016)
 * to enable (@enable != 0) or disable channel measurement, passing the
 * channel-utilization blocks previously allocated in css->cub_addr1/2.
 *
 * Returns 0 on success or a negative error code.  Serialized on
 * chsc_page_lock since the shared chsc_page buffer is used.
 */
int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	/* Operation code 0 enables, 1 disables measurement. */
	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		/* secm-specific failure codes map to -EINVAL here. */
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

/*
 * chsc_secm() - enable or disable channel measurement for @css.
 *
 * Allocates the channel-utilization blocks on enable, calls
 * __chsc_do_secm(), and keeps the sysfs measurement attributes in sync.
 * On any failure path that leaves measurement disabled, the utilization
 * blocks are freed again.  Returns 0 on success or a negative error.
 */
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			/* free_page() tolerates a NULL (0) address. */
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				/* Attribute setup failed - back out. */
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}

694
/*
 * chsc_determine_channel_path_desc() - issue the store-channel-path-
 * description CHSC (code 0x0002) for a single channel path.
 * @chpid: channel path to query (first_chpid == last_chpid)
 * @fmt/@rfmt/@c/@m: request/response format controls as defined by the
 *	command; rfmt 1 requires the fcs facility, rfmt 2 the cib
 *	facility
 * @page: caller-provided 4K buffer that receives the response
 *
 * Returns 0 on success or a negative error code.  The caller extracts
 * the descriptor from @page (see the *_channel_path_desc wrappers).
 */
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

/*
 * Fetch the format-0 channel-path descriptor for @chpid into @desc.
 * Uses the shared chsc_page buffer, so the whole operation runs under
 * chsc_page_lock (irqsave - callers may hold interrupts disabled).
 * Returns 0 on success or a negative error code.
 */
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
L
Linus Torvalds 已提交
748

749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767
/*
 * Fetch the format-1 channel-path descriptor (c=1) for @chpid into
 * @desc, using the shared chsc_page buffer under chsc_page_lock.
 * Returns 0 on success or a negative error code.
 */
int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

768 769 770 771
/*
 * Copy the measurement characteristics from @chars into
 * chp->cmg_chars, keeping only the values whose validity bit is set in
 * @cmcv and zeroing the rest.  chp->cmg_chars must already point to a
 * valid buffer (set up by the caller).
 */
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		/* Validity bits for the values start at bit 3 of cmcv. */
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}

785
/*
 * chsc_get_channel_measurement_chars() - issue the store-channel-path-
 * measurement-characteristics CHSC (code 0x0022) for @chp and record
 * cmg/shared state plus, for cmg 2 and 3, the measurement values.
 *
 * On success chp->cmg_chars owns the allocated buffer; on all other
 * paths the temporary allocation is freed.  Returns 0 on success,
 * -ENOMEM on allocation failure, or a negative CHSC-derived error.
 */
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	/* Allocate before taking chsc_page_lock (GFP_KERNEL may sleep). */
	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		/* No measurement data for this channel path. */
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	/* Buffer ownership was not transferred to chp - release it. */
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}

S
Sebastian Ott 已提交
858
/*
 * chsc_init() - boot-time setup: allocate the shared sei_page and
 * chsc_page buffers (31-bit addressable, zeroed) and register the CRW
 * handler for the channel subsystem.  On failure both pages are freed.
 * Returns 0 on success or a negative error code.
 */
int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	/* free_page() tolerates a NULL (0) address. */
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

S
Sebastian Ott 已提交
878
/* Undo chsc_init(): unregister the CRW handler and free both pages. */
void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

885
/*
 * chsc_enable_facility() - issue the set-dynamic-attributes CHSC
 * (code 0x0031) with the given @operation_code to enable a facility.
 *
 * Returns 0 on success, -EOPNOTSUPP if the facility is not provided
 * (response 0x0101), or a negative CHSC-derived error.  Serialized on
 * chsc_page_lock since the shared chsc_page buffer is used.
 */
int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

L
Linus Torvalds 已提交
932 933 934 935 936 937 938 939 940 941 942 943 944 945 946
/* Characteristics of the channel subsystem, filled in at boot by
 * chsc_determine_css_characteristics() below. */
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

/*
 * chsc_determine_css_characteristics() - issue the store-channel-
 * subsystem-characteristics CHSC (code 0x0010) and cache the general
 * and chsc characteristic bits in the globals above.
 * Returns 0 on success or a negative error code.
 */
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
M
Martin Schwidefsky 已提交
978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026

/*
 * chsc_sstpc() - issue the set-STP-controls CHSC (code 0x0033) with the
 * given operation and control value, using the caller-provided 4K
 * @page as request/response buffer.  Returns 0 on success, -EIO on any
 * failure (condition code or non-success response).
 */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

/*
 * chsc_sstpi() - issue the store-STP-information CHSC (code 0x0038)
 * and copy @size bytes of response data into @result, using the
 * caller-provided 4K @page as request/response buffer.
 * Note: @result is copied even on a non-success response code; the
 * return value (0 or -EIO) tells the caller whether it is valid.
 */
int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];	/* GNU extension: VLA struct member */
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

M
Michael Ernst 已提交
1027 1028
/*
 * chsc_siosl() - issue the signal-I/O-operation-logging CHSC
 * (code 0x0046) for subchannel @schid.  Returns 0 on success or a
 * negative error code.  Serialized on chsc_page_lock since the shared
 * chsc_page buffer is used.
 */
int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);
1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101

/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Issues CHSC command 0x004C; @scm_area is caller-provided, so no
 * locking is done here.  Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);