chsc.c 26.6 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3
/*
 *   S/390 common I/O routines -- channel subsystem call
 *
4
 *    Copyright IBM Corp. 1999,2012
L
Linus Torvalds 已提交
5
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
6
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
L
Linus Torvalds 已提交
7 8 9
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

10 11 12
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

L
Linus Torvalds 已提交
13 14 15 16
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
17
#include <linux/pci.h>
L
Linus Torvalds 已提交
18 19

#include <asm/cio.h>
20
#include <asm/chpid.h>
21
#include <asm/chsc.h>
22
#include <asm/crw.h>
L
Linus Torvalds 已提交
23 24 25 26 27

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
28
#include "chp.h"
L
Linus Torvalds 已提交
29 30 31
#include "chsc.h"

static void *sei_page;
32 33
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);
L
Linus Torvalds 已提交
34

35 36 37 38 39 40 41
/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		/* Command executed successfully. */
		return 0;
	case 0x0004:
		return -EOPNOTSUPP;
	case 0x000b:
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	default:
		/* Anything unrecognized is reported as an I/O error. */
		return -EIO;
	}
}
65
EXPORT_SYMBOL_GPL(chsc_error_from_response);
66

67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91
/*
 * Request/response block for the store-subchannel-description chsc
 * (request code 0x0004, set up in chsc_get_ssd_info()).  Layout must
 * match the hardware interface exactly; unnamed bitfields are padding.
 */
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
L
Linus Torvalds 已提交
92
{
93 94 95 96 97
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;
L
Linus Torvalds 已提交
98

99 100 101
	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
102 103
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
104 105 106
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;
L
Linus Torvalds 已提交
107 108

	ccode = chsc(ssd_area);
109
	/* Check response. */
L
Linus Torvalds 已提交
110
	if (ccode > 0) {
111
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
112
		goto out;
L
Linus Torvalds 已提交
113
	}
114 115
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
116 117
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
L
Linus Torvalds 已提交
118
			      ssd_area->response.code);
119
		goto out;
L
Linus Torvalds 已提交
120
	}
121 122
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
123
		goto out;
L
Linus Torvalds 已提交
124
	}
125 126 127
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
128 129
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
130
		goto out;
131 132 133 134 135 136 137
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
L
Linus Torvalds 已提交
138
		}
139 140
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
L
Linus Torvalds 已提交
141
	}
142 143
out:
	spin_unlock_irq(&chsc_page_lock);
L
Linus Torvalds 已提交
144 145 146
	return ret;
}

147
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
148
{
C
Cornelia Huck 已提交
149
	spin_lock_irq(sch->lock);
C
Cornelia Huck 已提交
150 151
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
L
Linus Torvalds 已提交
152
			goto out_unreg;
C
Cornelia Huck 已提交
153
	spin_unlock_irq(sch->lock);
L
Linus Torvalds 已提交
154
	return 0;
155

L
Linus Torvalds 已提交
156 157
out_unreg:
	sch->lpm = 0;
158
	spin_unlock_irq(sch->lock);
159
	css_schedule_eval(sch->schid);
L
Linus Torvalds 已提交
160 161 162
	return 0;
}

163
void chsc_chp_offline(struct chp_id chpid)
L
Linus Torvalds 已提交
164 165
{
	char dbf_txt[15];
166
	struct chp_link link;
L
Linus Torvalds 已提交
167

168
	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
L
Linus Torvalds 已提交
169 170
	CIO_TRACE_EVENT(2, dbf_txt);

171
	if (chp_get_status(chpid) <= 0)
L
Linus Torvalds 已提交
172
		return;
173 174
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
175 176
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
177
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
L
Linus Torvalds 已提交
178 179
}

180
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
181 182 183 184 185 186 187 188 189 190
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
191
	if (stsch_err(schid, &schib))
192
		/* We're through */
193
		return -ENXIO;
194 195

	/* Put it on the slow path. */
196
	css_schedule_eval(schid);
197 198 199
	return 0;
}

200
static int __s390_process_res_acc(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
201
{
C
Cornelia Huck 已提交
202
	spin_lock_irq(sch->lock);
C
Cornelia Huck 已提交
203 204
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
C
Cornelia Huck 已提交
205
	spin_unlock_irq(sch->lock);
206

207
	return 0;
208 209
}

210
static void s390_process_res_acc(struct chp_link *link)
211
{
L
Linus Torvalds 已提交
212 213
	char dbf_txt[15];

214 215
	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
L
Linus Torvalds 已提交
216
	CIO_TRACE_EVENT( 2, dbf_txt);
217 218
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
L
Linus Torvalds 已提交
219 220
		CIO_TRACE_EVENT( 2, dbf_txt);
	}
221 222
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
L
Linus Torvalds 已提交
223 224 225 226 227 228 229
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
230
	for_each_subchannel_staged(__s390_process_res_acc,
231
				   s390_process_res_acc_new_sch, link);
L
Linus Torvalds 已提交
232 233 234 235 236 237 238 239 240 241 242 243 244 245 246
}

/*
 * Extract the CHPID from a link-incident record, or a negative error
 * code if the record does not carry a usable one.
 */
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir = data;

	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
	return (u16) (lir->indesc[0] & 0x000000ff);
}

264 265 266 267 268 269 270
/*
 * Store-event-information (SEI) request/response layouts.  Field order
 * and sizes follow the hardware interface and must not change.
 */
struct chsc_sei_nt0_area {
	u8  flags;
	u8  vf;				/* validity flags */
	u8  rs;				/* reporting source */
	u8  cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	u32 reserved1;
	u32 reserved2;
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
} __packed;

struct chsc_sei_nt2_area {
	u8  flags;			/* p and v bit */
	u8  reserved1;
	u8  reserved2;
	u8  cc;				/* content code */
	u32 reserved3[13];
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
} __packed;

#define CHSC_SEI_NT0	0ULL
#define CHSC_SEI_NT2	(1ULL << 61)

struct chsc_sei {
	struct chsc_header request;
	u32 reserved1;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
	u32 reserved2;
	union {
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
	} u;
} __packed;

static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
303
{
304 305
	struct chp_id chpid;
	int id;
306 307 308 309

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
310
		return;
311 312
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
313
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
314 315 316
	else {
		chp_id_init(&chpid);
		chpid.id = id;
317
		chsc_chp_offline(chpid);
318
	}
319 320
}

321
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
L
Linus Torvalds 已提交
322
{
323
	struct chp_link link;
324
	struct chp_id chpid;
325 326 327 328 329
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
330
		return;
331 332
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
333
	/* allocate a new channel path structure, if needed */
334
	status = chp_get_status(chpid);
335
	if (status < 0)
336
		chp_new(chpid);
337
	else if (!status)
338
		return;
339 340
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
341
	if ((sei_area->vf & 0xc0) != 0) {
342
		link.fla = sei_area->fla;
343 344
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
345
			link.fla_mask = 0xffff;
346 347
		else
			/* link address */
348
			link.fla_mask = 0xff00;
349
	}
350
	s390_process_res_acc(&link);
351 352
}

353
static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);
	}
}

383 384 385 386 387 388
/* Content-code-dependent field of a channel-path-configuration SEI. */
struct chp_config_data {
	u8 map[32];	/* bitmap of affected CHPIDs */
	u8 op;		/* requested operation, see chsc_process_sei_chp_config() */
	u8 pc;
};

389
static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
390 391 392 393
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
394
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
395 396 397

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
398
		return;
399 400 401 402 403 404
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
405 406
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
407 408 409 410 411 412 413 414 415 416 417 418 419 420
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

421
static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
S
Sebastian Ott 已提交
422 423 424 425 426 427 428 429 430 431 432 433 434
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}

435
/* Dispatch an NT2 (PCI) store-event-information area. */
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
#ifdef CONFIG_PCI
	if (sei_area->cc == 1) {
		zpci_event_error(sei_area->ccdf);
	} else if (sei_area->cc == 2) {
		zpci_event_availability(sei_area->ccdf);
	} else {
		CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
	}
#endif
}

static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
455 456 457
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident*/
458
		chsc_process_sei_link_incident(sei_area);
459
		break;
460
	case 2: /* i/o resource accessibility */
461
		chsc_process_sei_res_acc(sei_area);
462
		break;
463 464 465
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
466
	case 8: /* channel-path-configuration notification */
467
		chsc_process_sei_chp_config(sei_area);
468
		break;
S
Sebastian Ott 已提交
469 470 471
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
472 473 474 475 476 477 478
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520
/*
 * Issue store-event-information (chsc code 0x000e) for notification
 * type mask @ntsm and dispatch each pending event, looping while the
 * response indicates more events are pending (flags bit 0x80).
 *
 * Returns 1 if at least one event of a known notification type was
 * processed, 0 otherwise (chsc failure, bad response code, or an
 * unknown ntsm in the response).  @sei is the caller's (pre-locked)
 * request/response page.
 */
static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
{
	do {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code == 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei successful\n");

			/* Check if we might have lost some information. */
			if (sei->u.nt0_area.flags & 0x40) {
				CIO_CRW_EVENT(2, "chsc: event overflow\n");
				css_schedule_eval_all();
			}

			/* The response echoes the notification type. */
			switch (sei->ntsm) {
			case CHSC_SEI_NT0:
				chsc_process_sei_nt0(&sei->u.nt0_area);
				return 1;
			case CHSC_SEI_NT2:
				chsc_process_sei_nt2(&sei->u.nt2_area);
				return 1;
			default:
				CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
					      sei->ntsm);
				return 0;
			}
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei->response.code);
			break;
		}
	} while (sei->u.nt0_area.flags & 0x80);

	return 0;
}

521
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
522
{
523
	struct chsc_sei *sei;
L
Linus Torvalds 已提交
524

525 526 527 528 529 530 531 532
	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
L
Linus Torvalds 已提交
533
	if (!sei_page)
534
		return;
535 536
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
537
	sei = sei_page;
L
Linus Torvalds 已提交
538

539
	CIO_TRACE_EVENT(2, "prcss");
L
Linus Torvalds 已提交
540

541 542 543 544 545 546 547 548
	/*
	 * The ntsm does not allow to select NT0 and NT2 together. We need to
	 * first check for NT2, than additionally for NT0...
	 */
#ifdef CONFIG_PCI
	if (!__chsc_process_crw(sei, CHSC_SEI_NT2))
#endif
		__chsc_process_crw(sei, CHSC_SEI_NT0);
L
Linus Torvalds 已提交
549 550
}

551
void chsc_chp_online(struct chp_id chpid)
552
{
L
Linus Torvalds 已提交
553
	char dbf_txt[15];
554
	struct chp_link link;
L
Linus Torvalds 已提交
555

556
	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
L
Linus Torvalds 已提交
557 558
	CIO_TRACE_EVENT(2, dbf_txt);

559
	if (chp_get_status(chpid) != 0) {
560 561
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
562 563
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
C
Cornelia Huck 已提交
564
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
565
					   &link);
566
	}
L
Linus Torvalds 已提交
567 568
}

569 570
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
L
Linus Torvalds 已提交
571 572
{
	unsigned long flags;
573
	struct chp_link link;
L
Linus Torvalds 已提交
574

575 576
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
C
Cornelia Huck 已提交
577
	spin_lock_irqsave(sch->lock, flags);
C
Cornelia Huck 已提交
578
	if (sch->driver && sch->driver->chp_event)
579
		sch->driver->chp_event(sch, &link,
C
Cornelia Huck 已提交
580
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
C
Cornelia Huck 已提交
581
	spin_unlock_irqrestore(sch->lock, flags);
L
Linus Torvalds 已提交
582 583
}

584
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
585
{
586
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
587 588 589 590 591

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

592
static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
593
{
594
	struct chp_id *chpid = data;
L
Linus Torvalds 已提交
595 596 597 598 599

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

600 601 602 603 604
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

605
	if (stsch_err(schid, &schib))
606 607 608
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
609
	css_schedule_eval(schid);
610 611 612
	return 0;
}

613 614 615 616
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channl-path ID
 * @on: non-zero for vary online, zero for vary offline
L
Linus Torvalds 已提交
617
 */
618
int chsc_chp_vary(struct chp_id chpid, int on)
L
Linus Torvalds 已提交
619
{
620
	struct channel_path *chp = chpid_to_chp(chpid);
621

622 623
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
L
Linus Torvalds 已提交
624 625 626
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
627 628 629
	if (on) {
		/* Try to update the channel path descritor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
630
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
S
Sebastian Ott 已提交
631
					   __s390_vary_chpid_on, &chpid);
632
	} else
633
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
S
Sebastian Ott 已提交
634
					   NULL, &chpid);
635

L
Linus Torvalds 已提交
636 637 638
	return 0;
}

639 640 641 642 643 644 645 646
/* Remove measurement attributes from every registered channel path. */
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int idx;

	for (idx = 0; idx <= __MAX_CHPID; idx++) {
		if (css->chps[idx])
			chp_remove_cmg_attr(css->chps[idx]);
	}
}

/*
 * Add measurement attributes to every registered channel path.
 * On failure, unwind the attributes added so far and return the error.
 */
static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int idx, rc = 0;

	for (idx = 0; idx <= __MAX_CHPID; idx++) {
		if (!css->chps[idx])
			continue;
		rc = chp_add_cmg_attr(css->chps[idx]);
		if (rc)
			goto cleanup;
	}
	return rc;
cleanup:
	while (--idx >= 0) {
		if (css->chps[idx])
			chp_remove_cmg_attr(css->chps[idx]);
	}
	return rc;
}

674
int __chsc_do_secm(struct channel_subsystem *css, int enable)
675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
692
	} __attribute__ ((packed)) *secm_area;
693 694
	int ret, ccode;

695 696 697
	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
698 699 700
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

701
	secm_area->key = PAGE_DEFAULT_KEY >> 4;
702 703 704 705 706 707
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
708 709 710 711
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
712 713

	switch (secm_area->response.code) {
714 715
	case 0x0102:
	case 0x0103:
716
		ret = -EINVAL;
717
		break;
718
	default:
719
		ret = chsc_error_from_response(secm_area->response.code);
720
	}
721 722 723
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
724 725
out:
	spin_unlock_irq(&chsc_page_lock);
726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742
	return ret;
}

/*
 * Enable or disable channel measurement for @css, managing the two
 * channel-utilization (cub) pages and the per-path sysfs attributes.
 */
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int rc;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	rc = __chsc_do_secm(css, enable);
	if (!rc) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			rc = chsc_add_cmg_attr(css);
			if (rc) {
				/* Attribute setup failed: back out. */
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else {
			chsc_remove_cmg_attr(css);
		}
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return rc;
}

762
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
763
				     int c, int m, void *page)
L
Linus Torvalds 已提交
764
{
765
	struct chsc_scpd *scpd_area;
L
Linus Torvalds 已提交
766 767
	int ccode, ret;

768 769 770 771
	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;
L
Linus Torvalds 已提交
772

773 774
	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
775 776
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
777
	scpd_area->cssid = chpid.cssid;
778 779
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
780 781 782 783
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;
L
Linus Torvalds 已提交
784 785

	ccode = chsc(scpd_area);
786 787
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;
L
Linus Torvalds 已提交
788

789
	ret = chsc_error_from_response(scpd_area->response.code);
790
	if (ret)
791
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
L
Linus Torvalds 已提交
792 793 794
			      scpd_area->response.code);
	return ret;
}
795 796 797 798 799 800
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
801
	struct chsc_scpd *scpd_area;
802
	unsigned long flags;
803 804
	int ret;

805
	spin_lock_irqsave(&chsc_page_lock, flags);
806 807
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
808
	if (ret)
809 810
		goto out;
	chsc_resp = (void *)&scpd_area->response;
811
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
812
out:
813
	spin_unlock_irqrestore(&chsc_page_lock, flags);
814 815
	return ret;
}
L
Linus Torvalds 已提交
816

817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835
/* Fetch the format-1 channel path description for @chpid into @desc. */
int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	int ret;

	/* chsc_page is the shared request buffer; serialize access. */
	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	/* c=1 requests the format-1 description. */
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

836 837 838 839
/*
 * Copy the measurement characteristics flagged as valid in @cmcv from
 * @chars into chp->cmg_chars; invalid entries are zeroed.
 */
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	struct cmg_chars *dst = chp->cmg_chars;
	int idx;

	for (idx = 0; idx < NR_MEASUREMENT_CHARS; idx++) {
		int bit = 0x80 >> (idx + 3);

		dst->values[idx] = (cmcv & bit) ? chars->values[idx] : 0;
	}
}

853
int chsc_get_channel_measurement_chars(struct channel_path *chp)
854
{
855
	struct cmg_chars *cmg_chars;
856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
877
	} __attribute__ ((packed)) *scmc_area;
878

879 880 881
	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
882 883
		return -ENOMEM;

884 885 886
	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
887 888
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
889 890
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;
891 892 893 894 895 896 897

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

898
	ret = chsc_error_from_response(scmc_area->response.code);
899
	if (ret) {
900
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
901
			      scmc_area->response.code);
902 903 904 905 906 907 908 909 910 911 912 913
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
914
	}
915 916 917
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
918
out:
919 920 921 922
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

923 924 925
	return ret;
}

S
Sebastian Ott 已提交
926
/* Allocate the chsc work pages and register the CRW handler. */
int __init chsc_init(void)
{
	int rc;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		rc = -ENOMEM;
		goto out_err;
	}
	rc = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (rc)
		goto out_err;
	return 0;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return rc;
}

S
Sebastian Ott 已提交
946
/* Undo chsc_init(): unregister the CRW handler and free the pages. */
void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

953
int chsc_enable_facility(int operation_code)
954
{
955
	unsigned long flags;
956
	int ret;
957
	struct {
958 959 960 961 962 963 964 965 966 967 968 969
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
970
	} __attribute__ ((packed)) *sda_area;
971

972 973 974 975 976 977
	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;
978

979
	ret = chsc(sda_area);
980 981 982 983
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
984

985
	switch (sda_area->response.code) {
986
	case 0x0101:
987 988
		ret = -EOPNOTSUPP;
		break;
989
	default:
990
		ret = chsc_error_from_response(sda_area->response.code);
991
	}
992 993
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
994 995 996
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
997 998 999
	return ret;
}

L
Linus Torvalds 已提交
1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

/*
 * Issue store-channel-subsystem-characteristics (chsc code 0x0010)
 * and cache the general and chsc characteristics in the globals above.
 */
int __init
chsc_determine_css_characteristics(void)
{
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;
	int rc;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	rc = chsc(scsc_area);
	if (rc) {
		rc = (rc == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	rc = chsc_error_from_response(scsc_area->response.code);
	if (rc == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else {
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
	}
exit:
	spin_unlock_irq(&chsc_page_lock);
	return rc;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
M
Martin Schwidefsky 已提交
1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094

/*
 * Issue set-STP-control (chsc code 0x0033) with operation @op and
 * control word @ctrl, using @page as the request/response buffer.
 * Returns 0 on success, -EIO on chsc failure or a bad response code.
 */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	/* 0x0001 is the only success response code. */
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

/*
 * Issue store-STP-information (chsc code 0x0038) into @page and copy
 * @size bytes of response data to @result.  Returns 0 on success,
 * -EIO on chsc failure or a bad response code.
 */
int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];	/* caller-sized response payload */
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	/* Data is copied even on a bad response code; callers check rc. */
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

M
Michael Ernst 已提交
1095 1096
int chsc_siosl(struct subchannel_id schid)
{
1097 1098 1099 1100 1101 1102 1103 1104
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
M
Michael Ernst 已提交
1105 1106 1107 1108
	unsigned long flags;
	int ccode;
	int rc;

1109 1110 1111 1112 1113 1114 1115
	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;
M
Michael Ernst 已提交
1116

1117
	ccode = chsc(siosl_area);
M
Michael Ernst 已提交
1118 1119 1120 1121 1122 1123 1124 1125 1126
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
1127
	rc = chsc_error_from_response(siosl_area->response.code);
M
Michael Ernst 已提交
1128 1129 1130
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
1131
			      siosl_area->response.code);
M
Michael Ernst 已提交
1132 1133 1134 1135
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
1136
	spin_unlock_irqrestore(&chsc_page_lock, flags);
M
Michael Ernst 已提交
1137 1138 1139
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);
1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169


/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
	if (ccode > 0) {
		/* Condition code 3: not operational; otherwise busy. */
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);