/*
 *  drivers/s390/cio/css.c
 *  driver for channel subsystem
 *
 *    Copyright IBM Corp. 2002,2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 */
9 10 11 12

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"
L
Linus Torvalds 已提交
31 32

int css_init_done = 0;
33
static int need_reprobe = 0;
34
static int max_ssid = 0;
L
Linus Torvalds 已提交
35

36
struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
L
Linus Torvalds 已提交
37

38
int
39 40 41 42 43 44 45 46
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
47 48 49 50 51 52 53
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
54 55 56
	return ret;
}

57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85
/* Callback context for for_each_subchannel_staged(). */
struct cb_data {
	void *data;		/* caller cookie handed to both callbacks */
	struct idset *set;	/* ids not yet seen as registered devices */
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

/* Per-device iterator: handle a registered subchannel. */
static int call_fn_known_sch(struct device *dev, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch = to_subchannel(dev);

	/* Drop the id from the set so it is not treated as unknown later. */
	idset_sch_del(cb->set, sch->schid);
	if (!cb->fn_known_sch)
		return 0;
	return cb->fn_known_sch(sch, cb->data);
}

/* Per-id iterator: handle an id that never showed up as a device. */
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;

	if (!idset_sch_contains(cb->set, schid))
		return 0;
	return cb->fn_unknown_sch(schid, cb->data);
}

86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104
/* Brute-force fallback: decide known/unknown per id via a lookup. */
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch = get_subchannel_by_schid(schid);
	int rc = 0;

	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);	/* drop ref from lookup */
	} else if (cb->fn_unknown_sch) {
		rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

105 106 107 108 109 110 111 112 113 114
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;
115 116 117 118 119 120 121 122

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

123 124 125 126 127 128 129 130 131 132 133 134 135
	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

L
Linus Torvalds 已提交
136
static struct subchannel *
137
css_alloc_subchannel(struct subchannel_id schid)
L
Linus Torvalds 已提交
138 139 140 141 142 143 144
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
145
	ret = cio_validate_subchannel (sch, schid);
L
Linus Torvalds 已提交
146 147 148 149 150 151 152 153 154 155 156 157
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
	if (sch) {
		/* Reset intparm to zeroes. */
158 159
		sch->config.intparm = 0;
		cio_commit_config(sch);
C
Cornelia Huck 已提交
160
		kfree(sch->lock);
L
Linus Torvalds 已提交
161 162 163 164 165 166 167 168 169 170
		kfree(sch);
	}
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
C
Cornelia Huck 已提交
171 172
	if (!cio_is_console(sch->schid)) {
		kfree(sch->lock);
L
Linus Torvalds 已提交
173
		kfree(sch);
C
Cornelia Huck 已提交
174
	}
L
Linus Torvalds 已提交
175 176
}

177
static int css_sch_device_register(struct subchannel *sch)
178 179 180 181 182 183 184 185 186
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

187 188 189 190
/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
191 192 193
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
194 195
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
196 197
	mutex_unlock(&sch->reg_mutex);
}
198
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
199

200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244
/* Build minimal ssd info from the pmcw (fallback when chsc is unusable). */
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int bit;

	memset(ssd, 0, sizeof(*ssd));
	ssd->path_mask = pmcw->pim;
	for (bit = 0; bit < 8; bit++) {
		if (!(pmcw->pim & (0x80 >> bit)))
			continue;
		chp_id_init(&ssd->chpid[bit]);
		ssd->chpid[bit].id = pmcw->chpid[bit];
	}
}

/* Make sure a channel-path object exists for every path in @ssd. */
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int bit;

	for (bit = 0; bit < 8; bit++) {
		if (!(ssd->path_mask & (0x80 >> bit)))
			continue;
		if (!chp_is_registered(ssd->chpid[bit]))
			chp_new(ssd->chpid[bit]);
	}
}

/* Refresh the subchannel-description data attached to @sch. */
void css_update_ssd_info(struct subchannel *sch)
{
	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		return;
	}
	if (chsc_get_ssd_info(sch->schid, &sch->ssd_info))
		/* chsc failed - fall back to the pmcw contents. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	ssd_register_chpids(&sch->ssd_info);
}

245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279
/* sysfs "type" attribute: subchannel type as one hex digit. */
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

/* sysfs "modalias" attribute: used for module autoloading. */
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

/* Default attributes attached to every subchannel device. */
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

280
static int css_register_subchannel(struct subchannel *sch)
L
Linus Torvalds 已提交
281 282 283 284
{
	int ret;

	/* Initialize the subchannel structure */
285
	sch->dev.parent = &channel_subsystems[0]->device;
L
Linus Torvalds 已提交
286 287
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
288
	sch->dev.groups = default_subch_attr_groups;
289 290 291 292 293
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
294 295 296
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
297
	 */
298
	sch->dev.uevent_suppress = 1;
299
	css_update_ssd_info(sch);
L
Linus Torvalds 已提交
300
	/* make it known to the system */
301
	ret = css_sch_device_register(sch);
302
	if (ret) {
C
Cornelia Huck 已提交
303 304
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
305 306
		return ret;
	}
307 308 309 310 311 312 313 314 315
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		sch->dev.uevent_suppress = 0;
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
L
Linus Torvalds 已提交
316 317 318
	return ret;
}

C
Cornelia Huck 已提交
319
/* Allocate, validate and register a subchannel for @schid. */
int css_probe_device(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);
	ret = css_register_subchannel(sch);
	if (ret)
		css_free_subchannel(sch);
	return ret;
}

C
Cornelia Huck 已提交
333 334 335 336
static int
check_subchannel(struct device * dev, void * data)
{
	struct subchannel *sch;
337
	struct subchannel_id *schid = data;
C
Cornelia Huck 已提交
338 339

	sch = to_subchannel(dev);
340
	return schid_equal(&sch->schid, schid);
C
Cornelia Huck 已提交
341 342
}

L
Linus Torvalds 已提交
343
struct subchannel *
344
get_subchannel_by_schid(struct subchannel_id schid)
L
Linus Torvalds 已提交
345 346 347
{
	struct device *dev;

C
Cornelia Huck 已提交
348
	dev = bus_find_device(&css_bus_type, NULL,
349
			      &schid, check_subchannel);
L
Linus Torvalds 已提交
350

C
Cornelia Huck 已提交
351
	return dev ? to_subchannel(dev) : NULL;
L
Linus Torvalds 已提交
352 353
}

354 355 356 357 358 359 360 361
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
362 363
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
364 365 366 367
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

368 369 370 371 372 373 374 375
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
376
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
377 378
		/* Unusable - ignore. */
		return 0;
L
Linus Torvalds 已提交
379
	}
380 381 382 383 384 385
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
			 "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}

C
Cornelia Huck 已提交
386 387 388 389 390 391 392 393 394 395 396 397 398 399 400
/* Hand a machine-check event on a registered subchannel to its driver. */
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	if (!sch->driver)
		return 0;
	if (!sch->driver->sch_event) {
		dev_dbg(&sch->dev,
			"Got subchannel machine check but "
			"no sch_event handler provided.\n");
		return 0;
	}
	return sch->driver->sch_event(sch, slow);
}

401
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
402 403 404 405 406 407 408 409 410 411
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
412 413
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
L
Linus Torvalds 已提交
414 415
}

416 417 418 419
/* Ids queued for deferred ("slow path") evaluation, and its lock. */
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

430
static int slow_eval_known_fn(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
431
{
432 433
	int eval;
	int rc;
L
Linus Torvalds 已提交
434 435

	spin_lock_irq(&slow_subchannel_lock);
436 437 438 439 440 441 442
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
L
Linus Torvalds 已提交
443
	}
444 445 446 447 448 449 450 451 452 453 454
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
L
Linus Torvalds 已提交
455
	spin_unlock_irq(&slow_subchannel_lock);
456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

/* Workqueue function draining the slow subchannel set. */
static void css_slow_path_func(struct work_struct *unused)
{
	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
}

482
/* Work item and dedicated workqueue for slow-path evaluation. */
static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504
/* Queue @schid for deferred evaluation and kick the slow-path worker. */
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

/* Queue every possible subchannel id for deferred evaluation. */
void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

505 506 507 508 509
/* Block until all queued slow-path work has completed. */
void css_wait_for_slow_path(void)
{
	flush_workqueue(slow_path_wq);
}

510 511 512 513 514
/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
	int ret;

C
Cornelia Huck 已提交
515 516
	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
		      schid.ssid, schid.sch_no);
517 518 519 520 521 522 523 524 525
	if (need_reprobe)
		return -EAGAIN;

	ret = css_probe_device(schid);
	switch (ret) {
	case 0:
		break;
	case -ENXIO:
	case -ENOMEM:
526
	case -EIO:
527 528 529 530 531 532 533 534 535
		/* These should abort looping */
		break;
	default:
		ret = 0;
	}

	return ret;
}

536 537 538 539 540 541 542 543 544 545 546
/* Deferred reprobe: wait for device recognition to settle, then retry. */
static void reprobe_after_idle(struct work_struct *unused)
{
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	/* Only reschedule if nobody cancelled the request meanwhile. */
	if (need_reprobe)
		css_schedule_reprobe();
}

static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);

547
/* Work function used to reprobe all unregistered subchannels. */
548
static void reprobe_all(struct work_struct *unused)
549 550 551
{
	int ret;

552
	CIO_MSG_EVENT(4, "reprobe start\n");
553 554

	/* Make sure initial subchannel scan is done. */
555 556 557 558 559
	if (atomic_read(&ccw_device_init_count) != 0) {
		queue_work(ccw_device_work, &reprobe_idle_work);
		return;
	}
	need_reprobe = 0;
560
	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
561

562
	CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
563 564 565
		      need_reprobe);
}

566
static DECLARE_WORK(css_reprobe_work, reprobe_all);
567 568 569 570 571

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	need_reprobe = 1;
572
	queue_work(slow_path_wq, &css_reprobe_work);
573 574 575 576
}

EXPORT_SYMBOL_GPL(css_schedule_reprobe);

L
Linus Torvalds 已提交
577 578 579
/*
 * Called from the machine check handler for subchannel report words.
 */
580
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
L
Linus Torvalds 已提交
581
{
582
	struct subchannel_id mchk_schid;
L
Linus Torvalds 已提交
583

584 585 586 587 588 589 590 591 592 593 594 595 596
	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
597
	init_subchannel_id(&mchk_schid);
598 599 600
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;
601

602
	/*
L
Linus Torvalds 已提交
603 604 605 606
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
607
	css_evaluate_subchannel(mchk_schid, 0);
L
Linus Torvalds 已提交
608 609
}

610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631
/* for_each_subchannel() callback used during initial scan. */
static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;

	if (cio_is_console(schid)) {
		sch = cio_get_console_subchannel();
	} else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch)) {
			int ret = PTR_ERR(sch);

			if (ret == -ENOMEM)
				panic("Out of memory in init_channel_subsystem\n");
			/* -ENXIO: no more subchannels. */
			/* -EIO: this subchannel set not supported. */
			if (ret == -ENXIO || ret == -EIO)
				return ret;
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * not working) so we do it now. This is true e.g. for the
	 * console subchannel.
	 */
	css_register_subchannel(sch);
	return 0;
}

L
Linus Torvalds 已提交
650
static void __init
651
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
L
Linus Torvalds 已提交
652
{
653
	if (css_general_characteristics.mcss) {
654 655 656
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
L
Linus Torvalds 已提交
657
#ifdef CONFIG_SMP
658
		css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
L
Linus Torvalds 已提交
659
#else
660
		css->global_pgid.pgid_high.cpu_addr = 0;
L
Linus Torvalds 已提交
661 662
#endif
	}
663 664 665 666 667 668
	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	css->global_pgid.tod_high = tod_high;

}

669 670 671 672 673 674
static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
675
	mutex_destroy(&css->mutex);
676 677 678 679 680
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
681 682 683
	kfree(css);
}

684 685 686 687 688
/* sysfs "cm_enable": show whether channel measurement is enabled. */
static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

/* sysfs "cm_enable": switch channel measurement on (1) or off (0). */
static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		/* Only disable when currently enabled. */
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		/* Only enable when currently disabled. */
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

727
/* Initialize channel_subsystems[@nr] (already allocated by the caller). */
static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	u32 tod_high;
	int ret;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

757 758 759 760 761 762 763 764 765 766 767
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
768
		mutex_lock(&css->mutex);
769 770 771
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
772
		mutex_unlock(&css->mutex);
773 774 775 776 777 778 779 780 781
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

L
Linus Torvalds 已提交
782 783 784 785 786 787 788 789
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 *
 * Fixes vs. previous version:
 * - the error path unregistered CRW_RSC_CSS although this function
 *   registers css_process_crw for CRW_RSC_SCH, leaving the handler
 *   installed on failure; it now unregisters CRW_RSC_SCH.
 * - slow_subchannel_set (allocated by idset_sch_new()) was released
 *   with kfree(); it is now released with its own idset_free(),
 *   guarded because the error path may run before the set exists.
 */
static int __init
init_channel_subsystem (void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out; /* No need to continue. */

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out_bus;
	default:
		max_ssid = 0;
	}
	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			/* release() frees css; don't kfree it again. */
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret)
			goto out_file;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	/* Scan for and register all present subchannels. */
	for_each_subchannel(__init_channel_subsystem, NULL);
	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	/* Tear down all fully set up css instances (indices < i). */
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
out_bus:
	bus_unregister(&css_bus_type);
out:
	/* Was CRW_RSC_CSS; this function registered CRW_RSC_SCH above. */
	crw_unregister_handler(CRW_RSC_SCH);
	chsc_free_sei_area();
	/* May be NULL when slow_subchannel_init() did not run/failed. */
	if (slow_subchannel_set) {
		idset_free(slow_subchannel_set);
		slow_subchannel_set = NULL;
	}
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

892 893 894 895 896
/* Return non-zero if @sch is its css's placeholder ("defunct") device. */
int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

897
static int css_bus_match(struct device *dev, struct device_driver *drv)
L
Linus Torvalds 已提交
898
{
899 900
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
901
	struct css_device_id *id;
L
Linus Torvalds 已提交
902

903 904 905 906
	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}
L
Linus Torvalds 已提交
907 908 909 910

	return 0;
}

C
Cornelia Huck 已提交
911
static int css_probe(struct device *dev)
912 913
{
	struct subchannel *sch;
C
Cornelia Huck 已提交
914
	int ret;
915 916

	sch = to_subchannel(dev);
917
	sch->driver = to_cssdriver(dev->driver);
C
Cornelia Huck 已提交
918 919 920 921
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
922 923
}

C
Cornelia Huck 已提交
924
static int css_remove(struct device *dev)
925 926
{
	struct subchannel *sch;
C
Cornelia Huck 已提交
927
	int ret;
928 929

	sch = to_subchannel(dev);
C
Cornelia Huck 已提交
930 931 932
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
933 934
}

C
Cornelia Huck 已提交
935
static void css_shutdown(struct device *dev)
936 937 938 939
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
C
Cornelia Huck 已提交
940
	if (sch->driver && sch->driver->shutdown)
941 942 943
		sch->driver->shutdown(sch);
}

944 945 946 947 948 949 950 951 952 953 954 955
/* Add subchannel type and modalias to the uevent environment. */
static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret = add_uevent_var(env, "ST=%01X", sch->st);

	if (ret)
		return ret;
	return add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
}

L
Linus Torvalds 已提交
956
struct bus_type css_bus_type = {
957 958 959 960 961
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
962
	.uevent   = css_uevent,
L
Linus Torvalds 已提交
963 964
};

965 966 967 968 969 970 971 972 973 974 975
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

L
Linus Torvalds 已提交
993 994 995 996
/* Bring up the channel subsystem once the driver core is available. */
subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);