css.c 32.8 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
L
Linus Torvalds 已提交
2
/*
3
 * driver for channel subsystem
L
Linus Torvalds 已提交
4
 *
S
Sebastian Ott 已提交
5
 * Copyright IBM Corp. 2002, 2010
6 7 8
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
L
Linus Torvalds 已提交
9
 */
10 11 12 13

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

14
#include <linux/export.h>
L
Linus Torvalds 已提交
15 16 17 18 19
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
20
#include <linux/reboot.h>
S
Sebastian Ott 已提交
21
#include <linux/proc_fs.h>
22 23
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
24
#include <asm/isc.h>
25
#include <asm/crw.h>
L
Linus Torvalds 已提交
26 27 28

#include "css.h"
#include "cio.h"
S
Sebastian Ott 已提交
29
#include "blacklist.h"
L
Linus Torvalds 已提交
30 31 32
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
33
#include "device.h"
34
#include "idset.h"
35
#include "chp.h"
L
Linus Torvalds 已提交
36 37

int css_init_done = 0;
38
int max_ssid;
L
Linus Torvalds 已提交
39

S
Sebastian Ott 已提交
40 41
#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
42
static struct bus_type css_bus_type;
L
Linus Torvalds 已提交
43

44
int
45 46 47 48 49 50 51
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
52 53 54 55 56 57 58
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
59 60 61
	return ret;
}

62 63 64 65 66 67 68 69 70 71 72 73 74
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

75 76
	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
77 78 79 80 81 82 83 84 85 86 87 88 89 90 91
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

/* Subchannel-id iterator callback: handle one unregistered subchannel. */
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;

	/* Ids still present in the set were not visited by the known pass. */
	if (!idset_sch_contains(cb->set, schid))
		return 0;
	return cb->fn_unknown_sch(schid, cb->data);
}

92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110
/*
 * Brute-force fallback callback: classify the subchannel by looking it
 * up on the bus and dispatch to the known or unknown handler.
 */
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch = get_subchannel_by_schid(schid);
	int rc = 0;

	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);	/* drop ref from lookup */
	} else if (cb->fn_unknown_sch) {
		rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

111 112 113 114 115 116 117 118 119 120
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;
121

122 123 124 125 126 127 128
	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

129 130 131 132 133 134 135
	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

136 137 138 139 140 141 142 143 144 145 146 147 148
	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

149 150
static void css_sch_todo(struct work_struct *work);

151 152 153 154 155 156 157 158 159 160 161 162
/* Allocate and initialize the per-subchannel spinlock and mutex. */
static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);
	return 0;
}

163 164
static void css_subchannel_release(struct device *dev)
{
165
	struct subchannel *sch = to_subchannel(dev);
166

167 168
	sch->config.intparm = 0;
	cio_commit_config(sch);
169
	kfree(sch->driver_override);
170 171
	kfree(sch->lock);
	kfree(sch);
172 173
}

S
Sebastian Ott 已提交
174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205
/*
 * Decide whether a subchannel described by @schib should be used.
 * I/O and message subchannels are rejected when invalid or when the
 * device number is blacklisted; all other types are accepted.
 */
static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err = 0;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib)) {
			err = -ENODEV;
		} else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		}
		break;
	default:
		break;
	}
	if (err)
		return err;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
	return 0;
}

struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
L
Linus Torvalds 已提交
206 207 208 209
{
	struct subchannel *sch;
	int ret;

S
Sebastian Ott 已提交
210
	ret = css_validate_subchannel(schid, schib);
211 212 213
	if (ret < 0)
		return ERR_PTR(ret);

214 215
	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
L
Linus Torvalds 已提交
216
		return ERR_PTR(-ENOMEM);
217

218
	sch->schid = schid;
S
Sebastian Ott 已提交
219 220
	sch->schib = *schib;
	sch->st = schib->pmcw.st;
221 222 223 224 225

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

226
	INIT_WORK(&sch->todo_work, css_sch_todo);
227
	sch->dev.release = &css_subchannel_release;
228
	sch->dev.dma_mask = &sch->dma_mask;
229
	device_initialize(&sch->dev);
230
	/*
231
	 * The physical addresses for some of the dma structures that can
232 233
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
234 235 236
	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
	if (ret)
		goto err;
237 238 239 240
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
241 242 243 244
	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err;

L
Linus Torvalds 已提交
245
	return sch;
246 247 248 249

err:
	kfree(sch);
	return ERR_PTR(ret);
L
Linus Torvalds 已提交
250 251
}

252
static int css_sch_device_register(struct subchannel *sch)
253 254 255 256
{
	int ret;

	mutex_lock(&sch->reg_mutex);
257 258
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
259
	ret = device_add(&sch->dev);
260 261 262 263
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

264 265 266 267
/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
268 269 270
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
271 272
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
273 274
	mutex_unlock(&sch->reg_mutex);
}
275
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
276

277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300
/* Build fallback SSD info from the PMCW path-installed mask. */
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int chp;

	memset(ssd, 0, sizeof(*ssd));
	ssd->path_mask = pmcw->pim;
	for (chp = 0; chp < 8; chp++) {
		if (!(pmcw->pim & (0x80 >> chp)))
			continue;
		chp_id_init(&ssd->chpid[chp]);
		ssd->chpid[chp].id = pmcw->chpid[chp];
	}
}

/* Register a channel path object for every path in the SSD path mask. */
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int chp;

	for (chp = 0; chp < 8; chp++) {
		if (ssd->path_mask & (0x80 >> chp))
			chp_new(ssd->chpid[chp]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

309 310
	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
311
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
312 313

	ssd_register_chpids(&sch->ssd_info);
314 315
}

316 317 318 319 320 321 322 323
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

J
Joe Perches 已提交
324
static DEVICE_ATTR_RO(type);
325 326 327 328 329 330 331 332 333

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

J
Joe Perches 已提交
334
static DEVICE_ATTR_RO(modalias);
335

336 337 338 339 340
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
341
	int ret;
342

343 344 345
	ret = driver_set_override(dev, &sch->driver_override, buf, count);
	if (ret)
		return ret;
346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

363 364 365
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
366
	&dev_attr_driver_override.attr,
367 368 369 370 371 372 373
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

374
static const struct attribute_group *default_subch_attr_groups[] = {
375 376 377 378
	&subch_attr_group,
	NULL,
};

379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398
static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
J
Joe Perches 已提交
399
static DEVICE_ATTR_RO(chpids);
400 401 402 403 404 405 406 407 408 409 410

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
J
Joe Perches 已提交
411
static DEVICE_ATTR_RO(pimpampom);
412

413 414 415 416 417 418 419
static ssize_t dev_busid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

420 421
	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
422 423 424 425 426 427 428
		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
				  pmcw->dev);
	else
		return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);

429 430 431
static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
432
	&dev_attr_dev_busid.attr,
433 434 435 436 437 438 439 440
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

441
int css_register_subchannel(struct subchannel *sch)
L
Linus Torvalds 已提交
442 443 444 445
{
	int ret;

	/* Initialize the subchannel structure */
446
	sch->dev.parent = &channel_subsystems[0]->device;
L
Linus Torvalds 已提交
447
	sch->dev.bus = &css_bus_type;
448
	sch->dev.groups = default_subch_attr_groups;
449 450 451 452

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

453
	css_update_ssd_info(sch);
L
Linus Torvalds 已提交
454
	/* make it known to the system */
455
	ret = css_sch_device_register(sch);
456
	if (ret) {
C
Cornelia Huck 已提交
457 458
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
459 460
		return ret;
	}
L
Linus Torvalds 已提交
461 462 463
	return ret;
}

S
Sebastian Ott 已提交
464
static int css_probe_device(struct subchannel_id schid, struct schib *schib)
L
Linus Torvalds 已提交
465 466
{
	struct subchannel *sch;
467
	int ret;
L
Linus Torvalds 已提交
468

S
Sebastian Ott 已提交
469
	sch = css_alloc_subchannel(schid, schib);
470 471 472
	if (IS_ERR(sch))
		return PTR_ERR(sch);

L
Linus Torvalds 已提交
473
	ret = css_register_subchannel(sch);
474 475 476
	if (ret)
		put_device(&sch->dev);

L
Linus Torvalds 已提交
477 478 479
	return ret;
}

C
Cornelia Huck 已提交
480
static int
481
check_subchannel(struct device *dev, const void *data)
C
Cornelia Huck 已提交
482 483
{
	struct subchannel *sch;
484
	struct subchannel_id *schid = (void *)data;
C
Cornelia Huck 已提交
485 486

	sch = to_subchannel(dev);
487
	return schid_equal(&sch->schid, schid);
C
Cornelia Huck 已提交
488 489
}

L
Linus Torvalds 已提交
490
struct subchannel *
491
get_subchannel_by_schid(struct subchannel_id schid)
L
Linus Torvalds 已提交
492 493 494
{
	struct device *dev;

C
Cornelia Huck 已提交
495
	dev = bus_find_device(&css_bus_type, NULL,
496
			      &schid, check_subchannel);
L
Linus Torvalds 已提交
497

C
Cornelia Huck 已提交
498
	return dev ? to_subchannel(dev) : NULL;
L
Linus Torvalds 已提交
499 500
}

501 502 503 504 505 506 507 508
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
509 510
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
511 512 513 514
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

515 516 517
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
S
Sebastian Ott 已提交
518
	int ccode;
519 520 521 522 523

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
S
Sebastian Ott 已提交
524 525 526 527 528 529 530 531 532
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;
533

S
Sebastian Ott 已提交
534
	return css_probe_device(schid, &schib);
535 536
}

C
Cornelia Huck 已提交
537 538 539 540 541 542 543 544 545 546 547 548
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
549 550 551 552
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
C
Cornelia Huck 已提交
553 554 555
	return ret;
}

556
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
557 558 559 560 561 562 563 564 565 566
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
567 568
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
L
Linus Torvalds 已提交
569 570
}

571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594
/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	/* An equal or higher-priority operation is already pending. */
	if (sch->todo >= todo)
		return;
	/* Pin the device while the work item is queued. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Work was already queued: drop the extra reference. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630

/* Slow-path work function: perform the pending subchannel operation. */
static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch = container_of(work, struct subchannel,
					      todo_work);
	enum sch_todo todo;
	int rc;

	/* Fetch and reset the pending operation under the lock. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);

	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN) {
			/* Re-arm the work item for another evaluation pass. */
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Drop the reference taken when the work was queued. */
	put_device(&sch->dev);
}

631
static struct idset *slow_subchannel_set;
632
static DEFINE_SPINLOCK(slow_subchannel_lock);
633
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
634
static atomic_t css_eval_scheduled;
635 636

static int __init slow_subchannel_init(void)
L
Linus Torvalds 已提交
637
{
638
	atomic_set(&css_eval_scheduled, 0);
639 640
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
C
Cornelia Huck 已提交
641
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
642 643 644
		return -ENOMEM;
	}
	return 0;
L
Linus Torvalds 已提交
645 646
}

647
static int slow_eval_known_fn(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
648
{
649 650
	int eval;
	int rc;
L
Linus Torvalds 已提交
651 652

	spin_lock_irq(&slow_subchannel_lock);
653 654 655 656 657 658 659
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
660 661 662 663 664
		/*
		 * The loop might take long time for platforms with lots of
		 * known devices. Allow scheduling here.
		 */
		cond_resched();
L
Linus Torvalds 已提交
665
	}
666 667 668 669 670 671 672 673 674 675 676
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
L
Linus Torvalds 已提交
677
	spin_unlock_irq(&slow_subchannel_lock);
678 679 680 681 682 683 684 685 686 687 688
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
689
			spin_lock_irq(&slow_subchannel_lock);
690
			idset_sch_del_subseq(slow_subchannel_set, schid);
691
			spin_unlock_irq(&slow_subchannel_lock);
692 693 694 695
			break;
		default:
			rc = 0;
		}
696 697 698
		/* Allow scheduling here since the containing loop might
		 * take a while.  */
		cond_resched();
699 700 701 702 703 704
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
705 706
	unsigned long flags;

707 708 709
	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
710 711 712 713 714 715
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
L
Linus Torvalds 已提交
716 717
}

718
static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
719
struct workqueue_struct *cio_work_q;
L
Linus Torvalds 已提交
720

721 722 723 724 725 726
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
727
	atomic_set(&css_eval_scheduled, 1);
728
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
729 730 731 732 733 734 735 736 737
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
738
	atomic_set(&css_eval_scheduled, 1);
739
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
740 741 742
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

743
static int __unset_registered(struct device *dev, void *data)
744
{
745 746
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);
747

748 749
	idset_sch_del(set, sch->schid);
	return 0;
750 751
}

752 753 754 755 756
static int __unset_online(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

757 758
	if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
		idset_sch_del(set, sch->schid);
759 760 761 762 763

	return 0;
}

void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
764
{
765
	unsigned long flags;
766
	struct idset *set;
767

768
	/* Find unregistered subchannels. */
769 770
	set = idset_sch_new();
	if (!set) {
771 772
		/* Fallback. */
		css_schedule_eval_all();
773 774
		return;
	}
775 776 777 778 779 780 781 782 783 784 785 786
	idset_fill(set);
	switch (cond) {
	case CSS_EVAL_UNREG:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
		break;
	case CSS_EVAL_NOT_ONLINE:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
		break;
	default:
		break;
	}

787 788
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
789
	idset_add_set(slow_subchannel_set, set);
790
	atomic_set(&css_eval_scheduled, 1);
791
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
792
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
793
	idset_free(set);
794 795
}

796 797
void css_wait_for_slow_path(void)
{
798
	flush_workqueue(cio_work_q);
799
}
800 801 802 803

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
804
	/* Schedule with a delay to allow merging of subsequent calls. */
805
	css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
806 807 808
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

L
Linus Torvalds 已提交
809 810 811
/*
 * Called from the machine check handler for subchannel report words.
 */
812
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
L
Linus Torvalds 已提交
813
{
814
	struct subchannel_id mchk_schid;
815
	struct subchannel *sch;
L
Linus Torvalds 已提交
816

817 818 819 820 821 822 823 824 825 826 827 828 829
	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
830
	init_subchannel_id(&mchk_schid);
831 832
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
833
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;
834

835 836 837 838 839 840 841
	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
842
	/*
L
Linus Torvalds 已提交
843 844 845 846
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
847
	css_evaluate_subchannel(mchk_schid, 0);
L
Linus Torvalds 已提交
848 849 850
}

static void __init
851
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
L
Linus Torvalds 已提交
852
{
853 854
	struct cpuid cpu_id;

855
	if (css_general_characteristics.mcss) {
856
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
857
		css->global_pgid.pgid_high.ext_cssid.cssid =
858
			css->id_valid ? css->cssid : 0;
859
	} else {
860
		css->global_pgid.pgid_high.cpu_addr = stap();
L
Linus Torvalds 已提交
861
	}
862 863 864
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
865 866 867
	css->global_pgid.tod_high = tod_high;
}

868
static void channel_subsystem_release(struct device *dev)
869
{
870
	struct channel_subsystem *css = to_css(dev);
871

872
	mutex_destroy(&css->mutex);
873 874 875
	kfree(css);
}

S
Sebastian Ott 已提交
876 877 878 879 880
static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

881
	if (!css->id_valid)
S
Sebastian Ott 已提交
882 883 884 885 886 887
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

888 889 890 891 892 893 894 895 896 897 898 899
static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
			    const char *buf, size_t count)
{
	CIO_TRACE_EVENT(4, "usr-rescan");

	css_schedule_eval_all();
	css_complete_work();

	return count;
}
static DEVICE_ATTR_WO(rescan);

S
Sebastian Ott 已提交
900 901
static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
902 903
{
	struct channel_subsystem *css = to_css(dev);
904
	int ret;
905

906 907 908 909
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
910 911
}

S
Sebastian Ott 已提交
912 913
static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
914 915
{
	struct channel_subsystem *css = to_css(dev);
916
	unsigned long val;
S
Sebastian Ott 已提交
917
	int ret;
918

919
	ret = kstrtoul(buf, 16, &val);
920 921
	if (ret)
		return ret;
922
	mutex_lock(&css->mutex);
923 924
	switch (val) {
	case 0:
925 926
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
927
	case 1:
928 929 930 931 932
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
933
	mutex_unlock(&css->mutex);
934 935
	return ret < 0 ? ret : count;
}
S
Sebastian Ott 已提交
936 937 938 939 940 941 942 943
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

S
Sebastian Ott 已提交
944 945
static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
946
	&dev_attr_rescan.attr,
S
Sebastian Ott 已提交
947 948 949 950 951 952 953
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

S
Sebastian Ott 已提交
954 955 956 957 958 959 960 961 962
static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};
963

S
Sebastian Ott 已提交
964
static const struct attribute_group *cssdev_attr_groups[] = {
S
Sebastian Ott 已提交
965
	&cssdev_attr_group,
S
Sebastian Ott 已提交
966 967 968
	&cssdev_cm_attr_group,
	NULL,
};
969

970
static int __init setup_css(int nr)
971
{
972
	struct channel_subsystem *css;
973
	int ret;
974

975 976
	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
977
		return -ENOMEM;
978 979 980 981 982

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
983 984 985 986 987
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
988 989 990 991 992
	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
	if (ret) {
		kfree(css);
		goto out_err;
	}
993 994

	mutex_init(&css->mutex);
995 996 997 998 999 1000
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

1017 1018
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
1019
	mutex_init(&css->pseudo_subchannel->reg_mutex);
1020
	ret = css_sch_create_locks(css->pseudo_subchannel);
1021
	if (ret) {
1022
		kfree(css->pseudo_subchannel);
1023 1024
		device_unregister(&css->device);
		goto out_err;
1025
	}
1026

1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
L
Linus Torvalds 已提交
1039 1040
}

1041 1042 1043 1044
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
S
Sebastian Ott 已提交
1045 1046
	struct channel_subsystem *css;
	int ret;
1047 1048

	ret = NOTIFY_DONE;
S
Sebastian Ott 已提交
1049
	for_each_css(css) {
1050
		mutex_lock(&css->mutex);
1051 1052 1053
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
1054
		mutex_unlock(&css->mutex);
1055 1056 1057 1058 1059 1060 1061 1062 1063
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168
#define  CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}

/*
 * Create a gen_pool backed by @nr_pages of coherent DMA memory from
 * @dma_dev. Best effort: on allocation failure the pool is returned
 * with however many pages could be obtained (possibly zero).
 *
 * Fix: the return value of gen_pool_add_virt() was ignored; if adding
 * a chunk failed (e.g. -ENOMEM for the pool's bookkeeping), the just
 * allocated coherent page leaked and was never usable.
 */
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		if (gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				      dma_addr, PAGE_SIZE, -1)) {
			/* Pool can't track the chunk: give it back. */
			dma_free_coherent(dma_dev, PAGE_SIZE, cpu_addr,
					  dma_addr);
			return gp_dma;
		}
	}
	return gp_dma;
}

/*
 * gen_pool_for_each_chunk() callback: hand one chunk's coherent DMA
 * memory back to the device passed in via @data.
 */
static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	/* end_addr is inclusive, hence the +1 to recover the chunk size. */
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			 (void *) chunk->start_addr,
			 (dma_addr_t) chunk->phys_addr);
}

/*
 * Tear down a pool created by cio_gp_dma_create(): free each chunk's
 * coherent DMA memory, then destroy the pool itself. A NULL pool is a
 * no-op.
 */
void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}

/*
 * Set up the global css DMA pool with a single initial page.
 * Returns 0 on success, -ENOMEM if the pool could not be created.
 */
static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);

	return cio_dma_pool ? 0 : -ENOMEM;
}

/*
 * Allocate @size bytes of zeroed DMA memory from @gp_dma, growing the pool
 * with page-rounded coherent chunks from @dma_dev when it is exhausted.
 * Returns NULL if @gp_dma is NULL or no memory could be obtained.
 */
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	dma_addr_t dma_addr;
	unsigned long addr;
	size_t chunk_size;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_alloc(gp_dma, size);
	while (!addr) {
		chunk_size = round_up(size, PAGE_SIZE);
		addr = (unsigned long) dma_alloc_coherent(dma_dev,
					 chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		/*
		 * If the chunk cannot be added to the pool, free it again:
		 * silently ignoring the failure would leak the chunk and
		 * retry the same failing sequence forever.
		 */
		if (gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size,
				      -1)) {
			dma_free_coherent(dma_dev, chunk_size, (void *) addr,
					  dma_addr);
			return NULL;
		}
		addr = gen_pool_alloc(gp_dma, size);
	}
	return (void *) addr;
}

/*
 * Return a buffer obtained from cio_gp_dma_zalloc() to its pool. The
 * memory is zeroed before being freed so that later allocations from the
 * pool hand out zero-filled memory again. Freeing NULL is a no-op.
 */
void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}

/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Returns NULL if the pool has not been initialized or is exhausted.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

/* Free memory obtained via cio_dma_zalloc() back to the css global pool. */
void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}

L
Linus Torvalds 已提交
1169 1170
/*
 * Now that the driver core is running, we can setup our channel subsystem.
1171
 * The struct subchannel's are created during probing.
L
Linus Torvalds 已提交
1172
 */
S
Sebastian Ott 已提交
1173
static int __init css_bus_init(void)
L
Linus Torvalds 已提交
1174
{
1175
	int ret, i;
L
Linus Torvalds 已提交
1176

S
Sebastian Ott 已提交
1177 1178 1179 1180
	ret = chsc_init();
	if (ret)
		return ret;

1181
	chsc_determine_css_characteristics();
1182 1183
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
1184
	if (ret)
1185
		max_ssid = 0;
1186 1187
	else /* Success. */
		max_ssid = __MAX_SSID;
1188

1189 1190 1191 1192
	ret = slow_subchannel_init();
	if (ret)
		goto out;

1193
	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
1194 1195 1196
	if (ret)
		goto out;

L
Linus Torvalds 已提交
1197 1198 1199
	if ((ret = bus_register(&css_bus_type)))
		goto out;

1200
	/* Setup css structure. */
S
Sebastian Ott 已提交
1201
	for (i = 0; i <= MAX_CSS_IDX; i++) {
1202
		ret = setup_css(i);
1203
		if (ret)
1204
			goto out_unregister;
1205
	}
1206 1207
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
1208
		goto out_unregister;
1209 1210
	ret = cio_dma_pool_init();
	if (ret)
1211
		goto out_unregister_rn;
1212
	airq_init();
L
Linus Torvalds 已提交
1213 1214
	css_init_done = 1;

1215
	/* Enable default isc for I/O subchannels. */
1216
	isc_register(IO_SCH_ISC);
L
Linus Torvalds 已提交
1217 1218

	return 0;
1219 1220
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
1221
out_unregister:
1222 1223
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];
1224 1225
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
1226
	}
L
Linus Torvalds 已提交
1227 1228
	bus_unregister(&css_bus_type);
out:
S
Sebastian Ott 已提交
1229
	crw_unregister_handler(CRW_RSC_SCH);
1230
	idset_free(slow_subchannel_set);
S
Sebastian Ott 已提交
1231
	chsc_init_cleanup();
1232 1233
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
L
Linus Torvalds 已提交
1234 1235 1236
	return ret;
}

S
Sebastian Ott 已提交
1237 1238 1239 1240
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

S
Sebastian Ott 已提交
1241
	for_each_css(css) {
S
Sebastian Ott 已提交
1242 1243 1244 1245
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
S
Sebastian Ott 已提交
1246
	crw_unregister_handler(CRW_RSC_SCH);
1247
	idset_free(slow_subchannel_set);
S
Sebastian Ott 已提交
1248
	chsc_init_cleanup();
S
Sebastian Ott 已提交
1249 1250 1251 1252 1253 1254 1255 1256 1257 1258
	isc_unregister(IO_SCH_ISC);
}

/*
 * Subsystem initcall: bring up the css bus, the cio workqueue and the I/O
 * subchannel type, then kick off evaluation of all subchannels.
 */
static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

S
Sebastian Ott 已提交
1282 1283 1284 1285 1286
static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
1287
		return cssdrv->settle();
S
Sebastian Ott 已提交
1288 1289 1290
	return 0;
}

1291
int css_complete_work(void)
S
Sebastian Ott 已提交
1292 1293 1294 1295
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
1296 1297 1298 1299
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
S
Sebastian Ott 已提交
1300 1301
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
1302
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
S
Sebastian Ott 已提交
1303 1304 1305
}


S
Sebastian Ott 已提交
1306 1307 1308 1309 1310 1311
/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
S
Sebastian Ott 已提交
1312 1313
	css_complete_work();
	return 0;
S
Sebastian Ott 已提交
1314 1315 1316
}
subsys_initcall_sync(channel_subsystem_init_sync);

#ifdef CONFIG_PROC_FS
/*
 * Write handler for /proc/cio_settle: process pending channel report
 * words, then wait for all subchannel work to complete. Returns @count on
 * success, a negative errno otherwise.
 */
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
	.proc_open	= nonseekable_open,
	.proc_write	= cio_settle_write,
	.proc_lseek	= no_llseek,
};

/* Create the write-only /proc/cio_settle entry. */
static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

1348 1349
int sch_is_pseudo_sch(struct subchannel *sch)
{
1350 1351
	if (!sch->dev.parent)
		return 0;
1352 1353 1354
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

1355
static int css_bus_match(struct device *dev, struct device_driver *drv)
L
Linus Torvalds 已提交
1356
{
1357 1358
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
1359
	struct css_device_id *id;
L
Linus Torvalds 已提交
1360

1361 1362 1363 1364
	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

1365 1366 1367 1368
	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}
L
Linus Torvalds 已提交
1369 1370 1371 1372

	return 0;
}

C
Cornelia Huck 已提交
1373
static int css_probe(struct device *dev)
1374 1375
{
	struct subchannel *sch;
C
Cornelia Huck 已提交
1376
	int ret;
1377 1378

	sch = to_subchannel(dev);
1379
	sch->driver = to_cssdriver(dev->driver);
C
Cornelia Huck 已提交
1380 1381 1382 1383
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
1384 1385
}

1386
static void css_remove(struct device *dev)
1387 1388 1389 1390
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
1391 1392
	if (sch->driver->remove)
		sch->driver->remove(sch);
C
Cornelia Huck 已提交
1393
	sch->driver = NULL;
1394 1395
}

C
Cornelia Huck 已提交
1396
static void css_shutdown(struct device *dev)
1397 1398 1399 1400
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
C
Cornelia Huck 已提交
1401
	if (sch->driver && sch->driver->shutdown)
1402 1403 1404
		sch->driver->shutdown(sch);
}

1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416
static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

1417
static struct bus_type css_bus_type = {
1418 1419 1420 1421 1422
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
1423
	.uevent   = css_uevent,
L
Linus Torvalds 已提交
1424 1425
};

1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister; counterpart of
 * css_driver_register().
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);