css.c 32.9 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
L
Linus Torvalds 已提交
2
/*
3
 * driver for channel subsystem
L
Linus Torvalds 已提交
4
 *
S
Sebastian Ott 已提交
5
 * Copyright IBM Corp. 2002, 2010
6 7 8
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
L
Linus Torvalds 已提交
9
 */
10 11 12 13

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

14
#include <linux/export.h>
L
Linus Torvalds 已提交
15 16 17 18 19
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
20
#include <linux/reboot.h>
S
Sebastian Ott 已提交
21
#include <linux/proc_fs.h>
22 23
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
24
#include <asm/isc.h>
25
#include <asm/crw.h>
L
Linus Torvalds 已提交
26 27 28

#include "css.h"
#include "cio.h"
S
Sebastian Ott 已提交
29
#include "blacklist.h"
L
Linus Torvalds 已提交
30 31 32
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
33
#include "device.h"
34
#include "idset.h"
35
#include "chp.h"
L
Linus Torvalds 已提交
36 37

int css_init_done = 0;
38
int max_ssid;
L
Linus Torvalds 已提交
39

S
Sebastian Ott 已提交
40 41
#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
42
static struct bus_type css_bus_type;
L
Linus Torvalds 已提交
43

44
int
45 46 47 48 49 50 51
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
52 53 54 55 56 57 58
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
59 60 61
	return ret;
}

62 63 64 65 66 67 68 69 70 71 72 73 74
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

75 76
	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
77 78 79 80 81 82 83 84 85 86 87 88 89 90 91
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110
/*
 * Brute-force fallback callback: classify each id as known or unknown
 * on the fly (used when no idset could be allocated).
 */
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch = get_subchannel_by_schid(schid);
	int rc = 0;

	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		/* Drop the reference taken by the lookup. */
		put_device(&sch->dev);
	} else if (cb->fn_unknown_sch) {
		rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

111 112 113 114 115 116 117 118 119 120
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;
121

122 123 124 125 126 127 128
	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

129 130 131 132 133 134 135
	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

136 137 138 139 140 141 142 143 144 145 146 147 148
	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

149 150
static void css_sch_todo(struct work_struct *work);

151 152 153 154 155 156 157 158 159 160 161 162
/*
 * Allocate and initialize the subchannel spinlock and initialize the
 * registration mutex. Returns 0 on success, -ENOMEM on allocation failure.
 * The lock is freed by css_subchannel_release() (or by the caller's error
 * path) - see css_alloc_subchannel().
 */
static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

163 164
static void css_subchannel_release(struct device *dev)
{
165
	struct subchannel *sch = to_subchannel(dev);
166

167 168
	sch->config.intparm = 0;
	cio_commit_config(sch);
169
	kfree(sch->driver_override);
170 171
	kfree(sch->lock);
	kfree(sch);
172 173
}

S
Sebastian Ott 已提交
174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205
/*
 * Decide whether a subchannel described by @schib should get a device.
 * I/O and message subchannels must pass css_sch_is_valid() and must not
 * be on the device blacklist; all other subchannel types are accepted
 * as-is. Returns 0 if the subchannel is usable, -ENODEV otherwise.
 */
static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		/* Other subchannel types are not validated further. */
		err = 0;
	}
	if (err)
		goto out;

	/* Only log subchannels that passed validation. */
	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}

struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
L
Linus Torvalds 已提交
206 207 208 209
{
	struct subchannel *sch;
	int ret;

S
Sebastian Ott 已提交
210
	ret = css_validate_subchannel(schid, schib);
211 212 213
	if (ret < 0)
		return ERR_PTR(ret);

214 215
	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
L
Linus Torvalds 已提交
216
		return ERR_PTR(-ENOMEM);
217

218
	sch->schid = schid;
S
Sebastian Ott 已提交
219 220
	sch->schib = *schib;
	sch->st = schib->pmcw.st;
221 222 223 224 225

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

226
	INIT_WORK(&sch->todo_work, css_sch_todo);
227
	sch->dev.release = &css_subchannel_release;
228
	sch->dev.dma_mask = &sch->dma_mask;
229
	device_initialize(&sch->dev);
230
	/*
231
	 * The physical addresses for some of the dma structures that can
232 233
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
234 235 236
	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
	if (ret)
		goto err;
237 238 239 240
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
241 242 243 244
	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err;

L
Linus Torvalds 已提交
245
	return sch;
246 247 248 249

err:
	kfree(sch);
	return ERR_PTR(ret);
L
Linus Torvalds 已提交
250 251
}

252
static int css_sch_device_register(struct subchannel *sch)
253 254 255 256
{
	int ret;

	mutex_lock(&sch->reg_mutex);
257 258
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
259
	ret = device_add(&sch->dev);
260 261 262 263
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

264 265 266 267
/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
268 269 270
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
271 272
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
273 274
	mutex_unlock(&sch->reg_mutex);
}
275
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
276

277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
301
			chp_new(ssd->chpid[i]);
302 303 304 305 306 307 308
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

309 310
	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
311
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
312 313

	ssd_register_chpids(&sch->ssd_info);
314 315
}

316 317 318 319 320 321 322 323
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

J
Joe Perches 已提交
324
static DEVICE_ATTR_RO(type);
325 326 327 328 329 330 331 332 333

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

J
Joe Perches 已提交
334
static DEVICE_ATTR_RO(modalias);
335

336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382
/*
 * sysfs store for driver_override: record a driver name on the
 * subchannel (consumed by bus matching code not visible in this file -
 * NOTE(review): confirm against the css bus match implementation).
 * Writing an empty string clears the override.
 */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	/* Strip a trailing newline as typically appended by "echo". */
	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	/* Swap the override pointer under the device lock. */
	device_lock(dev);
	old = sch->driver_override;
	if (strlen(driver_override)) {
		sch->driver_override = driver_override;
	} else {
		/* Empty write: drop the override entirely. */
		kfree(driver_override);
		sch->driver_override = NULL;
	}
	device_unlock(dev);

	/* Free the previous override outside of nothing - after unlock. */
	kfree(old);

	return count;
}

/* sysfs show for driver_override (read under the device lock). */
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

383 384 385
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
386
	&dev_attr_driver_override.attr,
387 388 389 390 391 392 393
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

394
static const struct attribute_group *default_subch_attr_groups[] = {
395 396 397 398
	&subch_attr_group,
	NULL,
};

399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418
static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
J
Joe Perches 已提交
419
static DEVICE_ATTR_RO(chpids);
420 421 422 423 424 425 426 427 428 429 430

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
J
Joe Perches 已提交
431
static DEVICE_ATTR_RO(pimpampom);
432 433 434 435 436 437 438 439 440 441 442 443

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

444
int css_register_subchannel(struct subchannel *sch)
L
Linus Torvalds 已提交
445 446 447 448
{
	int ret;

	/* Initialize the subchannel structure */
449
	sch->dev.parent = &channel_subsystems[0]->device;
L
Linus Torvalds 已提交
450
	sch->dev.bus = &css_bus_type;
451
	sch->dev.groups = default_subch_attr_groups;
452 453 454 455

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

456 457 458 459 460
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
461 462 463
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
464
	 */
465
	dev_set_uevent_suppress(&sch->dev, 1);
466
	css_update_ssd_info(sch);
L
Linus Torvalds 已提交
467
	/* make it known to the system */
468
	ret = css_sch_device_register(sch);
469
	if (ret) {
C
Cornelia Huck 已提交
470 471
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
472 473
		return ret;
	}
474 475 476 477 478 479
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
480
		dev_set_uevent_suppress(&sch->dev, 0);
481 482
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
L
Linus Torvalds 已提交
483 484 485
	return ret;
}

S
Sebastian Ott 已提交
486
static int css_probe_device(struct subchannel_id schid, struct schib *schib)
L
Linus Torvalds 已提交
487 488
{
	struct subchannel *sch;
489
	int ret;
L
Linus Torvalds 已提交
490

S
Sebastian Ott 已提交
491
	sch = css_alloc_subchannel(schid, schib);
492 493 494
	if (IS_ERR(sch))
		return PTR_ERR(sch);

L
Linus Torvalds 已提交
495
	ret = css_register_subchannel(sch);
496 497 498
	if (ret)
		put_device(&sch->dev);

L
Linus Torvalds 已提交
499 500 501
	return ret;
}

C
Cornelia Huck 已提交
502
static int
503
check_subchannel(struct device *dev, const void *data)
C
Cornelia Huck 已提交
504 505
{
	struct subchannel *sch;
506
	struct subchannel_id *schid = (void *)data;
C
Cornelia Huck 已提交
507 508

	sch = to_subchannel(dev);
509
	return schid_equal(&sch->schid, schid);
C
Cornelia Huck 已提交
510 511
}

L
Linus Torvalds 已提交
512
struct subchannel *
513
get_subchannel_by_schid(struct subchannel_id schid)
L
Linus Torvalds 已提交
514 515 516
{
	struct device *dev;

C
Cornelia Huck 已提交
517
	dev = bus_find_device(&css_bus_type, NULL,
518
			      &schid, check_subchannel);
L
Linus Torvalds 已提交
519

C
Cornelia Huck 已提交
520
	return dev ? to_subchannel(dev) : NULL;
L
Linus Torvalds 已提交
521 522
}

523 524 525 526 527 528 529 530
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
531 532
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
533 534 535 536
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

537 538 539
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
S
Sebastian Ott 已提交
540
	int ccode;
541 542 543 544 545

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
S
Sebastian Ott 已提交
546 547 548 549 550 551 552 553 554
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;
555

S
Sebastian Ott 已提交
556
	return css_probe_device(schid, &schib);
557 558
}

C
Cornelia Huck 已提交
559 560 561 562 563 564 565 566 567 568 569 570
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
571 572 573 574
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
C
Cornelia Huck 已提交
575 576 577
	return ret;
}

578
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
579 580 581 582 583 584 585 586 587 588
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
589 590
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
L
Linus Torvalds 已提交
591 592
}

593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616
/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	/* Numerically higher todo values win; never downgrade. */
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
617
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652

/* Workqueue function: perform the todo recorded by css_sched_sch_todo(). */
static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	/* Reset under the lock so new todos can be scheduled concurrently. */
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			/* css_sched_sch_todo() requires the lock held. */
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

653 654
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
655 656
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;
657 658

static int __init slow_subchannel_init(void)
L
Linus Torvalds 已提交
659
{
660
	spin_lock_init(&slow_subchannel_lock);
661 662
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
663 664
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
C
Cornelia Huck 已提交
665
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
666 667 668
		return -ENOMEM;
	}
	return 0;
L
Linus Torvalds 已提交
669 670
}

671
static int slow_eval_known_fn(struct subchannel *sch, void *data)
L
Linus Torvalds 已提交
672
{
673 674
	int eval;
	int rc;
L
Linus Torvalds 已提交
675 676

	spin_lock_irq(&slow_subchannel_lock);
677 678 679 680 681 682 683
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
684 685 686 687 688
		/*
		 * The loop might take long time for platforms with lots of
		 * known devices. Allow scheduling here.
		 */
		cond_resched();
L
Linus Torvalds 已提交
689
	}
690 691 692 693 694 695 696 697 698 699 700
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
L
Linus Torvalds 已提交
701
	spin_unlock_irq(&slow_subchannel_lock);
702 703 704 705 706 707 708 709 710 711 712
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
713
			spin_lock_irq(&slow_subchannel_lock);
714
			idset_sch_del_subseq(slow_subchannel_set, schid);
715
			spin_unlock_irq(&slow_subchannel_lock);
716 717 718 719
			break;
		default:
			rc = 0;
		}
720 721 722
		/* Allow scheduling here since the containing loop might
		 * take a while.  */
		cond_resched();
723 724 725 726 727 728
	}
	return rc;
}

/* Workqueue function: run both slow-path stages, then signal idleness. */
static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		/* Nothing left to do - let waiters proceed. */
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;
L
Linus Torvalds 已提交
744

745 746 747 748 749 750
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
751
	atomic_set(&css_eval_scheduled, 1);
752
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
753 754 755 756 757 758 759 760 761
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
762
	atomic_set(&css_eval_scheduled, 1);
763
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
764 765 766
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

767
static int __unset_registered(struct device *dev, void *data)
768
{
769 770
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);
771

772 773
	idset_sch_del(set, sch->schid);
	return 0;
774 775
}

776
void css_schedule_eval_all_unreg(unsigned long delay)
777
{
778 779
	unsigned long flags;
	struct idset *unreg_set;
780

781 782 783 784 785
	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
786 787
		return;
	}
788 789 790 791 792 793
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
794
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
795 796
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
797 798
}

799 800
void css_wait_for_slow_path(void)
{
801
	flush_workqueue(cio_work_q);
802
}
803 804 805 806

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
807 808
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_all_unreg(1 * HZ);
809 810 811
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

L
Linus Torvalds 已提交
812 813 814
/*
 * Called from the machine check handler for subchannel report words.
 */
815
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
L
Linus Torvalds 已提交
816
{
817
	struct subchannel_id mchk_schid;
818
	struct subchannel *sch;
L
Linus Torvalds 已提交
819

820 821 822 823 824 825 826 827 828 829 830 831 832
	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
833
	init_subchannel_id(&mchk_schid);
834 835
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
836
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;
837

838 839 840 841 842 843 844
	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
845
	/*
L
Linus Torvalds 已提交
846 847 848 849
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
850
	css_evaluate_subchannel(mchk_schid, 0);
L
Linus Torvalds 已提交
851 852 853
}

static void __init
854
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
L
Linus Torvalds 已提交
855
{
856 857
	struct cpuid cpu_id;

858
	if (css_general_characteristics.mcss) {
859
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
860
		css->global_pgid.pgid_high.ext_cssid.cssid =
861
			css->id_valid ? css->cssid : 0;
862
	} else {
863
		css->global_pgid.pgid_high.cpu_addr = stap();
L
Linus Torvalds 已提交
864
	}
865 866 867
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
868 869 870
	css->global_pgid.tod_high = tod_high;
}

871
static void channel_subsystem_release(struct device *dev)
872
{
873
	struct channel_subsystem *css = to_css(dev);
874

875
	mutex_destroy(&css->mutex);
876 877 878
	kfree(css);
}

S
Sebastian Ott 已提交
879 880 881 882 883
static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

884
	if (!css->id_valid)
S
Sebastian Ott 已提交
885 886 887 888 889 890
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

S
Sebastian Ott 已提交
891 892
static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
893 894
{
	struct channel_subsystem *css = to_css(dev);
895
	int ret;
896

897 898 899 900
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
901 902
}

S
Sebastian Ott 已提交
903 904
static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
905 906
{
	struct channel_subsystem *css = to_css(dev);
907
	unsigned long val;
S
Sebastian Ott 已提交
908
	int ret;
909

910
	ret = kstrtoul(buf, 16, &val);
911 912
	if (ret)
		return ret;
913
	mutex_lock(&css->mutex);
914 915
	switch (val) {
	case 0:
916 917
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
918
	case 1:
919 920 921 922 923
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
924
	mutex_unlock(&css->mutex);
925 926
	return ret < 0 ? ret : count;
}
S
Sebastian Ott 已提交
927 928 929 930 931 932 933 934
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

S
Sebastian Ott 已提交
935 936 937 938 939 940 941 942 943
static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

S
Sebastian Ott 已提交
944 945 946 947 948 949 950 951 952
static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};
953

S
Sebastian Ott 已提交
954
static const struct attribute_group *cssdev_attr_groups[] = {
S
Sebastian Ott 已提交
955
	&cssdev_attr_group,
S
Sebastian Ott 已提交
956 957 958
	&cssdev_cm_attr_group,
	NULL,
};
959

960
/*
 * Allocate, initialize and register channel subsystem @nr together
 * with its "defunct" pseudo subchannel. On any failure the global
 * channel_subsystems[nr] slot is reset to NULL.
 */
static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
	if (ret) {
		kfree(css);
		goto out_err;
	}

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		/* Release callback frees css from here on. */
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

1031 1032 1033 1034
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
S
Sebastian Ott 已提交
1035 1036
	struct channel_subsystem *css;
	int ret;
1037 1038

	ret = NOTIFY_DONE;
S
Sebastian Ott 已提交
1039
	for_each_css(css) {
1040
		mutex_lock(&css->mutex);
1041 1042 1043
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
1044
		mutex_unlock(&css->mutex);
1045 1046 1047 1048 1049 1050 1051 1052 1053
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158
/* All cio DMA pool allocations are zeroed - see cio_gp_dma_free() too. */
#define  CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}

/*
 * Create a genalloc pool backed by up to @nr_pages coherent DMA pages
 * allocated against @dma_dev. Minimum allocation granularity is 8 bytes
 * (order 3). If a page allocation fails mid-way, the pool is returned
 * with however many pages were added so far; returns NULL only when the
 * pool itself could not be created.
 */
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}

static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			 (void *) chunk->start_addr,
			 (dma_addr_t) chunk->phys_addr);
}

/*
 * Destroy a pool created by cio_gp_dma_create(): return every chunk's
 * backing memory to @dma_dev, then destroy the pool itself.
 */
void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}

/* Set up the global css DMA pool, seeded with a single page. */
static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);

	return cio_dma_pool ? 0 : -ENOMEM;
}

/*
 * Allocate @size bytes from @gp_dma.  If the pool is exhausted, grow it
 * with freshly allocated DMA-coherent pages from @dma_dev and retry.
 * Memory handed out is zeroed (CIO_DMA_GFP zeroes new chunks and
 * cio_gp_dma_free() scrubs buffers on release).
 */
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	dma_addr_t dma_handle;
	unsigned long vaddr;
	size_t growth;

	if (!gp_dma)
		return NULL;
	for (;;) {
		vaddr = gen_pool_alloc(gp_dma, size);
		if (vaddr)
			break;
		/* Pool exhausted: add a page-rounded chunk and try again. */
		growth = round_up(size, PAGE_SIZE);
		vaddr = (unsigned long) dma_alloc_coherent(dma_dev, growth,
							   &dma_handle,
							   CIO_DMA_GFP);
		if (!vaddr)
			return NULL;
		gen_pool_add_virt(gp_dma, vaddr, dma_handle, growth, -1);
	}
	return (void *) vaddr;
}

/*
 * Return @cpu_addr (of @size bytes) to @gp_dma.  The buffer is zeroed
 * first so that a later cio_gp_dma_zalloc() can hand it out pre-zeroed.
 */
void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}

/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	struct device *css_dev = cio_get_dma_css_dev();

	return cio_gp_dma_zalloc(cio_dma_pool, css_dev, size);
}

/* Return memory obtained via cio_dma_zalloc() to the global css pool. */
void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}

L
Linus Torvalds 已提交
1159 1160
/*
 * Now that the driver core is running, we can setup our channel subsystem.
1161
 * The struct subchannel's are created during probing.
L
Linus Torvalds 已提交
1162
 */
S
Sebastian Ott 已提交
1163
static int __init css_bus_init(void)
L
Linus Torvalds 已提交
1164
{
1165
	int ret, i;
L
Linus Torvalds 已提交
1166

S
Sebastian Ott 已提交
1167 1168 1169 1170
	ret = chsc_init();
	if (ret)
		return ret;

1171
	chsc_determine_css_characteristics();
1172 1173
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
1174
	if (ret)
1175
		max_ssid = 0;
1176 1177
	else /* Success. */
		max_ssid = __MAX_SSID;
1178

1179 1180 1181 1182
	ret = slow_subchannel_init();
	if (ret)
		goto out;

1183
	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
1184 1185 1186
	if (ret)
		goto out;

L
Linus Torvalds 已提交
1187 1188 1189
	if ((ret = bus_register(&css_bus_type)))
		goto out;

1190
	/* Setup css structure. */
S
Sebastian Ott 已提交
1191
	for (i = 0; i <= MAX_CSS_IDX; i++) {
1192
		ret = setup_css(i);
1193
		if (ret)
1194
			goto out_unregister;
1195
	}
1196 1197
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
1198
		goto out_unregister;
1199 1200
	ret = cio_dma_pool_init();
	if (ret)
1201
		goto out_unregister_rn;
1202
	airq_init();
L
Linus Torvalds 已提交
1203 1204
	css_init_done = 1;

1205
	/* Enable default isc for I/O subchannels. */
1206
	isc_register(IO_SCH_ISC);
L
Linus Torvalds 已提交
1207 1208

	return 0;
1209 1210
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
1211
out_unregister:
1212 1213
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];
1214 1215
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
1216
	}
L
Linus Torvalds 已提交
1217 1218
	bus_unregister(&css_bus_type);
out:
S
Sebastian Ott 已提交
1219
	crw_unregister_handler(CRW_RSC_SCH);
1220
	idset_free(slow_subchannel_set);
S
Sebastian Ott 已提交
1221
	chsc_init_cleanup();
1222 1223
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
L
Linus Torvalds 已提交
1224 1225 1226
	return ret;
}

S
Sebastian Ott 已提交
1227 1228 1229 1230
/*
 * Tear down everything css_bus_init() set up, in reverse order:
 * css devices, bus, CRW handler, slow-path set, chsc state, and the
 * default I/O interruption subclass.
 */
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

/*
 * Top-level channel subsystem initialization: bring up the css bus,
 * create the cio workqueue, initialize the I/O subchannel driver and
 * kick off the initial subchannel evaluation.  Runs at subsys_initcall
 * time; returns 0 on success or a negative errno.
 */
static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

S
Sebastian Ott 已提交
1272 1273 1274 1275 1276
/*
 * bus_for_each_drv() callback: give a css driver the chance to flush its
 * pending setup work via its optional ->settle() hook.
 */
static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	return cssdrv->settle ? cssdrv->settle() : 0;
}

1281
/*
 * Wait until all outstanding subchannel evaluation and driver-specific
 * setup work has completed.  Returns 0 on success or -EINTR if the wait
 * was interrupted by a signal.
 */
int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}


S
Sebastian Ott 已提交
1296 1297 1298 1299 1300 1301
/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Best effort: an interrupted wait is not fatal at boot time. */
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

S
Sebastian Ott 已提交
1307 1308 1309 1310
#ifdef CONFIG_PROC_FS
/*
 * Write handler for /proc/cio_settle: process all pending channel report
 * words and wait for the resulting subchannel work to finish.  Returns
 * @count on success or a negative errno.
 */
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

1320 1321 1322 1323
/* /proc/cio_settle is write-only; no read or meaningful seek support. */
static const struct proc_ops cio_settle_proc_ops = {
	.proc_open	= nonseekable_open,
	.proc_write	= cio_settle_write,
	.proc_lseek	= no_llseek,
};

/* Create the root-writable /proc/cio_settle entry. */
static int __init cio_settle_init(void)
{
	if (!proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops))
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

1338 1339
int sch_is_pseudo_sch(struct subchannel *sch)
{
1340 1341
	if (!sch->dev.parent)
		return 0;
1342 1343 1344
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

1345
/*
 * Bus match callback: decide whether @drv can drive subchannel @dev.
 * Returns 1 on a match, 0 otherwise.
 */
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	/* Match the subchannel type against the driver's id table. */
	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

C
Cornelia Huck 已提交
1363
static int css_probe(struct device *dev)
1364 1365
{
	struct subchannel *sch;
C
Cornelia Huck 已提交
1366
	int ret;
1367 1368

	sch = to_subchannel(dev);
1369
	sch->driver = to_cssdriver(dev->driver);
C
Cornelia Huck 已提交
1370 1371 1372 1373
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
1374 1375
}

C
Cornelia Huck 已提交
1376
static int css_remove(struct device *dev)
1377 1378
{
	struct subchannel *sch;
C
Cornelia Huck 已提交
1379
	int ret;
1380 1381

	sch = to_subchannel(dev);
C
Cornelia Huck 已提交
1382 1383 1384
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
1385 1386
}

C
Cornelia Huck 已提交
1387
static void css_shutdown(struct device *dev)
1388 1389 1390 1391
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
C
Cornelia Huck 已提交
1392
	if (sch->driver && sch->driver->shutdown)
1393 1394 1395
		sch->driver->shutdown(sch);
}

1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407
/*
 * Bus uevent callback: export the subchannel type (ST) and a matching
 * modalias so userspace can autoload the right driver.
 */
static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	return add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
}

1408
/* The css bus: ties subchannel devices to css drivers via the hooks above. */
static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};

1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister; it undoes a previous
 * css_driver_register() call.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);