/*
 *  drivers/s390/cio/css.c
 *  driver for channel subsystem
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *			 IBM Corporation
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"

int need_rescan = 0;
int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *css[__MAX_CSSID + 1];

int css_characteristics_avail = 0;

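/*
 * Call fn for every possible subchannel id: subchannel numbers
 * 0..__MAX_SUBCHANNEL in every subchannel set 0..max_ssid.  A non-zero
 * return value from fn ends the scan of the current set; the last
 * value returned by fn is passed back to the caller.
 */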
inline int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

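/*
 * Allocate and validate a subchannel structure for the given schid.
 * Only I/O subchannels are accepted; intparm is set to the address of
 * the new structure so interrupts can be mapped back to it.
 * Returns the subchannel or an ERR_PTR() value.
 */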
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel (sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}

	if (sch->st != SUBCHANNEL_TYPE_IO) {
		/* For now we ignore all non-io subchannels. */
		kfree(sch);
		return ERR_PTR(-EINVAL);
	}

	/* 
	 * Set intparm to subchannel address.
	 * This is fine even on 64bit since the subchannel is always located
	 * under 2G.
	 */
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	ret = cio_modify(sch);
	if (ret) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
	if (sch) {
		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		kfree(sch);
	}
	
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid))
		kfree(sch);
}

extern int css_get_ssd_info(struct subchannel *sch);


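/*
 * Register the subchannel with the driver core; reg_mutex serializes
 * registration against unregistration.
 */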
int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}

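/*
 * Make a new subchannel known to the driver core: hook it up below
 * css[0], register it on the css bus and fetch its subchannel
 * description (ssd) data.
 */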
static int
css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &css[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret)
		printk (KERN_WARNING "%s: could not register %s\n",
			__func__, sch->dev.bus_id);
	else
		css_get_ssd_info(sch);
	return ret;
}

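/*
 * Allocate and register a subchannel for schid; the subchannel is
 * freed again if registration fails.
 */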
int
css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);
	ret = css_register_subchannel(sch);
	if (ret)
		css_free_subchannel(sch);
	return ret;
}

static int
check_subchannel(struct device * dev, void * data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

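/*
 * Find a registered subchannel by its id.  Returns the subchannel
 * with a reference held (drop it with put_device()) or NULL.
 */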
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

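/*
 * Compare the current schib with the stored subchannel state and
 * classify the result: CIO_GONE, CIO_REVALIDATE (device number
 * changed), CIO_NO_PATH or CIO_OPER.
 */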
static inline int css_get_subchannel_status(struct subchannel *sch)
{
	struct schib schib;

	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
		return CIO_GONE;
	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
		return CIO_REVALIDATE;
	if (!sch->lpm)
		return CIO_NO_PATH;
	return CIO_OPER;
}

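/*
 * Re-evaluate an already registered subchannel after a machine check.
 * Disconnected devices are only handled on the fast path, all others
 * only on the slow path (-EAGAIN defers them).  Depending on the
 * subchannel status the attached device is unregistered, reprobed or
 * left alone.
 */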
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int event, ret, disc;
	unsigned long flags;
	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;

	spin_lock_irqsave(&sch->lock, flags);
	disc = device_is_disconnected(sch);
	if (disc && slow) {
		/* Disconnected devices are evaluated directly only. */
		spin_unlock_irqrestore(&sch->lock, flags);
		return 0;
	}
	/* No interrupt after machine check - kill pending timers. */
	device_kill_pending_timer(sch);
	if (!disc && !slow) {
		/* Non-disconnected devices are evaluated on the slow path. */
		spin_unlock_irqrestore(&sch->lock, flags);
		return -EAGAIN;
	}
	event = css_get_subchannel_status(sch);
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
		      sch->schid.ssid, sch->schid.sch_no, event,
		      disc ? "disconnected" : "normal",
		      slow ? "slow" : "fast");
	/* Analyze subchannel status. */
	action = NONE;
	switch (event) {
	case CIO_NO_PATH:
		if (disc) {
			/* Check if paths have become available. */
			action = REPROBE;
			break;
		}
		/* fall through */
	case CIO_GONE:
		/* Prevent unwanted effects when opening lock. */
		cio_disable_subchannel(sch);
		device_set_disconnected(sch);
		/* Ask driver what to do with device. */
		action = UNREGISTER;
		if (sch->driver && sch->driver->notify) {
			spin_unlock_irqrestore(&sch->lock, flags);
			ret = sch->driver->notify(&sch->dev, event);
			spin_lock_irqsave(&sch->lock, flags);
			if (ret)
				action = NONE;
		}
		break;
	case CIO_REVALIDATE:
		/* Device will be removed, so no notify necessary. */
		if (disc)
			/* Reprobe because immediate unregister might block. */
			action = REPROBE;
		else
			action = UNREGISTER_PROBE;
		break;
	case CIO_OPER:
		if (disc)
			/* Get device operational again. */
			action = REPROBE;
		break;
	}
	/* Perform action. */
	ret = 0;
	switch (action) {
	case UNREGISTER:
	case UNREGISTER_PROBE:
		/* Unregister device (will use subchannel lock). */
		spin_unlock_irqrestore(&sch->lock, flags);
		css_sch_device_unregister(sch);
		spin_lock_irqsave(&sch->lock, flags);

		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		break;
	case REPROBE:
		device_trigger_reprobe(sch);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
	/* Probe if necessary. */
	if (action == UNREGISTER_PROBE)
		ret = css_probe_device(sch->schid);

	return ret;
}

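/*
 * Evaluate a subchannel that is not yet registered: defer it to the
 * slow path unless already on it, ignore unusable subchannels and
 * probe the rest.
 */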
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch(schid, &schib) || !schib.pmcw.dnv) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
			 "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}

static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);

	return ret;
}

static int
css_rescan_devices(struct subchannel_id schid, void *data)
{
	return css_evaluate_subchannel(schid, 1);
}

struct slow_subchannel {
	struct list_head slow_list;
	struct subchannel_id schid;
};

static LIST_HEAD(slow_subchannels_head);
static DEFINE_SPINLOCK(slow_subchannel_lock);

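/*
 * Work function for the slow path: either rescan all subchannels
 * (after need_rescan was set) or evaluate the subchannels queued on
 * the slow list.
 */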
static void
css_trigger_slow_path(void)
{
	CIO_TRACE_EVENT(4, "slowpath");

	if (need_rescan) {
		need_rescan = 0;
		for_each_subchannel(css_rescan_devices, NULL);
		return;
	}

	spin_lock_irq(&slow_subchannel_lock);
	while (!list_empty(&slow_subchannels_head)) {
		struct slow_subchannel *slow_sch =
			list_entry(slow_subchannels_head.next,
				   struct slow_subchannel, slow_list);

		list_del_init(slow_subchannels_head.next);
		spin_unlock_irq(&slow_subchannel_lock);
		css_evaluate_subchannel(slow_sch->schid, 1);
		spin_lock_irq(&slow_subchannel_lock);
		kfree(slow_sch);
	}
	spin_unlock_irq(&slow_subchannel_lock);
}

typedef void (*workfunc)(void *);
DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
struct workqueue_struct *slow_path_wq;

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	CIO_DEBUG(KERN_INFO, 6, "cio: reprobe 0.%x.%04x\n",
		  schid.ssid, schid.sch_no);
	if (need_reprobe)
		return -EAGAIN;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		/* Already known. */
		put_device(&sch->dev);
		return 0;
	}

	ret = css_probe_device(schid);
	switch (ret) {
	case 0:
		break;
	case -ENXIO:
	case -ENOMEM:
		/* These should abort looping */
		break;
	default:
		ret = 0;
	}

	return ret;
}

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(void *data)
{
	int ret;

	CIO_MSG_EVENT(2, "reprobe start\n");

	need_reprobe = 0;
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	ret = for_each_subchannel(reprobe_subchannel, NULL);

	CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}

DECLARE_WORK(css_reprobe_work, reprobe_all, NULL);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	need_reprobe = 1;
	queue_work(ccw_device_work, &css_reprobe_work);
}

EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Rescan for new devices. FIXME: This is slow.
 * This function is called when we have lost CRWs due to overflows and we have
 * to do subchannel housekeeping.
 */
void
css_reiterate_subchannels(void)
{
	css_clear_subchannel_slow_list();
	need_rescan = 1;
}

/*
 * Called from the machine check handler for subchannel report words.
 */
int
css_process_crw(int rsid1, int rsid2)
{
	int ret;
	struct subchannel_id mchk_schid;

	CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
		      rsid1, rsid2);

	if (need_rescan)
		/* We need to iterate all subchannels anyway. */
		return -EAGAIN;

	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = rsid1;
	if (rsid2 != 0)
		mchk_schid.ssid = (rsid2 >> 8) & 3;

	/* 
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	ret = css_evaluate_subchannel(mchk_schid, 0);
	if (ret == -EAGAIN) {
		if (css_enqueue_subchannel_slow(mchk_schid)) {
			css_clear_subchannel_slow_list();
			need_rescan = 1;
		}
	}
	return ret;
}

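/*
 * Callback for the initial subchannel scan: allocate (or pick up the
 * console) subchannel and register it.  -ENXIO and -EIO end the scan
 * of the current subchannel set.
 */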
static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			ret = PTR_ERR(sch);
		else
			ret = 0;
		switch (ret) {
		case 0:
			break;
		case -ENOMEM:
			panic("Out of memory in init_channel_subsystem\n");
		/* -ENXIO: no more subchannels. */
		case -ENXIO:
			return ret;
		/* -EIO: this subchannel set not supported. */
		case -EIO:
			return ret;
		default:
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * not working) so we do it now. This is true e.g. for the
	 * console subchannel.
	 */
	css_register_subchannel(sch);
	return 0;
}

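/*
 * Build the global path group id for a channel subsystem: from the
 * extended cssid if multiple channel subsystems are supported,
 * otherwise from the cpu address, plus cpu id, cpu model and the
 * high word of the TOD clock.
 */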
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_characteristics_avail && css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	css->global_pgid.tod_high = tod_high;

}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	kfree(css);
}

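/*
 * The cm_enable sysfs attribute shows whether channel measurement is
 * enabled for this channel subsystem and switches it on or off via
 * chsc_secm().
 */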
static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css)
		return 0;
	return sprintf(buf, "%x\n", css->cm_enabled);
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	switch (buf[0]) {
	case '0':
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case '1':
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

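/* Initialize the channel subsystem structure for cssid nr. */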
static inline void __init
setup_css(int nr)
{
	u32 tod_high;

	memset(css[nr], 0, sizeof(struct channel_subsystem));
	mutex_init(&css[nr]->mutex);
	css[nr]->valid = 1;
	css[nr]->cssid = nr;
	sprintf(css[nr]->device.bus_id, "css%x", nr);
	css[nr]->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css[nr], tod_high);
}

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem (void)
{
	int ret, i;

	if (chsc_determine_css_characteristics() == 0)
		css_characteristics_avail = 1;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out_bus;
	default:
		max_ssid = 0;
	}
	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css[i]) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		setup_css(i);
		ret = device_register(&css[i]->device);
		if (ret)
			goto out_free;
		if (css_characteristics_avail &&
		    css_chsc_characteristics.secm) {
			ret = device_create_file(&css[i]->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
	}
	css_init_done = 1;

	ctl_set_bit(6, 28);

	for_each_subchannel(__init_channel_subsystem, NULL);
	return 0;
out_device:
	device_unregister(&css[i]->device);
out_free:
	kfree(css[i]);
out_unregister:
	while (i > 0) {
		i--;
		if (css_characteristics_avail && css_chsc_characteristics.secm)
			device_remove_file(&css[i]->device,
					   &dev_attr_cm_enable);
		device_unregister(&css[i]->device);
	}
out_bus:
	bus_unregister(&css_bus_type);
out:
	return ret;
}

/*
 * find a driver for a subchannel. They identify by the subchannel
 * type with the exception that the console subchannel driver has its own
 * subchannel type although the device is an i/o subchannel
 */
static int
css_bus_match (struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = container_of (dev, struct subchannel, dev);
	struct css_driver *driver = container_of (drv, struct css_driver, drv);

	if (sch->st == driver->subchannel_type)
		return 1;

	return 0;
}

static int
css_probe (struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	sch->driver = container_of (dev->driver, struct css_driver, drv);
	return (sch->driver->probe ? sch->driver->probe(sch) : 0);
}

static int
css_remove (struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	return (sch->driver->remove ? sch->driver->remove(sch) : 0);
}

static void
css_shutdown (struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
};

subsys_initcall(init_channel_subsystem);

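/* Queue a subchannel id for later evaluation by the slow path. */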
int
css_enqueue_subchannel_slow(struct subchannel_id schid)
{
	struct slow_subchannel *new_slow_sch;
	unsigned long flags;

	new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
	if (!new_slow_sch)
		return -ENOMEM;
	new_slow_sch->schid = schid;
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	return 0;
}

void
css_clear_subchannel_slow_list(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	while (!list_empty(&slow_subchannels_head)) {
		struct slow_subchannel *slow_sch =
			list_entry(slow_subchannels_head.next,
				   struct slow_subchannel, slow_list);

		list_del_init(slow_subchannels_head.next);
		kfree(slow_sch);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}



int
css_slow_subchannels_exist(void)
{
	return (!list_empty(&slow_subchannels_head));
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL_GPL(css_characteristics_avail);