// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD		"dasd_diag_mod"

static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int  dasd_alloc_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
/*
 * Allocate and initialize a new dasd_device structure.
 *
 * Returns the new device or an ERR_PTR(-ENOMEM). GFP_ATOMIC is used
 * throughout because this can be called from non-sleeping context
 * (NOTE(review): presumably the CCW device probe path — confirm).
 * On any allocation failure everything acquired so far is unwound
 * in reverse order before returning.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		/* unwind in reverse order of allocation */
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	/* Carve the raw pages into chunk allocators for CCW/ERP/ESE use. */
	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	/* new devices start (and target) the NEW state of the state machine */
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
154
void dasd_free_device(struct dasd_device *device)
L
Linus Torvalds 已提交
155
{
J
Jesper Juhl 已提交
156
	kfree(device->private);
157
	free_pages((unsigned long) device->ese_mem, 1);
L
Linus Torvalds 已提交
158 159 160 161 162
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

163 164 165 166 167 168 169 170 171 172 173 174 175 176
/*
 * Allocate memory for a new device structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
177
	tasklet_init(&block->tasklet, dasd_block_tasklet,
178 179 180
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
181
	timer_setup(&block->timer, dasd_block_timeout, 0);
182
	spin_lock_init(&block->profile.lock);
183 184 185

	return block;
}
186
EXPORT_SYMBOL_GPL(dasd_alloc_block);
187 188 189 190 191 192 193 194

/*
 * Free memory of a device structure.
 */
/* Release a dasd_block allocated by dasd_alloc_block(). */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);
196

L
Linus Torvalds 已提交
197 198 199
/*
 * Make a new device known to the system.
 */
200
/*
 * State transition NEW -> KNOWN: make the device known to the system.
 * Takes a device reference that is held for the whole non-NEW lifetime
 * and allocates the block request queue (if a block device exists).
 * Returns 0 on success or the error from dasd_alloc_queue().
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			/* drop the reference taken above on failure */
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
224
/*
 * State transition KNOWN -> NEW: let the system forget about a device.
 * Always succeeds (returns 0); drops the reference held since
 * dasd_state_new_to_known().
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

238 239 240 241 242 243 244 245 246 247 248 249 250
/*
 * Create a named debugfs directory below @base_dentry.
 *
 * Returns the new dentry, or NULL when there is no parent or the
 * directory could not be created; callers treat NULL simply as
 * "no debugfs support" and continue without it.
 */
static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	/* debugfs_create_dir() may hand back NULL or an ERR_PTR */
	if (IS_ERR_OR_NULL(pde))
		return NULL;
	return pde;
}

L
Linus Torvalds 已提交
251 252 253
/*
 * Request the irq line for the device.
 */
254
/*
 * State transition KNOWN -> BASIC: register gendisk, debugfs entries,
 * profiling and the per-device s390 debug-feature area.
 * Returns 0 on success or the error from dasd_gendisk_alloc().
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		/* per-block debugfs directory + profiling below it */
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	/* per-device debugfs directory, profiling and host-access info */
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
292
/*
 * State transition BASIC -> KNOWN: release the irq line for the device
 * and terminate any running i/o. Tears down everything set up by
 * dasd_state_known_to_basic() in reverse order.
 * Returns 0 on success, or an error from the discipline hook or
 * dasd_flush_device_queue() (in which case the state is left at BASIC).
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	/* give the discipline a chance to veto / prepare the transition */
	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	/* terminate any requests still sitting on the device queue */
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd disciplines
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
333 334 335 336
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
L
Linus Torvalds 已提交
337
 */
338
/*
 * State transition BASIC -> READY: run the discipline's analysis and
 * set up the block device with its real capacity.
 * Returns 0 on success, -EAGAIN when analysis is still pending (state
 * stays BASIC), or another error. When analysis fails hard the device
 * goes to DASD_STATE_UNFMT (a fake disk remains to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				/* hard failure: mark unformatted, tell udev */
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			/* -EAGAIN: analysis still running, stay in BASIC */
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			/* partition scan failed: fall back to BASIC */
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

379 380 381 382 383 384 385 386 387 388
/*
 * wait_event() condition: true once neither the device ccw queue nor
 * (if present) the block ccw queue holds any request.
 */
static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (!device->block)
		return list_empty(&device->ccw_queue);
	return list_empty(&device->ccw_queue) &&
		list_empty(&device->block->ccw_queue);
}

L
Linus Torvalds 已提交
389 390 391 392 393
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
394
/*
 * State transition READY -> BASIC: remove the device from the block
 * layer, destroy dirty buffers and forget the detected format.
 * Returns 0 on success; if flushing the block queue fails the state
 * is restored to READY and the error returned.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			/* could not flush: undo the state change */
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		/* forget everything the analysis discovered */
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

414 415 416
/*
 * Back to basic.
 */
417
/*
 * State transition UNFMT -> BASIC: simply leave the unformatted state;
 * nothing was set up for UNFMT, so there is nothing to tear down.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

L
Linus Torvalds 已提交
423 424 425 426 427
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
428
/*
 * State transition READY -> ONLINE: make the device online, schedule
 * the block bottom half to start moving requests from the linux
 * request queue to the ccw queue, and notify udev about the disk and
 * all its partitions. Always returns 0.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			/* raw-track access: no partitions, one event only */
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		/* send a change event for the disk and every partition */
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
455
/*
 * State transition ONLINE -> READY: stop the requeueing of requests.
 * Gives the discipline a chance to veto, then notifies udev about the
 * disk and all partitions (unless in raw-track access mode).
 * Returns 0 or the discipline hook's error.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
		/* send a change event for the disk and every partition */
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Device startup state changes.
 */
482
/*
 * Device startup state changes: walk the state machine upwards,
 * one transition per if-block, stopping at the first error or when
 * device->target is reached. An UNFMT device cannot be raised further
 * (-EPERM). Returns 0 or the first transition error.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	/* an unformatted device can not be brought beyond UNFMT */
	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
517
/*
 * Device shutdown state changes: mirror image of dasd_increase_state(),
 * walking the state machine downwards towards device->target and
 * stopping at the first error. Returns 0 or the first transition error.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	/* UNFMT drops back to BASIC, not READY */
	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
552
/*
 * This is the main startup/shutdown routine: drive the state machine
 * towards device->target. On -EAGAIN a transition is still pending and
 * will be retried later; any other error clamps the target back to the
 * current state. Wakes dasd_init_waitq once the target is reached.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		/* transition still in flight; keep the target and retry */
		return;
	if (rc)
		/* give up: make the current state the new target */
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call do do_kick_device to the kernel
 * event daemon.
 */
581
/*
 * Worker for dasd_kick_device(): run the state machine under the state
 * mutex, then kick the device bottom half. Drops the reference taken
 * by dasd_kick_device().
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

591
void dasd_kick_device(struct dasd_device *device)
L
Linus Torvalds 已提交
592 593 594
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
595 596
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
L
Linus Torvalds 已提交
597
}
598
EXPORT_SYMBOL(dasd_kick_device);
L
Linus Torvalds 已提交
599

600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615
/*
 * dasd_reload_device will schedule a call do do_reload_device to the kernel
 * event daemon.
 */
/*
 * Worker for dasd_reload_device(): invoke the discipline's reload hook
 * and drop the reference taken by dasd_reload_device().
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
616 617
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
618 619 620
}
EXPORT_SYMBOL(dasd_reload_device);

621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636
/*
 * dasd_restore_device will schedule a call do do_restore_device to the kernel
 * event daemon.
 */
/*
 * Worker for dasd_restore_device(): call the ccw driver's restore hook
 * (NOTE(review): presumably the hibernation/resume path — confirm) and
 * drop the reference taken by dasd_restore_device().
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_restore_device to the kernel event daemon. */
637 638
	if (!schedule_work(&device->restore_device))
		dasd_put_device(device);
639 640
}

L
Linus Torvalds 已提交
641 642 643
/*
 * Set the target state for a device and starts the state change.
 */
644
/*
 * Set the target state for a device and start the state change, all
 * under the state mutex. A lowered target that matches the current
 * state wakes waiters immediately. In probeonly mode the target is
 * capped at DASD_STATE_READY.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);
L
Linus Torvalds 已提交
662 663 664 665

/*
 * Enable devices with device numbers in [from..to].
 */
666
static inline int _wait_for_device(struct dasd_device *device)
L
Linus Torvalds 已提交
667 668 669 670
{
	return (device->state == device->target);
}

671
/*
 * Bring a device online: aim the state machine at ONLINE (falling back
 * to NEW when no discipline was found), wait until the transition has
 * settled, then trigger a discipline reload and validation.
 */
void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
701 702 703
/*
 * Add profiling information for cqr before execution: record the
 * current channel-queue depth in the global, per-block and per-device
 * statistics. Each profile is updated under its own lock; counters are
 * capped at bucket 31 (the histogram arrays hold 32 entries).
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */
756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778

/*
 * Map @value into a logarithmic histogram bucket index in [0, 31]:
 * the smallest index for which value < 2^(index+2).
 */
#define dasd_profile_counter(value, index)			   \
{								   \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		;						   \
}

/*
 * Fold one completed request into a profile data set.
 *
 * @data:        the profile to update (caller holds the profile lock)
 * @is_alias:    request was started on an alias device
 * @is_tpm:      request used transport (tcw) mode
 * @is_read:     request was a READ
 * @sectors:     request size in 512-byte sectors
 * @*_ind:       pre-computed histogram bucket indices (see
 *               dasd_profile_counter() in dasd_profile_end())
 */
static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		ktime_get_real_ts64(&data->starttod);
	}
	/* totals over all requests */
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	/* histogram buckets over all requests */
	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	/* the same again, restricted to READ requests */
	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

811 812 813
/*
 * Add profiling information for cqr after execution: compute the
 * start/irq/end latencies from the request's clock stamps, bucket them
 * and fold the request into the global, per-block and per-device
 * profiles (each under its own lock). Requests with incomplete clock
 * stamps or zero sectors are ignored.
 */
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	/* deltas between the cqr clock stamps; the >> 12 rescales the
	 * clock units (NOTE(review): presumably s390 TOD -> microseconds
	 * — confirm) */
	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	/* map every measured value to its histogram bucket */
	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		data = dasd_global_profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		data = block->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		data = device->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
914
	ktime_get_real_ts64(&data->starttod);
915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930
	spin_unlock_bh(&profile->lock);
}

/*
 * Switch profiling on for @profile. The data buffer is allocated
 * outside the lock; if another caller raced us and already attached
 * one, ours is discarded and 0 is returned (already on is success).
 * Returns 0 on success or -ENOMEM.
 */
int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		/* lost the race: someone else switched profiling on */
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	ktime_get_real_ts64(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

/* Switch profiling off for @profile and release its data buffer. */
void dasd_profile_off(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	profile->data = NULL;
	kfree(data);
	spin_unlock_bh(&profile->lock);
}

/*
 * Copy a string of up to @user_len bytes from user space into a freshly
 * vmalloc'ed, NUL-terminated kernel buffer, stripping one trailing
 * newline if present. The caller owns the buffer and must vfree() it.
 * Returns the buffer or ERR_PTR(-ENOMEM / -EFAULT).
 */
char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/*
	 * Got the string, now strip the linefeed. Guard against
	 * user_len == 0, which would otherwise read buffer[-1].
	 */
	if (user_len && buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}
963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984

/*
 * debugfs write handler for the "statistics" file.
 * Accepts the commands "reset", "on" and "off"; anything else is -EINVAL.
 * For the global profile, "on"/"off" also switch the global profiling
 * level. Returns the number of consumed bytes or a negative error.
 */
static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	/* cap the copied length; commands are short anyway */
	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}

/* Print one 32-bucket histogram as a space-separated line. */
static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int idx;

	for (idx = 0; idx < 32; idx++)
		seq_printf(m, "%u ", array[idx]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
1015 1016
	seq_printf(m, "start_time %lld.%09ld\n",
		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
1017 1018 1019 1020
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
1021 1022 1023 1024 1025 1026 1027 1028
	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
1029
	seq_puts(m, "histogram_sectors ");
1030
	dasd_stats_array(m, data->dasd_io_secs);
1031
	seq_puts(m, "histogram_io_times ");
1032
	dasd_stats_array(m, data->dasd_io_times);
1033
	seq_puts(m, "histogram_io_times_weighted ");
1034
	dasd_stats_array(m, data->dasd_io_timps);
1035
	seq_puts(m, "histogram_time_build_to_ssch ");
1036
	dasd_stats_array(m, data->dasd_io_time1);
1037
	seq_puts(m, "histogram_time_ssch_to_irq ");
1038
	dasd_stats_array(m, data->dasd_io_time2);
1039
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
1040
	dasd_stats_array(m, data->dasd_io_time2ps);
1041
	seq_puts(m, "histogram_time_irq_to_end ");
1042
	dasd_stats_array(m, data->dasd_io_time3);
1043
	seq_puts(m, "histogram_ccw_queue_length ");
1044 1045 1046 1047 1048
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
1049
	seq_puts(m, "histogram_read_sectors ");
1050
	dasd_stats_array(m, data->dasd_read_secs);
1051
	seq_puts(m, "histogram_read_times ");
1052
	dasd_stats_array(m, data->dasd_read_times);
1053
	seq_puts(m, "histogram_read_time_build_to_ssch ");
1054
	dasd_stats_array(m, data->dasd_read_time1);
1055
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
1056
	dasd_stats_array(m, data->dasd_read_time2);
1057
	seq_puts(m, "histogram_read_time_irq_to_end ");
1058
	dasd_stats_array(m, data->dasd_read_time3);
1059
	seq_puts(m, "histogram_read_ccw_queue_length ");
1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072
	dasd_stats_array(m, data->dasd_read_nr_req);
}

/*
 * seq_file show callback for the "statistics" debugfs file.
 * Prints the statistics report, or "disabled" if profiling is off.
 */
static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

/* debugfs open: bind the seq_file to the profile stored in i_private. */
static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

/* File operations for the per-device and global "statistics" debugfs files. */
static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
A
Al Viro 已提交
1099
	umode_t mode;
1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

/* Tear down @profile: disable profiling and remove its debugfs file. */
static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
S
Sebastian Ott 已提交
1124
	dasd_profile_exit(&dasd_global_profile);
1125 1126
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141
}

static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
S
Sebastian Ott 已提交
1142
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
1143 1144 1145 1146 1147 1148 1149 1150 1151
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

L
Linus Torvalds 已提交
1152
#else
1153 1154
/* Stub implementations used when CONFIG_DASD_PROFILE is disabled. */
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

L
Linus Torvalds 已提交
1188 1189
#endif				/* CONFIG_DASD_PROFILE */

1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204
/*
 * seq_file show callback for "host_access_list": delegate to the
 * discipline's hosts_print hook, -EOPNOTSUPP if it has none.
 */
static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device = m->private;
	int rc = -EOPNOTSUPP;

	dasd_get_device(device);
	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);
	dasd_put_device(device);

	return rc;
}

1205
DEFINE_SHOW_ATTRIBUTE(dasd_hosts);
1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228

/* Remove the "host_access_list" debugfs file of @device. */
static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

/*
 * Create the read-only "host_access_list" debugfs file for @device
 * below @base_dentry; best-effort, failures are ignored.
 */
static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	umode_t mode = S_IRUSR | S_IFREG;
	struct dentry *entry;

	if (!base_dentry)
		return;

	entry = debugfs_create_file("host_access_list", mode, base_dentry,
				    device, &dasd_hosts_fops);
	if (entry && !IS_ERR(entry))
		device->hosts_dentry = entry;
}

1229 1230 1231
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
L
Linus Torvalds 已提交
1232 1233
{
	unsigned long flags;
1234 1235
	char *data, *chunk;
	int size = 0;
L
Linus Torvalds 已提交
1236 1237 1238 1239 1240

	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
1241 1242 1243
	if (!cqr)
		size += (sizeof(*cqr) + 7L) & -8L;

L
Linus Torvalds 已提交
1244
	spin_lock_irqsave(&device->mem_lock, flags);
1245
	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
L
Linus Torvalds 已提交
1246
	spin_unlock_irqrestore(&device->mem_lock, flags);
1247
	if (!chunk)
L
Linus Torvalds 已提交
1248
		return ERR_PTR(-ENOMEM);
1249 1250 1251 1252 1253 1254
	if (!cqr) {
		cqr = (void *) data;
		data += (sizeof(*cqr) + 7L) & -8L;
	}
	memset(cqr, 0, sizeof(*cqr));
	cqr->mem_chunk = chunk;
L
Linus Torvalds 已提交
1255
	if (cplength > 0) {
1256 1257 1258
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
L
Linus Torvalds 已提交
1259 1260 1261 1262 1263
	}
	if (datasize > 0) {
		cqr->data = data;
 		memset(cqr->data, 0, datasize);
	}
1264
	cqr->magic = magic;
L
Linus Torvalds 已提交
1265 1266 1267 1268
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
1269
EXPORT_SYMBOL(dasd_smalloc_request);
L
Linus Torvalds 已提交
1270

1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313
/*
 * Allocate a ccw request from the device's ese_chunks pool (reserved
 * for format requests on ESE devices, so they cannot starve on the
 * regular ccw_chunks pool). Layout in one chunk: the 8-byte-aligned
 * request header, then the channel program, then the data area; all
 * zero-initialized. Takes a device reference dropped again by
 * dasd_ffree_request(). Returns the request or ERR_PTR(-ENOMEM).
 */
struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	/* round the header up to 8-byte alignment */
	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	/* channel program and data follow directly behind the header */
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

1314
void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
L
Linus Torvalds 已提交
1315 1316 1317 1318
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
1319
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
L
Linus Torvalds 已提交
1320 1321 1322
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
1323
EXPORT_SYMBOL(dasd_sfree_request);
L
Linus Torvalds 已提交
1324

1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335
/*
 * Return a request allocated by dasd_fmalloc_request() to the
 * ese_chunks pool and drop the device reference taken at allocation.
 */
void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

L
Linus Torvalds 已提交
1336 1337 1338
/*
 * Check discipline magic in cqr.
 */
1339
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
L
Linus Torvalds 已提交
1340 1341 1342 1343 1344
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
1345
	device = cqr->startdev;
L
Linus Torvalds 已提交
1346
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
S
Stefan Haberland 已提交
1347
		DBF_DEV_EVENT(DBF_WARNING, device,
L
Linus Torvalds 已提交
1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device runnig.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
1363
int dasd_term_IO(struct dasd_ccw_req *cqr)
L
Linus Torvalds 已提交
1364 1365 1366
{
	struct dasd_device *device;
	int retries, rc;
S
Stefan Haberland 已提交
1367
	char errorstring[ERRORLENGTH];
L
Linus Torvalds 已提交
1368 1369 1370 1371 1372 1373

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
1374
	device = (struct dasd_device *) cqr->startdev;
L
Linus Torvalds 已提交
1375 1376 1377 1378
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
1379
			cqr->status = DASD_CQR_CLEAR_PENDING;
1380
			cqr->stopclk = get_tod_clock();
1381
			cqr->starttime = 0;
L
Linus Torvalds 已提交
1382 1383 1384 1385 1386 1387 1388 1389 1390
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
L
Linus Torvalds 已提交
1405
		default:
S
Stefan Haberland 已提交
1406 1407 1408 1409
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
L
Linus Torvalds 已提交
1410 1411 1412 1413 1414
			BUG();
			break;
		}
		retries++;
	}
1415
	dasd_schedule_device_bh(device);
L
Linus Torvalds 已提交
1416 1417
	return rc;
}
1418
EXPORT_SYMBOL(dasd_term_IO);
L
Linus Torvalds 已提交
1419 1420 1421 1422 1423

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
1424
int dasd_start_IO(struct dasd_ccw_req *cqr)
L
Linus Torvalds 已提交
1425 1426 1427
{
	struct dasd_device *device;
	int rc;
S
Stefan Haberland 已提交
1428
	char errorstring[ERRORLENGTH];
L
Linus Torvalds 已提交
1429 1430 1431

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
1432 1433
	if (rc) {
		cqr->intrc = rc;
L
Linus Torvalds 已提交
1434
		return rc;
1435
	}
1436
	device = (struct dasd_device *) cqr->startdev;
1437 1438 1439 1440 1441 1442 1443 1444 1445 1446
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
L
Linus Torvalds 已提交
1447
	if (cqr->retries < 0) {
S
Stefan Haberland 已提交
1448 1449 1450 1451
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
1452
		cqr->status = DASD_CQR_ERROR;
L
Linus Torvalds 已提交
1453 1454
		return -EIO;
	}
1455
	cqr->startclk = get_tod_clock();
L
Linus Torvalds 已提交
1456 1457
	cqr->starttime = jiffies;
	cqr->retries--;
1458
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1459
		cqr->lpm &= dasd_path_get_opm(device);
1460
		if (!cqr->lpm)
1461
			cqr->lpm = dasd_path_get_opm(device);
1462
	}
1463 1464 1465 1466 1467 1468 1469
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
L
Linus Torvalds 已提交
1470 1471 1472 1473 1474
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
1475
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
L
Linus Torvalds 已提交
1476 1477 1478
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
1479 1480 1481 1482 1483 1484
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
L
Linus Torvalds 已提交
1485
		 */
1486 1487 1488 1489
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
1490 1491
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
1492 1493 1494 1495 1496 1497 1498 1499
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
1500 1501 1502 1503
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					  ccw_device_get_path_mask(
						  device->cdev));
1504
		}
L
Linus Torvalds 已提交
1505 1506
		break;
	case -ENODEV:
1507
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1508 1509
			      "start_IO: -ENODEV device gone, retry");
		break;
L
Linus Torvalds 已提交
1510
	case -EIO:
1511
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1512
			      "start_IO: -EIO device gone, retry");
L
Linus Torvalds 已提交
1513
		break;
1514 1515
	case -EINVAL:
		/* most likely caused in power management context */
1516
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1517 1518 1519
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
L
Linus Torvalds 已提交
1520
	default:
S
Stefan Haberland 已提交
1521 1522 1523 1524 1525
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
L
Linus Torvalds 已提交
1526 1527 1528
		BUG();
		break;
	}
1529
	cqr->intrc = rc;
L
Linus Torvalds 已提交
1530 1531
	return rc;
}
1532
EXPORT_SYMBOL(dasd_start_IO);
L
Linus Torvalds 已提交
1533 1534 1535 1536 1537 1538 1539 1540 1541

/*
 * Timeout function for dasd devices. This is used for different purposes
 *  1) missing interrupt handler for normal operation
 *  2) delayed start of request where start_IO failed with -EBUSY
 *  3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
1542
static void dasd_device_timeout(struct timer_list *t)
L
Linus Torvalds 已提交
1543 1544 1545 1546
{
	unsigned long flags;
	struct dasd_device *device;

1547
	device = from_timer(device, t, timer);
L
Linus Torvalds 已提交
1548 1549
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
1550
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
L
Linus Torvalds 已提交
1551
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1552
	dasd_schedule_device_bh(device);
L
Linus Torvalds 已提交
1553 1554 1555 1556 1557
}

/*
 * Setup timeout for a device in jiffies.
 */
1558
void dasd_device_set_timer(struct dasd_device *device, int expires)
L
Linus Torvalds 已提交
1559
{
1560 1561 1562 1563
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
L
Linus Torvalds 已提交
1564
}
1565
EXPORT_SYMBOL(dasd_device_set_timer);
L
Linus Torvalds 已提交
1566 1567 1568 1569

/*
 * Clear timeout for a device.
 */
1570
void dasd_device_clear_timer(struct dasd_device *device)
L
Linus Torvalds 已提交
1571
{
1572
	del_timer(&device->timer);
L
Linus Torvalds 已提交
1573
}
1574
EXPORT_SYMBOL(dasd_device_clear_timer);
L
Linus Torvalds 已提交
1575

1576 1577
static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
L
Linus Torvalds 已提交
1578 1579 1580 1581
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

1582 1583
	if (!intparm)
		return;
L
Linus Torvalds 已提交
1584 1585
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
1586 1587 1588
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
L
Linus Torvalds 已提交
1589 1590 1591
		return;
	}

S
Stefan Haberland 已提交
1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602
	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
1603 1604
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
S
Stefan Haberland 已提交
1605
		dasd_put_device(device);
L
Linus Torvalds 已提交
1606 1607 1608 1609 1610 1611
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

1612 1613
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
L
Linus Torvalds 已提交
1614 1615 1616
	dasd_put_device(device);
}

1617
void dasd_generic_handle_state_change(struct dasd_device *device)
L
Linus Torvalds 已提交
1618
{
1619 1620 1621
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

1622
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1623
	dasd_schedule_device_bh(device);
S
Stefan Haberland 已提交
1624
	if (device->block) {
1625
		dasd_schedule_block_bh(device->block);
1626 1627 1628
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
S
Stefan Haberland 已提交
1629
	}
L
Linus Torvalds 已提交
1630
}
1631
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
L
Linus Torvalds 已提交
1632

1633 1634 1635 1636 1637 1638 1639
/*
 * Check whether @irb reports that High Performance FICON is not
 * available on the device or path (SESQ no-fcx conditions).
 */
static int dasd_check_hpf_error(struct irb *irb)
{
	if (!scsw_tm_is_valid_schxs(&irb->scsw))
		return 0;
	return irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
	       irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX;
}

1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661
/*
 * Decide whether an interrupt indicates an unformatted track on an
 * ESE (thin-provisioned) device: the device must be ESE and the sense
 * data must show 'No Record Found', 'File Protected' or an incorrect
 * length condition.
 */
static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
	struct dasd_device *device;
	u8 *sense;

	if (!block)
		return 0;
	device = block->base;
	if (!device || !device->discipline->is_ese ||
	    !device->discipline->is_ese(device))
		return 0;

	sense = dasd_get_sense(irb);
	if (!sense)
		return 0;

	if (sense[1] & (SNS1_NO_REC_FOUND | SNS1_FILE_PROTECTED))
		return 1;
	return scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
}

1662 1663 1664 1665 1666 1667 1668 1669
/*
 * Check the sense bytes for the extent pool out-of-space condition
 * (equipment check + permanent error + write inhibited + reason 0x01).
 */
static int dasd_ese_oos_cond(u8 *sense)
{
	if (!(sense[0] & SNS0_EQUIPMENT_CHECK))
		return 0;
	if (!(sense[1] & SNS1_PERM_ERR) || !(sense[1] & SNS1_WRITE_INHIBITED))
		return 0;
	return sense[25] == 0x01;
}

L
Linus Torvalds 已提交
1670 1671 1672
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
1673 1674
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
L
Linus Torvalds 已提交
1675
{
1676
	struct dasd_ccw_req *cqr, *next, *fcqr;
L
Linus Torvalds 已提交
1677
	struct dasd_device *device;
1678
	unsigned long now;
1679 1680 1681
	int nrf_suppressed = 0;
	int fp_suppressed = 0;
	u8 *sense = NULL;
L
Linus Torvalds 已提交
1682 1683
	int expires;

1684
	cqr = (struct dasd_ccw_req *) intparm;
L
Linus Torvalds 已提交
1685 1686 1687
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
1688
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
1689
				device = cqr->startdev;
1690 1691 1692 1693 1694 1695
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
L
Linus Torvalds 已提交
1696 1697
			break;
		case -ETIMEDOUT:
1698 1699
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
L
Linus Torvalds 已提交
1700 1701
			break;
		default:
1702 1703 1704
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
L
Linus Torvalds 已提交
1705
		}
1706
		dasd_handle_killed_request(cdev, intparm);
L
Linus Torvalds 已提交
1707 1708 1709
		return;
	}

1710
	now = get_tod_clock();
1711 1712 1713 1714
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
1715 1716
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
1717
		device = dasd_device_from_cdev_locked(cdev);
1718 1719 1720 1721
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
L
Linus Torvalds 已提交
1722
			dasd_put_device(device);
1723
			return;
L
Linus Torvalds 已提交
1724
		}
1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the according suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748

			/*
			 * Extent pool probably out-of-space.
			 * Stop device and check exhaust level.
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
1749 1750 1751 1752
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

1753 1754 1755
		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
1756
		dasd_put_device(device);
L
Linus Torvalds 已提交
1757
	}
1758 1759 1760 1761

	/* check for for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
1762 1763 1764 1765 1766
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
1767 1768
	}

1769 1770
	if (!cqr)
		return;
L
Linus Torvalds 已提交
1771

1772 1773
	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
L
Linus Torvalds 已提交
1774
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1775 1776
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
L
Linus Torvalds 已提交
1777 1778 1779
		return;
	}

1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804
	if (dasd_ese_needs_format(cqr->block, irb)) {
		if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
			device->discipline->ese_read(cqr);
			cqr->status = DASD_CQR_SUCCESS;
			cqr->stopclk = now;
			dasd_device_clear_timer(device);
			dasd_schedule_device_bh(device);
			return;
		}
		fcqr = device->discipline->ese_format(device, cqr);
		if (IS_ERR(fcqr)) {
			/*
			 * If we can't format now, let the request go
			 * one extra round. Maybe we can format later.
			 */
			cqr->status = DASD_CQR_QUEUED;
		} else {
			fcqr->status = DASD_CQR_QUEUED;
			cqr->status = DASD_CQR_QUEUED;
			list_add(&fcqr->devlist, &device->ccw_queue);
			dasd_schedule_device_bh(device);
			return;
		}
	}

L
Linus Torvalds 已提交
1805
	/* Check for clear pending */
1806
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1807
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1808 1809
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
1810
		wake_up(&dasd_flush_wq);
1811
		dasd_schedule_device_bh(device);
L
Linus Torvalds 已提交
1812 1813 1814
		return;
	}

1815
	/* check status - the request might have been killed by dyn detach */
L
Linus Torvalds 已提交
1816
	if (cqr->status != DASD_CQR_IN_IO) {
S
Stefan Haberland 已提交
1817 1818
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
L
Linus Torvalds 已提交
1819 1820
		return;
	}
S
Stefan Haberland 已提交
1821

1822
	next = NULL;
L
Linus Torvalds 已提交
1823
	expires = 0;
1824 1825
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
1826 1827
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
L
Linus Torvalds 已提交
1828 1829
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
1830 1831 1832
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
L
Linus Torvalds 已提交
1833
		}
1834
	} else {  /* error */
1835 1836 1837 1838 1839 1840 1841
		/* check for HPF error
		 * call discipline function to requeue all requests
		 * and disable HPF accordingly
		 */
		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
		    device->discipline->handle_hpf_error)
			device->discipline->handle_hpf_error(device, irb);
1842 1843 1844
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
1845
		 */
1846
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1847
		    cqr->retries > 0) {
1848
			if (cqr->lpm == dasd_path_get_opm(device))
S
Stefan Haberland 已提交
1849 1850 1851 1852
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
1853
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
1854
				cqr->lpm = dasd_path_get_opm(device);
1855 1856 1857
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
L
Linus Torvalds 已提交
1858
			cqr->status = DASD_CQR_ERROR;
1859 1860 1861 1862 1863
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
L
Linus Torvalds 已提交
1864 1865
	}
	if (expires != 0)
1866
		dasd_device_set_timer(device, expires);
L
Linus Torvalds 已提交
1867
	else
1868 1869
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
L
Linus Torvalds 已提交
1870
}
1871
EXPORT_SYMBOL(dasd_int_handler);
L
Linus Torvalds 已提交
1872

1873 1874 1875 1876 1877 1878 1879 1880 1881 1882
/*
 * Generic unit check handler: give the discipline a chance to check for a
 * device change, then tell the caller to retry the I/O (UC_TODO_RETRY).
 * Bails out early if the device is offline, not in its target state, or the
 * discipline provides no check_for_device_change callback.
 */
enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	   device->state != device->target ||
	   !device->discipline->check_for_device_change){
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

L
Linus Torvalds 已提交
1896
/*
1897 1898
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
L
Linus Torvalds 已提交
1899
 */
1900 1901
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
L
Linus Torvalds 已提交
1902
{
1903 1904
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
L
Linus Torvalds 已提交
1905

1906 1907 1908 1909 1910
	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;
L
Linus Torvalds 已提交
1911

1912 1913 1914 1915 1916 1917 1918 1919
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};
L
Linus Torvalds 已提交
1920 1921

/*
1922 1923
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
L
Linus Torvalds 已提交
1924
 */
1925 1926
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
L
Linus Torvalds 已提交
1927 1928 1929 1930 1931 1932
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
1933 1934
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

1935
		/* Skip any non-final request. */
1936 1937 1938
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
1939
			continue;
L
Linus Torvalds 已提交
1940
		if (cqr->status == DASD_CQR_ERROR) {
1941
			__dasd_device_recovery(device, cqr);
1942
		}
L
Linus Torvalds 已提交
1943
		/* Rechain finished requests to final queue */
1944
		list_move_tail(&cqr->devlist, final_queue);
L
Linus Torvalds 已提交
1945 1946 1947
	}
}

1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974
/*
 * Translate a request's final internal status into the externally visible
 * one (DONE / NEED_ERP / TERMINATED) and invoke its completion callback.
 * Any other status at this point is a driver bug.
 */
static void __dasd_process_cqr(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	char errorstring[ERRORLENGTH];

	switch (cqr->status) {
	case DASD_CQR_SUCCESS:
		cqr->status = DASD_CQR_DONE;
		break;
	case DASD_CQR_ERROR:
		cqr->status = DASD_CQR_NEED_ERP;
		break;
	case DASD_CQR_CLEARED:
		cqr->status = DASD_CQR_TERMINATED;
		break;
	default:
		/* internal error 12 - wrong cqr status*/
		/* fixed format string: "%02x" (zero-padded hex), not "%x02" */
		snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
	}
	if (cqr->callback)
		cqr->callback(cqr, cqr->callback_data);
}

L
Linus Torvalds 已提交
1975
/*
1976 1977
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
L
Linus Torvalds 已提交
1978
 */
1979 1980
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
L
Linus Torvalds 已提交
1981
{
1982
	struct list_head *l, *n;
L
Linus Torvalds 已提交
1983
	struct dasd_ccw_req *cqr;
1984
	struct dasd_block *block;
1985

1986 1987 1988
	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
1989
		block = cqr->block;
1990 1991 1992
		if (!block) {
			__dasd_process_cqr(device, cqr);
		} else {
1993
			spin_lock_bh(&block->queue_lock);
1994
			__dasd_process_cqr(device, cqr);
1995
			spin_unlock_bh(&block->queue_lock);
1996
		}
L
Linus Torvalds 已提交
1997 1998 1999 2000 2001 2002 2003
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
2004
static void __dasd_device_check_expire(struct dasd_device *device)
L
Linus Torvalds 已提交
2005 2006 2007 2008 2009
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
2010
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2011 2012
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
2013 2014 2015 2016 2017 2018 2019
		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			/*
			 * IO in safe offline processing should not
			 * run out of retries
			 */
			cqr->retries++;
		}
2020 2021
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
S
Stefan Haberland 已提交
2022
			dev_err(&device->cdev->dev,
2023
				"cqr %p timed out (%lus) but cannot be "
S
Stefan Haberland 已提交
2024 2025
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
2026 2027
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
2028
		} else {
S
Stefan Haberland 已提交
2029
			dev_err(&device->cdev->dev,
2030
				"cqr %p timed out (%lus), %i retries "
S
Stefan Haberland 已提交
2031 2032
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
L
Linus Torvalds 已提交
2033 2034 2035 2036
		}
	}
}

2037 2038 2039 2040 2041 2042
/*
 * return 1 when device is not eligible for IO
 */
static int __dasd_device_is_unusable(struct dasd_device *device,
				     struct dasd_ccw_req *cqr)
{
2043
	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM | DASD_STOPPED_NOSPC);
2044

2045 2046 2047 2048 2049 2050
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/*
		 * dasd is being set offline
		 * but it is no safe offline where we have to allow I/O
		 */
2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067
		return 1;
	}
	if (device->stopped) {
		if (device->stopped & mask) {
			/* stopped and CQR will not change that. */
			return 1;
		}
		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			/* CQR is not able to change device to
			 * operational. */
			return 1;
		}
		/* CQR required to get device operational. */
	}
	return 0;
}

L
Linus Torvalds 已提交
2068 2069 2070 2071
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
2072
static void __dasd_device_start_head(struct dasd_device *device)
L
Linus Torvalds 已提交
2073 2074 2075 2076 2077 2078
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
2079
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2080 2081
	if (cqr->status != DASD_CQR_QUEUED)
		return;
2082 2083
	/* if device is not usable return request to upper layer */
	if (__dasd_device_is_unusable(device, cqr)) {
2084
		cqr->intrc = -EAGAIN;
2085 2086
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
2087
		return;
2088
	}
2089 2090 2091

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
2092
		dasd_device_set_timer(device, cqr->expires);
2093
	else if (rc == -EACCES) {
2094
		dasd_schedule_device_bh(device);
2095 2096
	} else
		/* Hmpf, try again in 1/2 sec */
2097
		dasd_device_set_timer(device, 50);
2098 2099
}

2100 2101 2102 2103
/*
 * Kick off path verification for all paths flagged "to be verified",
 * unless the device is stopped for a reason the verification cannot fix.
 * On verify_path failure, retry via the device timer (50 jiffies).
 */
static void __dasd_device_check_path_events(struct dasd_device *device)
{
	int rc;

	if (!dasd_path_get_tbvpm(device))
		return;

	if (device->stopped &
	    ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
		return;
	rc = device->discipline->verify_path(device,
					     dasd_path_get_tbvpm(device));
	if (rc)
		dasd_device_set_timer(device, 50);
	else
		dasd_path_clear_all_verify(device);
}

L
Linus Torvalds 已提交
2118
/*
2119 2120 2121 2122 2123 2124 2125 2126
 * Go through all request on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called!. In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
L
Linus Torvalds 已提交
2127
 */
2128
int dasd_flush_device_queue(struct dasd_device *device)
L
Linus Torvalds 已提交
2129
{
2130 2131
	struct dasd_ccw_req *cqr, *n;
	int rc;
L
Linus Torvalds 已提交
2132 2133 2134 2135
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
2136
	rc = 0;
2137
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2138 2139 2140 2141 2142 2143
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate requeust */
S
Stefan Haberland 已提交
2144 2145 2146
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
2147 2148 2149 2150 2151
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
2152
			cqr->stopclk = get_tod_clock();
2153
			cqr->status = DASD_CQR_CLEARED;
2154
			break;
2155
		default: /* no need to modify the others */
2156 2157
			break;
		}
2158
		list_move_tail(&cqr->devlist, &flush_queue);
2159 2160 2161
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
2175
	return rc;
L
Linus Torvalds 已提交
2176
}
2177
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
L
Linus Torvalds 已提交
2178 2179 2180 2181

/*
 * Acquire the device lock and process queues for the device.
 */
2182
static void dasd_device_tasklet(unsigned long data)
L
Linus Torvalds 已提交
2183
{
2184
	struct dasd_device *device = (struct dasd_device *) data;
L
Linus Torvalds 已提交
2185 2186 2187 2188 2189 2190
	struct list_head final_queue;

	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
2191 2192 2193
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
2194
	__dasd_device_check_path_events(device);
L
Linus Torvalds 已提交
2195 2196
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
2197 2198
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
L
Linus Torvalds 已提交
2199
	/* Now check if the head of the ccw queue needs to be started. */
2200 2201
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2202 2203
	if (waitqueue_active(&shutdown_waitq))
		wake_up(&shutdown_waitq);
L
Linus Torvalds 已提交
2204 2205 2206 2207 2208 2209
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
2210
void dasd_schedule_device_bh(struct dasd_device *device)
L
Linus Torvalds 已提交
2211 2212
{
	/* Protect against rescheduling. */
2213
	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
L
Linus Torvalds 已提交
2214 2215 2216 2217
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}
2218
EXPORT_SYMBOL(dasd_schedule_device_bh);
L
Linus Torvalds 已提交
2219

2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233
/* Add @bits to the accumulated stop conditions of @device. */
void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);

/*
 * Clear @bits from the accumulated stop conditions of @device and wake up
 * sleep_on waiters once no stop condition remains.
 */
void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);

L
Linus Torvalds 已提交
2234
/*
2235 2236
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
L
Linus Torvalds 已提交
2237
 */
2238
void dasd_add_request_head(struct dasd_ccw_req *cqr)
L
Linus Torvalds 已提交
2239 2240 2241 2242
{
	struct dasd_device *device;
	unsigned long flags;

2243
	device = cqr->startdev;
L
Linus Torvalds 已提交
2244
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2245 2246
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
L
Linus Torvalds 已提交
2247
	/* let the bh start the request to keep them in order */
2248
	dasd_schedule_device_bh(device);
L
Linus Torvalds 已提交
2249 2250
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
2251
EXPORT_SYMBOL(dasd_add_request_head);
L
Linus Torvalds 已提交
2252 2253

/*
2254 2255
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
L
Linus Torvalds 已提交
2256
 */
2257
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
L
Linus Torvalds 已提交
2258 2259 2260 2261
{
	struct dasd_device *device;
	unsigned long flags;

2262
	device = cqr->startdev;
L
Linus Torvalds 已提交
2263
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2264 2265
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
L
Linus Torvalds 已提交
2266
	/* let the bh start the request to keep them in order */
2267
	dasd_schedule_device_bh(device);
L
Linus Torvalds 已提交
2268 2269
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
2270
EXPORT_SYMBOL(dasd_add_request_tail);
L
Linus Torvalds 已提交
2271 2272

/*
2273
 * Wakeup helper for the 'sleep_on' functions.
L
Linus Torvalds 已提交
2274
 */
2275
void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
L
Linus Torvalds 已提交
2276
{
2277 2278 2279 2280
	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	cqr->callback_data = DASD_SLEEPON_END_TAG;
	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	wake_up(&generic_waitq);
L
Linus Torvalds 已提交
2281
}
2282
EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
L
Linus Torvalds 已提交
2283

2284
static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
L
Linus Torvalds 已提交
2285 2286 2287 2288
{
	struct dasd_device *device;
	int rc;

2289
	device = cqr->startdev;
L
Linus Torvalds 已提交
2290
	spin_lock_irq(get_ccwdev_lock(device->cdev));
2291
	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
L
Linus Torvalds 已提交
2292 2293 2294 2295 2296
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
2297
 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
L
Linus Torvalds 已提交
2298
 */
2299
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
L
Linus Torvalds 已提交
2300 2301
{
	struct dasd_device *device;
2302
	dasd_erp_fn_t erp_fn;
2303

2304 2305
	if (cqr->status == DASD_CQR_FILLED)
		return 0;
2306
	device = cqr->startdev;
2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}
2326

2327 2328 2329 2330 2331 2332 2333 2334 2335 2336
/*
 * Loop condition for _dasd_sleep_on: returns nonzero while the request
 * still needs processing (ERP pending or not yet in a final state).
 */
static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->refers) /* erp is not done yet */
			return 1;
		return ((cqr->status != DASD_CQR_DONE) &&
			(cqr->status != DASD_CQR_FAILED));
	} else
		return (cqr->status == DASD_CQR_FILLED);
}
2337

2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356
static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;
2357 2358 2359 2360 2361 2362
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			continue;
		}
2363 2364 2365 2366 2367
		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
2368
			cqr->intrc = -ENOLINK;
2369 2370
			continue;
		}
2371 2372 2373 2374 2375 2376 2377 2378 2379
		/*
		 * Don't try to start requests if device is in
		 * offline processing, it might wait forever
		 */
		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -ENODEV;
			continue;
		}
2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395
		/*
		 * Don't try to start requests if device is stopped
		 * except path verification requests
		 */
		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			if (interruptible) {
				rc = wait_event_interruptible(
					generic_waitq, !(device->stopped));
				if (rc == -ERESTARTSYS) {
					cqr->status = DASD_CQR_FAILED;
					maincqr->intrc = rc;
					continue;
				}
			} else
				wait_event(generic_waitq, !(device->stopped));
		}
2396 2397 2398
		if (!cqr->callback)
			cqr->callback = dasd_wakeup_cb;

2399
		cqr->callback_data = DASD_SLEEPON_START_TAG;
2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

2417
	maincqr->endclk = get_tod_clock();
2418 2419 2420 2421
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
2422
		rc = 0;
2423 2424
	else if (maincqr->intrc)
		rc = maincqr->intrc;
2425 2426
	else
		rc = -EIO;
L
Linus Torvalds 已提交
2427 2428 2429
	return rc;
}

2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445
/*
 * Return 1 once every request on @ccw_queue has been completed via
 * dasd_wakeup_cb (callback_data set to DASD_SLEEPON_END_TAG), 0 otherwise.
 */
static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
{
	struct dasd_ccw_req *cqr;

	list_for_each_entry(cqr, ccw_queue, blocklist) {
		if (cqr->callback_data != DASD_SLEEPON_END_TAG)
			return 0;
	}

	return 1;
}

static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr, *n;
2446
	u8 *sense = NULL;
2447
	int rc;
2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491

retry:
	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
		device = cqr->startdev;
		if (cqr->status != DASD_CQR_FILLED) /*could be failed*/
			continue;

		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			continue;
		}
		/*Non-temporary stop condition will trigger fail fast*/
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    !dasd_eer_enabled(device)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EAGAIN;
			continue;
		}

		/*Don't try to start requests if device is stopped*/
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !device->stopped);
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				cqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		if (!cqr->callback)
			cqr->callback = dasd_wakeup_cb;
		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
	}

	wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));

	rc = 0;
	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505
		/*
		 * In some cases the 'File Protected' or 'Incorrect Length'
		 * error might be expected and error recovery would be
		 * unnecessary in these cases.	Check if the according suppress
		 * bit is set.
		 */
		sense = dasd_get_sense(&cqr->irb);
		if (sense && sense[1] & SNS1_FILE_PROTECTED &&
		    test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
			continue;
		if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
		    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
			continue;

2506 2507 2508
		/*
		 * for alias devices simplify error recovery and
		 * return to upper layer
2509
		 * do not skip ERP requests
2510
		 */
2511
		if (cqr->startdev != cqr->basedev && !cqr->refers &&
2512 2513 2514
		    (cqr->status == DASD_CQR_TERMINATED ||
		     cqr->status == DASD_CQR_NEED_ERP))
			return -EAGAIN;
2515 2516

		/* normal recovery for basedev IO */
S
Stefan Haberland 已提交
2517 2518
		if (__dasd_sleep_on_erp(cqr))
			/* handle erp first */
2519
			goto retry;
2520
	}
2521

2522 2523 2524
	return 0;
}

2525 2526 2527 2528 2529 2530 2531 2532
/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion (non-interruptible).
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 0);
}
2533
EXPORT_SYMBOL(dasd_sleep_on);
2534

2535 2536 2537 2538 2539 2540 2541 2542 2543
/*
 * Start requests from a ccw_queue and wait (non-interruptible) for
 * their completion.
 */
int dasd_sleep_on_queue(struct list_head *ccw_queue)
{
	return _dasd_sleep_on_queue(ccw_queue, 0);
}
EXPORT_SYMBOL(dasd_sleep_on_queue);

2544 2545 2546 2547 2548 2549 2550 2551 2552
/*
 * Start requests from a ccw_queue and wait interruptible for their completion.
 */
int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
{
	return _dasd_sleep_on_queue(ccw_queue, 1);
}
EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);

L
Linus Torvalds 已提交
2553
/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 1);
}
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
L
Linus Torvalds 已提交
2562 2563 2564 2565 2566 2567 2568

/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
2569
static inline int _dasd_term_running_cqr(struct dasd_device *device)
L
Linus Torvalds 已提交
2570 2571
{
	struct dasd_ccw_req *cqr;
2572
	int rc;
L
Linus Torvalds 已提交
2573 2574 2575

	if (list_empty(&device->ccw_queue))
		return 0;
2576
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2577 2578 2579 2580 2581 2582 2583 2584 2585
	rc = device->discipline->term_IO(cqr);
	if (!rc)
		/*
		 * CQR terminated because a more important request is pending.
		 * Undo decreasing of retry counter because this is
		 * not an error case.
		 */
		cqr->retries++;
	return rc;
L
Linus Torvalds 已提交
2586 2587
}

2588
int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
L
Linus Torvalds 已提交
2589 2590 2591
{
	struct dasd_device *device;
	int rc;
2592

2593
	device = cqr->startdev;
2594 2595 2596 2597 2598 2599
	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		cqr->status = DASD_CQR_FAILED;
		cqr->intrc = -EPERM;
		return -EIO;
	}
L
Linus Torvalds 已提交
2600 2601 2602 2603 2604 2605 2606
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}
	cqr->callback = dasd_wakeup_cb;
2607
	cqr->callback_data = DASD_SLEEPON_START_TAG;
L
Linus Torvalds 已提交
2608
	cqr->status = DASD_CQR_QUEUED;
2609 2610 2611 2612 2613
	/*
	 * add new request as second
	 * first the terminated cqr needs to be finished
	 */
	list_add(&cqr->devlist, device->ccw_queue.next);
2614

L
Linus Torvalds 已提交
2615
	/* let the bh start the request to keep them in order */
2616
	dasd_schedule_device_bh(device);
2617

L
Linus Torvalds 已提交
2618 2619
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

2620
	wait_event(generic_waitq, _wait_for_wakeup(cqr));
2621

2622 2623 2624 2625 2626 2627
	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
2628 2629 2630 2631 2632 2633

	/* kick tasklets */
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);

L
Linus Torvalds 已提交
2634 2635
	return rc;
}
2636
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
L
Linus Torvalds 已提交
2637 2638 2639 2640 2641

/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
H
Hannes Reinecke 已提交
2642
 * Returns 0 if request termination was successful
2643 2644 2645
 *	   negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
L
Linus Torvalds 已提交
2646
 */
2647
static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
L
Linus Torvalds 已提交
2648
{
2649
	struct dasd_device *device = cqr->startdev;
2650
	int rc = 0;
L
Linus Torvalds 已提交
2651 2652 2653

	switch (cqr->status) {
	case DASD_CQR_QUEUED:
2654 2655
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
L
Linus Torvalds 已提交
2656 2657 2658
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
2659 2660
		rc = device->discipline->term_IO(cqr);
		if (rc) {
S
Stefan Haberland 已提交
2661 2662 2663
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
2664
		} else {
2665
			cqr->stopclk = get_tod_clock();
2666
		}
L
Linus Torvalds 已提交
2667
		break;
2668
	default: /* already finished or clear pending - do nothing */
L
Linus Torvalds 已提交
2669
		break;
2670 2671 2672 2673
	}
	dasd_schedule_device_bh(device);
	return rc;
}
2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685

/*
 * Locked wrapper around __dasd_cancel_req: performs the cancel while
 * holding the ccwdev lock of the request's start device.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	rc = __dasd_cancel_req(cqr);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return rc;
}
2686 2687 2688 2689 2690 2691 2692 2693 2694 2695

/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably, (e.g. a state
 * change interrupt)
 */
2696
static void dasd_block_timeout(struct timer_list *t)
2697 2698 2699 2700
{
	unsigned long flags;
	struct dasd_block *block;

2701
	block = from_timer(block, t, timer);
2702 2703
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
2704
	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
2705 2706
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
S
Stefan Haberland 已提交
2707
	blk_mq_run_hw_queues(block->request_queue, true);
2708 2709 2710 2711 2712 2713 2714
}

/*
 * Setup timeout for a dasd_block in jiffies.
 * An @expires value of 0 deletes any pending timer instead.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0)
		del_timer(&block->timer);
	else
		mod_timer(&block->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_block_set_timer);
2721 2722 2723 2724 2725 2726

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
2727
	del_timer(&block->timer);
2728
}
2729
EXPORT_SYMBOL(dasd_block_clear_timer);
2730 2731 2732 2733

/*
 * Process finished error recovery ccw.
 */
2734 2735
static void __dasd_process_erp(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
2736 2737 2738 2739 2740 2741
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
S
Stefan Haberland 已提交
2742
		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
2743 2744 2745
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}
L
Linus Torvalds 已提交
2746

2747 2748 2749
static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
2750
	blk_status_t error = BLK_STS_OK;
S
Stefan Haberland 已提交
2751
	int status;
2752 2753 2754

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);
2755

2756
	status = cqr->block->base->discipline->free_cp(cqr, req);
2757
	if (status < 0)
2758
		error = errno_to_blk_status(status);
2759
	else if (status == 0) {
2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773
		switch (cqr->intrc) {
		case -EPERM:
			error = BLK_STS_NEXUS;
			break;
		case -ENOLINK:
			error = BLK_STS_TRANSPORT;
			break;
		case -ETIMEDOUT:
			error = BLK_STS_TIMEOUT;
			break;
		default:
			error = BLK_STS_IOERR;
			break;
		}
2774
	}
S
Stefan Haberland 已提交
2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787

	/*
	 * We need to take care for ETIMEDOUT errors here since the
	 * complete callback does not get called in this case.
	 * Take care of all errors here and avoid additional code to
	 * transfer the error value to the complete callback.
	 */
	if (error) {
		blk_mq_end_request(req, error);
		blk_mq_run_hw_queues(req->q, true);
	} else {
		blk_mq_complete_request(req);
	}
2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818
}

/*
 * Process ccw request queue.
 */
/*
 * Walk the block's ccw queue, drive error recovery for requests that
 * need it, and move requests with final status to *final_queue.
 * Caller must hold block->queue_lock.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		/* handle_terminated_request() may requeue, so rescan. */
		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/*  Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			if (IS_ERR(erp_fn(cqr)))
				continue;
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request: quiesce the device so the
			 * administrator can examine the EER data first */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(base, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_tod_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}

/*
 * Completion callback installed on every request started via the
 * dasd_block layer: kick the block tasklet so the finished request
 * is picked up and returned to the blk-mq layer.
 */
static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_block *block = cqr->block;

	dasd_schedule_block_bh(block);
}

/*
 * Submit all FILLED requests at the head of the block's ccw queue to
 * their start device. Caller must hold block->queue_lock.
 */
static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first requests on the queue, as some
	 * of previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* Reject requests while another host holds the device lock,
		 * unless this request is explicitly allowed to run then. */
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -ENOLINK;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}

/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
2911
static void dasd_block_tasklet(unsigned long data)
{
	struct dasd_block *block = (struct dasd_block *) data;
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_queue *dq;

	/* Allow the tasklet to be scheduled again from now on. */
	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock_irq(&block->queue_lock);

	/* Now call the callback function of requests with final status */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		/* Each request completes under its own hw-queue lock. */
		dq = cqr->dq;
		spin_lock_irq(&dq->lock);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&dq->lock);
	}

	spin_lock_irq(&block->queue_lock);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock_irq(&block->queue_lock);

	if (waitqueue_active(&shutdown_waitq))
		wake_up(&shutdown_waitq);
	/* Drop the reference taken by dasd_schedule_block_bh(). */
	dasd_put_device(block->base);
}

/* Callback used while flushing: wake waiters in dasd_flush_block_queue(). */
static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}

2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961
/*
 * Requeue a request back to the block request queue
 * only works for block requests
 */
static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
{
	struct dasd_block *block = cqr->block;
	struct request *req;

	/* Device requests (no block) cannot be requeued to blk-mq. */
	if (!block)
		return -EINVAL;
	/* dq->lock protects callback_data against concurrent completion. */
	spin_lock_irq(&cqr->dq->lock);
	req = (struct request *) cqr->callback_data;
	blk_mq_requeue_request(req, false);
	spin_unlock_irq(&cqr->dq->lock);

	return 0;
}

2970 2971 2972 2973 2974 2975 2976 2977 2978 2979
/*
 * Go through all request on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;
	unsigned long flags;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request currently owned by a dasd_device cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		/* Wait until the device layer has released the request. */
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irqsave(&cqr->dq->lock, flags);
		cqr->endclk = get_tod_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irqrestore(&cqr->dq->lock, flags);
	}
	return rc;
}

/*
3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* life cycle of block is bound to its base device; hold a
	 * reference until the tasklet has run (dropped there). */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}
EXPORT_SYMBOL(dasd_schedule_block_bh);
3041 3042 3043 3044 3045


/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
L
Linus Torvalds 已提交
3046 3047 3048 3049 3050
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
S
Stefan Haberland 已提交
3051 3052
static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *qd)
{
	struct dasd_block *block = hctx->queue->queuedata;
	struct dasd_queue *dq = hctx->driver_data;
	struct request *req = qd->rq;
	struct dasd_device *basedev;
	struct dasd_ccw_req *cqr;
	blk_status_t rc = BLK_STS_OK;

	basedev = block->base;
	spin_lock_irq(&dq->lock);
	if (basedev->state < DASD_STATE_READY) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "device not ready for request %p", req);
		rc = BLK_STS_IOERR;
		goto out;
	}

	/*
	 * if device is stopped do not fetch new requests
	 * except failfast is active which will let requests fail
	 * immediately in __dasd_block_start_head()
	 */
	if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "device stopped request %p", req);
		rc = BLK_STS_RESOURCE;
		goto out;
	}

	if (basedev->features & DASD_FEATURE_READONLY &&
	    rq_data_dir(req) == WRITE) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "Rejecting write request %p", req);
		rc = BLK_STS_IOERR;
		goto out;
	}

	if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
	    (basedev->features & DASD_FEATURE_FAILFAST ||
	     blk_noretry_request(req))) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "Rejecting failfast request %p", req);
		rc = BLK_STS_IOERR;
		goto out;
	}

	/* Build the channel program for this request. */
	cqr = basedev->discipline->build_cp(basedev, block, req);
	if (IS_ERR(cqr)) {
		/* Transient failures: ask blk-mq to retry later. */
		if (PTR_ERR(cqr) == -EBUSY ||
		    PTR_ERR(cqr) == -ENOMEM ||
		    PTR_ERR(cqr) == -EAGAIN) {
			rc = BLK_STS_RESOURCE;
			goto out;
		}
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "CCW creation failed (rc=%ld) on request %p",
			      PTR_ERR(cqr), req);
		rc = BLK_STS_IOERR;
		goto out;
	}
	/*
	 *  Note: callback is set to dasd_return_cqr_cb in
	 * __dasd_block_start_head to cover erp requests as well
	 */
	cqr->callback_data = req;
	cqr->status = DASD_CQR_FILLED;
	cqr->dq = dq;

	blk_mq_start_request(req);
	spin_lock(&block->queue_lock);
	list_add_tail(&cqr->blocklist, &block->ccw_queue);
	INIT_LIST_HEAD(&cqr->devlist);
	dasd_profile_start(block, cqr, req);
	dasd_schedule_block_bh(block);
	spin_unlock(&block->queue_lock);

out:
	spin_unlock_irq(&dq->lock);
	return rc;
}

3134 3135 3136 3137 3138
/*
 * Block timeout callback, called from the block layer
 *
 * Return values:
 * BLK_EH_RESET_TIMER if the request should be left running
3139
 * BLK_EH_DONE if the request is handled or terminated
3140 3141
 *		      by the driver.
 */
S
Stefan Haberland 已提交
3142
enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
{
	struct dasd_block *block = req->q->queuedata;
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int rc = 0;

	cqr = blk_mq_rq_to_pdu(req);
	if (!cqr)
		return BLK_EH_DONE;

	spin_lock_irqsave(&cqr->dq->lock, flags);
	device = cqr->startdev ? cqr->startdev : block->base;
	/* Without a configured block timeout just keep the request running. */
	if (!device->blk_timeout) {
		spin_unlock_irqrestore(&cqr->dq->lock, flags);
		return BLK_EH_RESET_TIMER;
	}
	DBF_DEV_EVENT(DBF_WARNING, device,
		      " dasd_times_out cqr %p status %x",
		      cqr, cqr->status);

	/* Lock order: dq->lock -> queue_lock -> ccwdev lock. */
	spin_lock(&block->queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	cqr->retries = -1;
	cqr->intrc = -ETIMEDOUT;
	if (cqr->status >= DASD_CQR_QUEUED) {
		rc = __dasd_cancel_req(cqr);
	} else if (cqr->status == DASD_CQR_FILLED ||
		   cqr->status == DASD_CQR_NEED_ERP) {
		cqr->status = DASD_CQR_TERMINATED;
	} else if (cqr->status == DASD_CQR_IN_ERP) {
		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;

		/* Find the ERP request that refers back to this cqr. */
		list_for_each_entry_safe(searchcqr, nextcqr,
					 &block->ccw_queue, blocklist) {
			tmpcqr = searchcqr;
			while (tmpcqr->refers)
				tmpcqr = tmpcqr->refers;
			if (tmpcqr != cqr)
				continue;
			/* searchcqr is an ERP request for cqr */
			searchcqr->retries = -1;
			searchcqr->intrc = -ETIMEDOUT;
			if (searchcqr->status >= DASD_CQR_QUEUED) {
				rc = __dasd_cancel_req(searchcqr);
			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
				searchcqr->status = DASD_CQR_TERMINATED;
				rc = 0;
			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
				/*
				 * Shouldn't happen; most recent ERP
				 * request is at the front of queue
				 */
				continue;
			}
			break;
		}
	}
	spin_unlock(get_ccwdev_lock(device->cdev));
	dasd_schedule_block_bh(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irqrestore(&cqr->dq->lock, flags);

	/* A pending cancel means we must keep waiting for completion. */
	return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
}

S
Stefan Haberland 已提交
3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243
/*
 * Allocate the per-hardware-queue driver data (a spinlock container)
 * when blk-mq initializes a hardware context.
 */
static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int idx)
{
	struct dasd_queue *dq;

	dq = kzalloc(sizeof(*dq), GFP_KERNEL);
	if (!dq)
		return -ENOMEM;
	spin_lock_init(&dq->lock);
	hctx->driver_data = dq;
	return 0;
}

/* Release the per-hardware-queue driver data allocated in dasd_init_hctx(). */
static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	struct dasd_queue *dq = hctx->driver_data;

	hctx->driver_data = NULL;
	kfree(dq);
}

/* blk-mq .complete handler: finish the request and restart the queues. */
static void dasd_request_done(struct request *req)
{
	blk_mq_end_request(req, 0);
	blk_mq_run_hw_queues(req->q, true);
}

/* blk-mq operations for DASD block devices. */
static struct blk_mq_ops dasd_mq_ops = {
	.queue_rq = do_dasd_request,
	.complete = dasd_request_done,
	.timeout = dasd_times_out,
	.init_hctx = dasd_init_hctx,
	.exit_hctx = dasd_exit_hctx,
};

L
Linus Torvalds 已提交
3244 3245 3246
/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
3247
static int dasd_alloc_queue(struct dasd_block *block)
L
Linus Torvalds 已提交
3248
{
S
Stefan Haberland 已提交
3249 3250 3251
	int rc;

	block->tag_set.ops = &dasd_mq_ops;
3252
	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
3253 3254
	block->tag_set.nr_hw_queues = nr_hw_queues;
	block->tag_set.queue_depth = queue_depth;
S
Stefan Haberland 已提交
3255
	block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
3256
	block->tag_set.numa_node = NUMA_NO_NODE;
S
Stefan Haberland 已提交
3257 3258 3259 3260 3261 3262 3263 3264

	rc = blk_mq_alloc_tag_set(&block->tag_set);
	if (rc)
		return rc;

	block->request_queue = blk_mq_init_queue(&block->tag_set);
	if (IS_ERR(block->request_queue))
		return PTR_ERR(block->request_queue);
L
Linus Torvalds 已提交
3265

3266
	block->request_queue->queuedata = block;
L
Linus Torvalds 已提交
3267

3268
	return 0;
L
Linus Torvalds 已提交
3269 3270 3271 3272 3273
}

/*
 * Deactivate and free request queue.
 */
3274
static void dasd_free_queue(struct dasd_block *block)
L
Linus Torvalds 已提交
3275
{
3276 3277
	if (block->request_queue) {
		blk_cleanup_queue(block->request_queue);
S
Stefan Haberland 已提交
3278
		blk_mq_free_tag_set(&block->tag_set);
3279
		block->request_queue = NULL;
L
Linus Torvalds 已提交
3280 3281 3282
	}
}

A
Al Viro 已提交
3283
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
	struct dasd_device *base;
	int rc;

	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;

	atomic_inc(&base->block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	/* Pin the discipline module for the lifetime of the open. */
	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		dev_info(&base->cdev->dev,
			 "Accessing the DASD failed because it is in "
			 "probeonly mode\n");
		rc = -EPERM;
		goto out;
	}

	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	/* Refuse writes on devices marked read-only by flag or feature. */
	if ((mode & FMODE_WRITE) &&
	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
	     (base->features & DASD_FEATURE_READONLY))) {
		rc = -EROFS;
		goto out;
	}

	dasd_put_device(base);
	return 0;

out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&base->block->open_count);
	dasd_put_device(base);
	return rc;
}

3336
/* Undo dasd_open(): drop the open count and the discipline module ref. */
static void dasd_release(struct gendisk *disk, fmode_t mode)
{
	struct dasd_device *base = dasd_device_from_gendisk(disk);

	if (!base)
		return;

	atomic_dec(&base->block->open_count);
	module_put(base->discipline->owner);
	dasd_put_device(base);
}

3346 3347 3348
/*
 * Return disk geometry.
 */
3349
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
3350
{
3351
	struct dasd_device *base;
3352

3353 3354
	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
3355 3356
		return -ENODEV;

3357
	if (!base->discipline ||
3358 3359
	    !base->discipline->fill_geometry) {
		dasd_put_device(base);
3360
		return -EINVAL;
3361 3362 3363 3364
	}
	base->discipline->fill_geometry(base->block, geo);
	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
	dasd_put_device(base);
3365 3366 3367
	return 0;
}

3368
/* Block device operations for DASD disks. */
const struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_ioctl,
	.getgeo		= dasd_getgeo,
};

3378 3379 3380
/*******************************************************************************
 * end of block device operations
 */
L
Linus Torvalds 已提交
3381 3382 3383 3384 3385 3386 3387

/* Module exit: tear down all global DASD infrastructure. */
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	/* kmem_cache_destroy(NULL) is a no-op, so no check is needed. */
	kmem_cache_destroy(dasd_page_cache);
	dasd_page_cache = NULL;
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
	dasd_statistics_removeroot();
}

/*
 * SECTION: common functions for ccw_driver use
 */

3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431
/*
 * Is the device read-only?
 * Note that this function does not report the setting of the
 * readonly device attribute, but how it is configured in z/VM.
 */
int dasd_device_is_ro(struct dasd_device *device)
{
	struct ccw_dev_id dev_id;
	struct diag210 diag_data;
	int rc;

	/* Only z/VM can attach a device read-only this way. */
	if (!MACHINE_IS_VM)
		return 0;

	ccw_device_get_id(device->cdev, &dev_id);
	memset(&diag_data, 0, sizeof(diag_data));
	diag_data.vrdcdvno = dev_id.devno;
	diag_data.vrdclen = sizeof(diag_data);

	rc = diag210(&diag_data);
	if (rc != 0 && rc != 2) {
		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
			  dev_id.devno, rc);
		return 0;
	}
	/* Bit 0x80 of the virtual device flags marks a read-only attach. */
	return diag_data.vrdcvfla & 0x80;
}
EXPORT_SYMBOL_GPL(dasd_device_is_ro);

3432 3433 3434 3435 3436 3437 3438
/* Async worker scheduled from dasd_generic_probe() to set a device online. */
static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		pr_warn("%s: Setting the DASD online failed with rc=%d\n",
			dev_name(&cdev->dev), ret);
}

3443 3444 3445 3446
/*
 * Initial attempt at a probe function. this can be simplified once
 * the other detection code is gone.
 */
3447 3448
int dasd_generic_probe(struct ccw_device *cdev,
		       struct dasd_discipline *discipline)
{
	int ret;

	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_generic_probe: could not add "
				"sysfs entries");
		return ret;
	}
	/* From now on interrupts for this device reach the DASD layer. */
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
		async_schedule(dasd_generic_auto_online, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_probe);
L
Linus Torvalds 已提交
3472

3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488
/* Release discipline and base-discipline references taken at set_online. */
void dasd_generic_free_discipline(struct dasd_device *device)
{
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
		device->discipline = NULL;
	}
	if (device->base_discipline) {
		module_put(device->base_discipline->owner);
		device->base_discipline = NULL;
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);

3489 3490 3491 3492
/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
3493
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	/* Stop delivering interrupts to the DASD layer. */
	cdev->handler = NULL;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device)) {
		dasd_remove_sysfs_files(cdev);
		return;
	}
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		dasd_remove_sysfs_files(cdev);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);

	dasd_remove_sysfs_files(cdev);
}
EXPORT_SYMBOL_GPL(dasd_generic_remove);
L
Linus Torvalds 已提交
3531

3532 3533
/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
L
Linus Torvalds 已提交
3534
 * the device is detected for the first time and is supposed to be used
3535 3536
 * or the user has started activation through sysfs.
 */
3537 3538
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	/* The DIAG discipline lives in a separate, loadable module. */
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			/* Try to load the required module. */
			rc = request_module(DASD_DIAG_MOD);
			if (rc) {
				pr_warn("%s Setting the DASD online failed "
					"because the required module %s "
					"could not be loaded (rc=%d)\n",
					dev_name(&cdev->dev), DASD_DIAG_MOD,
					rc);
				dasd_delete_device(device);
				return -ENODEV;
			}
		}
		/* Module init could have failed, so check again here after
		 * request_module(). */
		if (!dasd_diag_discipline_pointer) {
			pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
				dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	/* Pin both modules; drop them in reverse order on failure. */
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
			dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
			dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
				dev_name(&cdev->dev));

	/* Block until the state transition has fully completed. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
L
Linus Torvalds 已提交
3617

3618
int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count, rc;
	unsigned long flags;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return PTR_ERR(device);
	}

	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warn("%s: The DASD cannot be set offline with open count %i\n",
					dev_name(&cdev->dev), open_count);
			else
				pr_warn("%s: The DASD cannot be set offline while it is in use\n",
					dev_name(&cdev->dev));
			rc = -EBUSY;
			goto out_err;
		}
	}

	/*
	 * Test if the offline processing is already running and exit if so.
	 * If a safe offline is being processed this could only be a normal
	 * offline that should be able to overtake the safe offline and
	 * cancel any I/O we do not want to wait for any longer
	 */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
				  &device->flags);
		} else {
			rc = -EBUSY;
			goto out_err;
		}
	}
	set_bit(DASD_FLAG_OFFLINE, &device->flags);

	/*
	 * if safe_offline is called set safe_offline_running flag and
	 * clear safe_offline so that a call to normal offline
	 * can overrun safe_offline processing
	 */
	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/* need to unlock here to wait for outstanding I/O */
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		/*
		 * If we want to set the device safe offline all IO operations
		 * should be finished before continuing the offline process
		 * so sync bdev first and then wait for our queues to become
		 * empty
		 */
		if (device->block) {
			rc = fsync_bdev(device->block->bdev);
			if (rc != 0)
				goto interrupted;
		}
		dasd_schedule_device_bh(device);
		rc = wait_event_interruptible(shutdown_waitq,
					      _wait_for_empty_queues(device));
		if (rc != 0)
			goto interrupted;

		/*
		 * check if a normal offline process overtook the offline
		 * processing in this case simply do nothing beside returning
		 * that we got interrupted
		 * otherwise mark safe offline as not running any longer and
		 * continue with normal offline
		 */
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
		if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			rc = -ERESTARTSYS;
			goto out_err;
		}
		clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);

	return 0;

interrupted:
	/* interrupted by signal */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
out_err:
	dasd_put_device(device);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
L
Linus Torvalds 已提交
3737

3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775
int dasd_generic_last_path_gone(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	dev_warn(&device->cdev->dev, "No operational channel path is left "
		 "for the device\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
	/* First of all call extended error reporting. */
	dasd_eer_write(device, NULL, DASD_EER_NOPATH);

	if (device->state < DASD_STATE_BASIC)
		return 0;
	/* Device is active. We want to keep it. */
	list_for_each_entry(cqr, &device->ccw_queue, devlist)
		if ((cqr->status == DASD_CQR_IN_IO) ||
		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries++;
		}
	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);

int dasd_generic_path_operational(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "A channel path to the device has become "
		 "operational\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
	if (device->stopped & DASD_UNRESUMED_PM) {
		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
		dasd_restore_device(device);
		return 1;
	}
	dasd_schedule_device_bh(device);
S
Stefan Haberland 已提交
3776
	if (device->block) {
3777
		dasd_schedule_block_bh(device->block);
3778 3779 3780
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
S
Stefan Haberland 已提交
3781
		}
3782 3783 3784 3785

	if (!device->stopped)
		wake_up(&generic_waitq);

3786 3787 3788 3789
	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_path_operational);

3790
int dasd_generic_notify(struct ccw_device *cdev, int event)
L
Linus Torvalds 已提交
3791 3792 3793 3794
{
	struct dasd_device *device;
	int ret;

3795
	device = dasd_device_from_cdev_locked(cdev);
L
Linus Torvalds 已提交
3796 3797 3798 3799 3800
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
3801
	case CIO_BOXED:
L
Linus Torvalds 已提交
3802
	case CIO_NO_PATH:
3803
		dasd_path_no_path(device);
3804
		ret = dasd_generic_last_path_gone(device);
L
Linus Torvalds 已提交
3805 3806 3807
		break;
	case CIO_OPER:
		ret = 1;
3808
		if (dasd_path_get_opm(device))
3809
			ret = dasd_generic_path_operational(device);
L
Linus Torvalds 已提交
3810 3811 3812 3813 3814
		break;
	}
	dasd_put_device(device);
	return ret;
}
3815
EXPORT_SYMBOL_GPL(dasd_generic_notify);
L
Linus Torvalds 已提交
3816

3817 3818 3819
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
	struct dasd_device *device;
3820
	int chp, oldopm, hpfpm, ifccpm;
3821 3822 3823 3824

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return;
3825 3826

	oldopm = dasd_path_get_opm(device);
3827 3828
	for (chp = 0; chp < 8; chp++) {
		if (path_event[chp] & PE_PATH_GONE) {
3829
			dasd_path_notoper(device, chp);
3830 3831
		}
		if (path_event[chp] & PE_PATH_AVAILABLE) {
3832
			dasd_path_available(device, chp);
3833 3834
			dasd_schedule_device_bh(device);
		}
3835
		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
3836 3837
			if (!dasd_path_is_operational(device, chp) &&
			    !dasd_path_need_verify(device, chp)) {
S
Stefan Haberland 已提交
3838 3839 3840 3841 3842
				/*
				 * we can not establish a pathgroup on an
				 * unavailable path, so trigger a path
				 * verification first
				 */
3843 3844
			dasd_path_available(device, chp);
			dasd_schedule_device_bh(device);
S
Stefan Haberland 已提交
3845
			}
3846 3847 3848 3849 3850
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Pathgroup re-established\n");
			if (device->discipline->kick_validate)
				device->discipline->kick_validate(device);
		}
3851
	}
3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875
	hpfpm = dasd_path_get_hpfpm(device);
	ifccpm = dasd_path_get_ifccpm(device);
	if (!dasd_path_get_opm(device) && hpfpm) {
		/*
		 * device has no operational paths but at least one path is
		 * disabled due to HPF errors
		 * disable HPF at all and use the path(s) again
		 */
		if (device->discipline->disable_hpf)
			device->discipline->disable_hpf(device);
		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
		dasd_path_set_tbvpm(device, hpfpm);
		dasd_schedule_device_bh(device);
		dasd_schedule_requeue(device);
	} else if (!dasd_path_get_opm(device) && ifccpm) {
		/*
		 * device has no operational paths but at least one path is
		 * disabled due to IFCC errors
		 * trigger path verification on paths with IFCC errors
		 */
		dasd_path_set_tbvpm(device, ifccpm);
		dasd_schedule_device_bh(device);
	}
	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
3876 3877 3878 3879 3880 3881 3882 3883
		dev_warn(&device->cdev->dev,
			 "No verified channel paths remain for the device\n");
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "%s", "last verified path gone");
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
		dasd_device_set_stop_bits(device,
					  DASD_STOPPED_DC_WAIT);
	}
3884 3885 3886 3887 3888 3889
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);

/*
 * Add the verified path mask @lpm to the device's operational path mask.
 * If the device had no operational path before, treat this as the device
 * becoming operational again.
 *
 * Returns 0.
 */
int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!dasd_path_get_opm(device) && lpm) {
		dasd_path_set_opm(device, lpm);
		dasd_generic_path_operational(device);
	} else
		dasd_path_add_opm(device, lpm);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

/*
 * Handle an out-of-space condition for the device: report it via
 * extended error reporting, requeue the affected request @cqr and
 * stop the device with DASD_STOPPED_NOSPC until space is available.
 */
void dasd_generic_space_exhaust(struct dasd_device *device,
				struct dasd_ccw_req *cqr)
{
	dasd_eer_write(device, NULL, DASD_EER_NOSPC);

	/* nothing more to do for devices that are not active yet */
	if (device->state < DASD_STATE_BASIC)
		return;

	switch (cqr->status) {
	case DASD_CQR_IN_IO:
	case DASD_CQR_CLEAR_PENDING:
		/* let the request be retried once space is back */
		cqr->status = DASD_CQR_QUEUED;
		cqr->retries++;
		break;
	default:
		break;
	}
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);

void dasd_generic_space_avail(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "Extent pool space is available\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");

	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_schedule_device_bh(device);

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue, true);
	}
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_avail);

3936 3937 3938 3939
/*
 * clear active requests and requeue them to block layer if possible
 */
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
3940
{
3941
	struct list_head requeue_queue;
3942
	struct dasd_ccw_req *cqr, *n;
3943
	struct dasd_ccw_req *refers;
3944 3945
	int rc;

3946 3947
	INIT_LIST_HEAD(&requeue_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
3948 3949 3950 3951 3952 3953 3954 3955 3956 3957
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate requeust */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
3958
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
3959 3960 3961 3962
				dasd_put_device(device);
				return rc;
			}
		}
3963
		list_move_tail(&cqr->devlist, &requeue_queue);
3964
	}
3965
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
3966

3967
	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
3968 3969
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
3970

3971 3972 3973 3974 3975 3976
		/*
		 * requeue requests to blocklayer will only work
		 * for block device requests
		 */
		if (_dasd_requeue_request(cqr))
			continue;
3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987

		/* remove requests from device and block queue */
		list_del_init(&cqr->devlist);
		while (cqr->refers != NULL) {
			refers = cqr->refers;
			/* remove the request from the block queue */
			list_del(&cqr->blocklist);
			/* free the finished erp request */
			dasd_free_erp_request(cqr, cqr->memdev);
			cqr = refers;
		}
3988

3989 3990 3991 3992 3993 3994 3995
		/*
		 * _dasd_requeue_request already checked for a valid
		 * blockdevice, no need to check again
		 * all erp requests (cqr->refers) have a cqr->block
		 * pointer copy from the original cqr
		 */
		list_del_init(&cqr->blocklist);
3996 3997
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
3998 3999
	}

4000 4001 4002 4003
	/*
	 * if requests remain then they are internal request
	 * and go back to the device queue
	 */
4004
	if (!list_empty(&requeue_queue)) {
4005
		/* move freeze_queue to start of the ccw_queue */
4006 4007 4008
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		list_splice_tail(&requeue_queue, &device->ccw_queue);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
4009
	}
4010
	dasd_schedule_device_bh(device);
4011 4012
	return rc;
}
4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044

/* Work function behind dasd_schedule_requeue(): requeue all requests,
 * lift the NOT_ACC stop bit and drop the reference taken at schedule time. */
static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device;

	device = container_of(work, struct dasd_device, requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

/*
 * Schedule the requeue_requests work item (do_requeue_requests) on the
 * kernel event daemon. Takes a device reference that the work function
 * releases; drop it again if the work was already pending.
 */
void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);

int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* mark device as suspended */
	set_bit(DASD_FLAG_SUSPENDED, &device->flags);

	if (device->discipline->freeze)
S
Sebastian Ott 已提交
4045
		device->discipline->freeze(device);
4046 4047 4048 4049 4050 4051

	/* disallow new I/O  */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);

	return dasd_generic_requeue_all_requests(device);
}
4052 4053 4054 4055 4056 4057 4058 4059 4060 4061
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);

int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

4062
	/* allow new IO again */
4063 4064
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
4065

4066 4067
	dasd_schedule_device_bh(device);

4068 4069 4070 4071 4072
	/*
	 * call discipline restore function
	 * if device is stopped do nothing e.g. for disconnected devices
	 */
	if (device->discipline->restore && !(device->stopped))
4073
		rc = device->discipline->restore(device);
4074
	if (rc || device->stopped)
4075 4076 4077 4078 4079
		/*
		 * if the resume failed for the DASD we put it in
		 * an UNRESUMED stop state
		 */
		device->stopped |= DASD_UNRESUMED_PM;
4080

S
Stefan Haberland 已提交
4081
	if (device->block) {
4082
		dasd_schedule_block_bh(device->block);
4083 4084 4085
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
S
Stefan Haberland 已提交
4086
	}
4087

4088
	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
4089
	dasd_put_device(device);
4090
	return 0;
4091 4092 4093
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);

H
Heiko Carstens 已提交
4094 4095
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
4096
						   int magic)
4097 4098 4099 4100
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

4101 4102
	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);
4103 4104

	if (IS_ERR(cqr)) {
S
Stefan Haberland 已提交
4105 4106 4107 4108
		/* internal error 13 - Allocating the RDC request failed*/
		dev_err(&device->cdev->dev,
			 "An error occurred in the DASD device driver, "
			 "reason=%s\n", "13");
4109 4110 4111 4112 4113
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
4114 4115
	ccw->cda = (__u32)(addr_t) cqr->data;
	ccw->flags = 0;
4116
	ccw->count = rdc_buffer_size;
4117 4118
	cqr->startdev = device;
	cqr->memdev = device;
4119
	cqr->expires = 10*HZ;
4120
	cqr->retries = 256;
4121
	cqr->buildclk = get_tod_clock();
4122 4123 4124 4125 4126
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}


4127
int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
4128
				void *rdc_buffer, int rdc_buffer_size)
4129 4130 4131 4132
{
	int ret;
	struct dasd_ccw_req *cqr;

4133
	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
4134 4135 4136 4137
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
4138 4139
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
4140
	dasd_sfree_request(cqr, cqr->memdev);
4141 4142
	return ret;
}
C
Cornelia Huck 已提交
4143
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
4144

4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178
/*
 *   In command mode and transport mode we need to look for sense
 *   data in different places. The sense data itself is allways
 *   an array of 32 bytes, so we can unify the sense data access
 *   for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	char *sense = NULL;
	struct tsb *tsb = NULL;

	if (!scsw_is_tm(&irb->scsw) || irb->scsw.tm.fcxs != 0x01) {
		/* command mode: sense is in the extended control word */
		if (irb->esw.esw0.erw.cons)
			sense = irb->ecw;
		return sense;
	}

	/* transport mode: sense lives in the transport status block */
	if (irb->scsw.tm.tcw)
		tsb = tcw_get_tsb((struct tcw *)(unsigned long)
				  irb->scsw.tm.tcw);
	if (tsb && tsb->length == 64 && tsb->flags) {
		switch (tsb->flags & 0x07) {
		case 1:	/* tsa_iostat */
			sense = tsb->tsa.iostat.sense;
			break;
		case 2: /* tsa_ddpc */
			sense = tsb->tsa.ddpc.sense;
			break;
		default:
			/* currently we don't use interrogate data */
			break;
		}
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195
/*
 * Shutdown callback: kick the block and device queues one last time,
 * then wait until all queues for the device have drained.
 */
void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

4196
static int __init dasd_init(void)
L
Linus Torvalds 已提交
4197 4198 4199 4200
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
4201
	init_waitqueue_head(&dasd_flush_wq);
4202
	init_waitqueue_head(&generic_waitq);
4203
	init_waitqueue_head(&shutdown_waitq);
L
Linus Torvalds 已提交
4204 4205

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
4206
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
L
Linus Torvalds 已提交
4207 4208 4209 4210 4211
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
H
Horst Hummel 已提交
4212
	debug_set_level(dasd_debug_area, DBF_WARNING);
L
Linus Torvalds 已提交
4213 4214 4215 4216 4217

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

4218 4219
	dasd_statistics_createroot();

L
Linus Torvalds 已提交
4220 4221 4222 4223 4224 4225 4226 4227 4228
	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
4229 4230 4231
	rc = dasd_eer_init();
	if (rc)
		goto failed;
L
Linus Torvalds 已提交
4232 4233 4234 4235 4236 4237 4238 4239
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
S
Stefan Haberland 已提交
4240
	pr_info("The DASD device driver could not be initialized\n");
L
Linus Torvalds 已提交
4241 4242 4243 4244 4245 4246
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);