/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2009
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *	    Michael Holzheu <holzheu@de.ibm.com>
 *	    Frank Munzert <munzert@de.ibm.com>
 */

#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/cdev.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 * is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 * is available by reading the sysfs attribute reclen. Each write() to the
 * device must specify an integral multiple of reclen; at most 511 records
 * (MAX_RECS_PER_IO) are transferred per I/O.
 */
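
/*
 * Example (hypothetical userspace sketch; the device node name is an
 * assumption, udev would derive it from the node_id set in ur_set_online):
 * punching two 80-byte card images through the character device of a
 * virtual punch:
 *
 *	char cards[2 * 80];
 *	int fd = open("/dev/vmpun-0.0.000d", O_WRONLY);
 *
 *	memset(cards, ' ', sizeof(cards));
 *	memcpy(cards, "HELLO WORLD", 11);
 *	if (write(fd, cards, sizeof(cards)) != sizeof(cards))
 *		perror("write");
 *	close(fd);
 */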

static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
	{ /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);
static int ur_pm_suspend(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
	.driver = {
		.name	= "vmur",
		.owner	= THIS_MODULE,
	},
	.ids		= ur_ids,
	.probe		= ur_probe,
	.remove		= ur_remove,
	.set_online	= ur_set_online,
	.set_offline	= ur_set_offline,
	.freeze		= ur_pm_suspend,
};

static DEFINE_MUTEX(vmur_mutex);

/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each ur device (urd) contains a reference to its corresponding ccw device
 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
 * ur device using dev_get_drvdata(&cdev->dev) pointer.
 *
 * urd references:
 * - ur_probe gets a urd reference, ur_remove drops the reference
 *   dev_get_drvdata(&cdev->dev)
 * - ur_open gets a urd reference, ur_release drops the reference
 *   (urf->urd)
 *
 * cdev references:
 * - urdev_alloc gets a cdev reference (urd->cdev)
 * - urdev_free drops the cdev reference (urd->cdev)
 *
 * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the ccwdev lock
 */
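
/*
 * A typical reference lifecycle, as a sketch:
 *
 *	ur_probe:	urdev_alloc		ref_count = 1
 *	ur_open:	urdev_get_from_devno	ref_count = 2
 *	ur_release:	urdev_put		ref_count = 1
 *	ur_remove:	urdev_put		ref_count = 0 -> urdev_free
 */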
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
	struct urdev *urd;

	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
	if (!urd)
		return NULL;
	urd->reclen = cdev->id.driver_info;
	ccw_device_get_id(cdev, &urd->dev_id);
	mutex_init(&urd->io_mutex);
	init_waitqueue_head(&urd->wait);
	spin_lock_init(&urd->open_lock);
	atomic_set(&urd->ref_count, 1);
	urd->cdev = cdev;
	get_device(&cdev->dev);
	return urd;
}

static void urdev_free(struct urdev *urd)
{
	TRACE("urdev_free: %p\n", urd);
	if (urd->cdev)
		put_device(&urd->cdev->dev);
	kfree(urd);
}

static void urdev_get(struct urdev *urd)
{
	atomic_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
	struct urdev *urd;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urd = dev_get_drvdata(&cdev->dev);
	if (urd)
		urdev_get(urd);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return urd;
}

static struct urdev *urdev_get_from_devno(u16 devno)
{
	char bus_id[16];
	struct ccw_device *cdev;
	struct urdev *urd;

	sprintf(bus_id, "0.0.%04x", devno);
	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
	if (!cdev)
		return NULL;
	urd = urdev_get_from_cdev(cdev);
	put_device(&cdev->dev);
	return urd;
}

static void urdev_put(struct urdev *urd)
{
	if (atomic_dec_and_test(&urd->ref_count))
		urdev_free(urd);
}

/*
 * State and contents of ur devices can be changed by class D users issuing
 * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
 * Also the Linux guest might be logged off, which causes all active spool
 * files to be closed.
 * So we cannot guarantee that spool files are still the same when the Linux
 * guest is resumed. In order to avoid unpredictable results at resume time
 * we simply refuse to suspend if a ur device node is open.
 */
static int ur_pm_suspend(struct ccw_device *cdev)
{
	struct urdev *urd = dev_get_drvdata(&cdev->dev);

	TRACE("ur_pm_suspend: cdev=%p\n", cdev);
	if (urd->open_flag) {
		pr_err("Unit record device %s is busy, %s refusing to "
		       "suspend.\n", dev_name(&cdev->dev), ur_banner);
		return -EBUSY;
	}
	return 0;
}

/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 * on a completion event it publishes at urd->io_done. The function
 * serialises itself on the device's mutex so that only one I/O
 * is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, writes the
 * subchannel status word into the scsw member of the urdev structure
 * and complete()s the io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for kfree()ing the channel program
 * address pointer that alloc_chan_prog returned.
 */
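
/*
 * Control flow of one synchronous I/O, as a sketch:
 *
 *	do_ur_io:	ccw_device_start(cdev, cpa, ...)
 *			wait_for_completion(&event)
 *	ur_int_handler:	urd->io_request_rc = 0 / -EIO / ...
 *			complete(urd->io_done)
 *	do_ur_io:	returns 0; the caller then checks io_request_rc
 */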

static void free_chan_prog(struct ccw1 *cpa)
{
	struct ccw1 *ptr = cpa;

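	/* free each record buffer; the terminating NOP CCW has cda == 0 */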
	while (ptr->cda) {
		kfree((void *)(addr_t) ptr->cda);
		ptr++;
	}
	kfree(cpa);
}

/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
				    int reclen)
{
	struct ccw1 *cpa;
	void *kbuf;
	int i;

	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

	/*
	 * We chain a NOP onto the writes to force CE+DE together.
	 * That means we allocate room for CCWs to cover count/reclen
	 * records plus a NOP.
	 */
	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
		      GFP_KERNEL | GFP_DMA);
	if (!cpa)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < rec_count; i++) {
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
		if (!kbuf) {
			free_chan_prog(cpa);
			return ERR_PTR(-ENOMEM);
		}
		cpa[i].cda = (u32)(addr_t) kbuf;
		if (copy_from_user(kbuf, ubuf, reclen)) {
			free_chan_prog(cpa);
			return ERR_PTR(-EFAULT);
		}
		ubuf += reclen;
	}
	/* The following NOP CCW forces CE+DE to be presented together */
	cpa[i].cmd_code = CCW_CMD_NOOP;
	return cpa;
}
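
/*
 * For rec_count == 2 the channel program built above looks like this
 * (sketch):
 *
 *	cpa[0]: WRITE_CCW_CMD, CC|SLI, count = reclen, cda -> 1st record
 *	cpa[1]: WRITE_CCW_CMD, CC|SLI, count = reclen, cda -> 2nd record
 *	cpa[2]: CCW_CMD_NOOP, cda = 0 (the sentinel free_chan_prog stops at)
 */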

static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	urd->io_done = &event;

	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	kstat_cpu(smp_processor_id()).irqs[IOINT_VMR]++;
	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
	      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
	      irb->scsw.cmd.count);

	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");
		return;
	}
	urd = dev_get_drvdata(&cdev->dev);
	BUG_ON(!urd);
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		urd->io_request_rc = 0;
	else
		urd->io_request_rc = -EIO;

	complete(urd->io_done);
}

/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct urdev *urd;
	int rc;

	urd = urdev_get_from_cdev(to_ccwdev(dev));
	if (!urd)
		return -ENODEV;
	rc = sprintf(buf, "%zu\n", urd->reclen);
	urdev_put(urd);
	return rc;
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
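
/*
 * Userspace can then query the record length through sysfs, e.g.
 * (bus id made up):
 *
 *	$ cat /sys/bus/ccw/devices/0.0.000d/reclen
 *	80
 */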

static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}

/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  The virtual device exists, but is not associated with a real device
 * cc=3  Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
	static struct diag210 ur_diag210;
	int cc;

	ur_diag210.vrdcdvno = urd->dev_id.devno;
	ur_diag210.vrdclen = sizeof(struct diag210);

	cc = diag210(&ur_diag210);
	switch (cc) {
	case 0:
		return -EOPNOTSUPP;
	case 2:
		return ur_diag210.vrdcvcla; /* virtual device class */
	case 3:
		return -ENODEV;
	default:
		return -EIO;
	}
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
	struct urfile *urf;

	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
	if (!urf)
		return NULL;
	urf->urd = urd;

	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
	      urf->dev_reclen);

	return urf;
}

static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
			size_t count, size_t reclen, loff_t *ppos)
{
	struct ccw1 *cpa;
	int rc;

	cpa = alloc_chan_prog(udata, count / reclen, reclen);
	if (IS_ERR(cpa))
		return PTR_ERR(cpa);

	rc = do_ur_io(urd, cpa);
	if (rc)
		goto fail_kfree_cpa;

	if (urd->io_request_rc) {
		rc = urd->io_request_rc;
		goto fail_kfree_cpa;
	}
	*ppos += count;
	rc = count;

fail_kfree_cpa:
	free_chan_prog(cpa);
	return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
			size_t count, loff_t *ppos)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_write: count=%zu\n", count);

	if (count == 0)
		return 0;

	if (count % urf->dev_reclen)
		return -EINVAL;	/* count must be a multiple of reclen */

	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
		count = urf->dev_reclen * MAX_RECS_PER_IO;

	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}

/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *				       record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
	int cc;

	cc = diag14(record, devno, 0x28);
	switch (cc) {
	case 0:
		return 0;
	case 2:
		return -ENOMEDIUM;
	case 3:
		return -ENODATA; /* position beyond end of file */
	default:
		return -EIO;
	}
}

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *	 reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
	int cc;

	cc = diag14((unsigned long) buf, devno, 0x00);
	switch (cc) {
	case 0:
		return 0;
	case 1:
		return -ENODATA;
	case 2:
		return -ENOMEDIUM;
	default:
		return -EIO;
	}
}

static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	res = (size_t) (*offs % PAGE_SIZE);
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA)
			break;
		if (rc)
			goto fail;
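		/* make the record length available in the first page read */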
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;
		copied += len;
	} while (copied != count);

	*offs += copied;
	rc = copied;
fail:
	free_page((unsigned long) buf);
	return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
		       loff_t *offs)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

	if (count == 0)
		return 0;

	urd = ((struct urfile *) file->private_data)->urd;
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;
	rc = diag14_read(file, ubuf, count, offs);
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
	int cc;

	cc = diag14((unsigned long) buf, spid, 0xfff);
	switch (cc) {
	case 0:
		return 0;
	default:
		return -ENODATA;
	}
}

static int verify_uri_device(struct urdev *urd)
{
	struct file_control_block *fcb;
	char *buf;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;

	/* check for empty reader device (beginning of chain) */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_fcb;

	/* if file is in hold status, we do not read it */
	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
		rc = -EPERM;
		goto fail_free_fcb;
	}

	/* open file on virtual reader	*/
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf) {
		rc = -ENOMEM;
		goto fail_free_fcb;
	}
	rc = diag_read_file(urd->dev_id.devno, buf);
	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
		goto fail_free_buf;

	/* check if the file on top of the queue is open now */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_buf;
	if (!(fcb->file_stat & FLG_IN_USE)) {
		rc = -EMFILE;
		goto fail_free_buf;
	}
	rc = 0;

fail_free_buf:
	free_page((unsigned long) buf);
fail_free_fcb:
	kfree(fcb);
	return rc;
}

static int verify_device(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0; /* no check needed here */
	case DEV_CLASS_UR_I:
		return verify_uri_device(urd);
	default:
		return -EOPNOTSUPP;
	}
}

static int get_uri_file_reclen(struct urdev *urd)
{
	struct file_control_block *fcb;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free;
	if (fcb->file_stat & FLG_CP_DUMP)
		rc = 0;
	else
		rc = fcb->rec_len;

fail_free:
	kfree(fcb);
	return rc;
}

static int get_file_reclen(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;
	case DEV_CLASS_UR_I:
		return get_uri_file_reclen(urd);
	default:
		return -EOPNOTSUPP;
	}
}

static int ur_open(struct inode *inode, struct file *file)
{
	u16 devno;
	struct urdev *urd;
	struct urfile *urf;
	unsigned short accmode;
	int rc;

	accmode = file->f_flags & O_ACCMODE;

	if (accmode == O_RDWR)
		return -EACCES;
	/*
	 * We treat the minor number as the devno of the ur device
	 * to find in the driver tree.
	 */
	devno = MINOR(file->f_dentry->d_inode->i_rdev);

	urd = urdev_get_from_devno(devno);
	if (!urd) {
		rc = -ENXIO;
		goto out;
	}

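	/* allow only one open at a time; block, or return -EBUSY for O_NONBLOCK */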
	spin_lock(&urd->open_lock);
	while (urd->open_flag) {
		spin_unlock(&urd->open_lock);
		if (file->f_flags & O_NONBLOCK) {
			rc = -EBUSY;
			goto fail_put;
		}
		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
			rc = -ERESTARTSYS;
			goto fail_put;
		}
		spin_lock(&urd->open_lock);
	}
	urd->open_flag++;
	spin_unlock(&urd->open_lock);

	TRACE("ur_open\n");

	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
		rc = -EACCES;
		goto fail_unlock;
	}

	rc = verify_device(urd);
	if (rc)
		goto fail_unlock;

	urf = urfile_alloc(urd);
	if (!urf) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	urf->dev_reclen = urd->reclen;
	rc = get_file_reclen(urd);
	if (rc < 0)
		goto fail_urfile_free;
	urf->file_reclen = rc;
	file->private_data = urf;
	return 0;

fail_urfile_free:
	urfile_free(urf);
fail_unlock:
	spin_lock(&urd->open_lock);
	urd->open_flag--;
	spin_unlock(&urd->open_lock);
fail_put:
	urdev_put(urd);
out:
	return rc;
}

static int ur_release(struct inode *inode, struct file *file)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_release\n");
	spin_lock(&urf->urd->open_lock);
	urf->urd->open_flag--;
	spin_unlock(&urf->urd->open_lock);
	wake_up_interruptible(&urf->urd->wait);
	urdev_put(urf->urd);
	urfile_free(urf);
	return 0;
}

static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t newpos;

	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
		return -ESPIPE; /* seek allowed only for reader */
	if (offset % PAGE_SIZE)
		return -ESPIPE; /* only multiples of 4K allowed */
	switch (whence) {
	case 0: /* SEEK_SET */
		newpos = offset;
		break;
	case 1: /* SEEK_CUR */
		newpos = file->f_pos + offset;
		break;
	default:
		return -EINVAL;
	}
	file->f_pos = newpos;
	return newpos;
}
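
/*
 * Example (sketch): on a reader, lseek(fd, PAGE_SIZE, SEEK_SET) positions
 * to the second spool buffer page, while lseek(fd, 100, SEEK_SET) fails
 * with ESPIPE because the offset is not a multiple of 4K.
 */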

static const struct file_operations ur_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ur_open,
	.release = ur_release,
	.read	 = ur_read,
	.write	 = ur_write,
	.llseek  = ur_llseek,
};

/*
 * ccw_device infrastructure:
 *     ur_probe creates the struct urdev (with refcount = 1), the device
 *     attributes, sets up the interrupt handler and validates the virtual
 *     unit record device.
 *     ur_remove removes the device attributes and drops the reference to
 *     struct urdev.
 *
 *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 *     by the vmur_mutex lock.
 *
 *     urd->char_device is used as indication that the online function has
 *     been completed successfully.
 */
static int ur_probe(struct ccw_device *cdev)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_probe: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_alloc(cdev);
	if (!urd) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	rc = ur_create_attributes(&cdev->dev);
	if (rc) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}
	cdev->handler = ur_int_handler;

	/* validate virtual unit record device */
	urd->class = get_urd_class(urd);
	if (urd->class < 0) {
		rc = urd->class;
		goto fail_remove_attr;
	}
	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
		rc = -EOPNOTSUPP;
		goto fail_remove_attr;
	}
	spin_lock_irq(get_ccwdev_lock(cdev));
	dev_set_drvdata(&cdev->dev, urd);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	mutex_unlock(&vmur_mutex);
	return 0;

fail_remove_attr:
	ur_remove_attributes(&cdev->dev);
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}

static int ur_set_online(struct ccw_device *cdev)
{
	struct urdev *urd;
	int minor, major, rc;
	char node_id[16];

	TRACE("ur_set_online: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_get_from_cdev(cdev);
	if (!urd) {
		/* ur_remove already deleted our urd */
		rc = -ENODEV;
		goto fail_unlock;
	}

	if (urd->char_device) {
		/* Another ur_set_online was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}

	minor = urd->dev_id.devno;
	major = MAJOR(ur_first_dev_maj_min);

	urd->char_device = cdev_alloc();
	if (!urd->char_device) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}

	cdev_init(urd->char_device, &ur_fops);
	urd->char_device->dev = MKDEV(major, minor);
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
	if (rc)
		goto fail_free_cdev;
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
		if (urd->class == DEV_CLASS_UR_I)
			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
		if (urd->class == DEV_CLASS_UR_O)
			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
	} else {
		rc = -EOPNOTSUPP;
		goto fail_free_cdev;
	}

	urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
				    NULL, "%s", node_id);
	if (IS_ERR(urd->device)) {
		rc = PTR_ERR(urd->device);
		TRACE("ur_set_online: device_create rc=%d\n", rc);
		goto fail_free_cdev;
	}
	urdev_put(urd);
	mutex_unlock(&vmur_mutex);
	return 0;

fail_free_cdev:
	cdev_del(urd->char_device);
	urd->char_device = NULL;
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}

static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_set_offline: cdev=%p\n", cdev);
	urd = urdev_get_from_cdev(cdev);
	if (!urd)
		/* ur_remove already deleted our urd */
		return -ENODEV;
	if (!urd->char_device) {
		/* Another ur_set_offline was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	if (!force && (atomic_read(&urd->ref_count) > 2)) {
		/* There is still a user of urd (e.g. ur_open) */
		TRACE("ur_set_offline: BUSY\n");
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	device_destroy(vmur_class, urd->char_device->dev);
	cdev_del(urd->char_device);
	urd->char_device = NULL;
	rc = 0;

fail_urdev_put:
	urdev_put(urd);
	return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
	int rc;

	mutex_lock(&vmur_mutex);
	rc = ur_set_offline_force(cdev, 0);
	mutex_unlock(&vmur_mutex);
	return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
	unsigned long flags;

	TRACE("ur_remove\n");

	mutex_lock(&vmur_mutex);

	if (cdev->online)
		ur_set_offline_force(cdev, 1);
	ur_remove_attributes(&cdev->dev);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urdev_put(dev_get_drvdata(&cdev->dev));
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	mutex_unlock(&vmur_mutex);
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		pr_err("The %s cannot be loaded without z/VM\n",
		       ur_banner);
		return -ENODEV;
	}

	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;
	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	vmur_class = class_create(THIS_MODULE, "vmur");
	if (IS_ERR(vmur_class)) {
		rc = PTR_ERR(vmur_class);
		goto fail_free_dbf;
	}

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_class_destroy;

	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		pr_err("Kernel function alloc_chrdev_region failed with "
		       "error code %d\n", rc);
		goto fail_unregister_driver;
	}
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	pr_info("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_class_destroy:
	class_destroy(vmur_class);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}

static void __exit ur_exit(void)
{
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	class_destroy(vmur_class);
	debug_unregister(vmur_dbf);
	pr_info("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);