commit ba7a4822 authored by Stephen Rothwell, committed by Benjamin Herrenschmidt

powerpc: Remove some of the legacy iSeries specific device drivers

These drivers are specific to the PowerPC legacy iSeries platform and
their Kconfig is specified in arch/powerpc.  Legacy iSeries is being
removed, so these drivers can no longer be selected.

Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent aba0eb84
/* -*- linux-c -*-
* viodasd.c
* Authors: Dave Boutcher <boutcher@us.ibm.com>
* Ryan Arnold <ryanarn@us.ibm.com>
* Colin Devilbiss <devilbis@us.ibm.com>
* Stephen Rothwell
*
* (C) Copyright 2000-2004 IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* This routine provides access to disk space (termed "DASD" in historical
* IBM terms) owned and managed by an OS/400 partition running on the
* same box as this Linux partition.
*
* All disk operations are performed by sending messages back and forth to
* the OS/400 partition.
*/
#define pr_fmt(fmt) "viod: " fmt
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <asm/uaccess.h>
#include <asm/vio.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/vio.h>
#include <asm/firmware.h>
MODULE_DESCRIPTION("iSeries Virtual DASD");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
/*
* We only support 7 partitions per physical disk, so with minor
* numbers 0-255 we get a maximum of 32 disks.
*/
#define VIOD_GENHD_NAME "iseries/vd"
#define VIOD_VERS "1.64"
enum {
PARTITION_SHIFT = 3,
MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS,
MAX_DISK_NAME = FIELD_SIZEOF(struct gendisk, disk_name)
};
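/*
* Illustrative minor-number arithmetic: with PARTITION_SHIFT = 3 each
* disk owns 8 minors, so minor 26 maps to disk 3 (26 >> 3), partition
* 2 (26 & 7); a partition index of 0 is the whole disk.
*/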
static DEFINE_MUTEX(viodasd_mutex);
static DEFINE_SPINLOCK(viodasd_spinlock);
#define VIOMAXREQ 16
#define DEVICE_NO(cell) ((struct viodasd_device *)(cell) - &viodasd_devices[0])
struct viodasd_waitevent {
struct completion com;
int rc;
u16 sub_result;
int max_disk; /* open */
};
static const struct vio_error_entry viodasd_err_table[] = {
{ 0x0201, EINVAL, "Invalid Range" },
{ 0x0202, EINVAL, "Invalid Token" },
{ 0x0203, EIO, "DMA Error" },
{ 0x0204, EIO, "Use Error" },
{ 0x0205, EIO, "Release Error" },
{ 0x0206, EINVAL, "Invalid Disk" },
{ 0x0207, EBUSY, "Can't Lock" },
{ 0x0208, EIO, "Already Locked" },
{ 0x0209, EIO, "Already Unlocked" },
{ 0x020A, EIO, "Invalid Arg" },
{ 0x020B, EIO, "Bad IFS File" },
{ 0x020C, EROFS, "Read Only Device" },
{ 0x02FF, EIO, "Internal Error" },
{ 0x0000, 0, NULL },
};
/*
* Figure out the biggest I/O request (in sectors) we can accept
*/
#define VIODASD_MAXSECTORS (4096 / 512 * VIOMAXBLOCKDMA)
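/*
* Each DMA entry covers one 4096-byte block (eight 512-byte sectors),
* so a request tops out at 8 * VIOMAXBLOCKDMA sectors.
*/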
/*
* Number of disk I/O requests we've sent to OS/400
*/
static int num_req_outstanding;
/*
* This is our internal structure for keeping track of disk devices
*/
struct viodasd_device {
u16 cylinders;
u16 tracks;
u16 sectors;
u16 bytes_per_sector;
u64 size;
int read_only;
spinlock_t q_lock;
struct gendisk *disk;
struct device *dev;
} viodasd_devices[MAX_DISKNO];
/*
* External open entry point.
*/
static int viodasd_open(struct block_device *bdev, fmode_t mode)
{
struct viodasd_device *d = bdev->bd_disk->private_data;
HvLpEvent_Rc hvrc;
struct viodasd_waitevent we;
u16 flags = 0;
if (d->read_only) {
if (mode & FMODE_WRITE)
return -EROFS;
flags = vioblockflags_ro;
}
init_completion(&we.com);
/* Send the open event to OS/400 */
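/* The payload word built below packs the device number into bits
* 63..48 and the open flags into bits 47..32 of the event data. */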
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_blockio | vioblockopen,
HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)&we, VIOVERSION << 16,
((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32),
0, 0, 0);
if (hvrc != 0) {
pr_warning("HV open failed %d\n", (int)hvrc);
return -EIO;
}
wait_for_completion(&we.com);
/* Check the return code */
if (we.rc != 0) {
const struct vio_error_entry *err =
vio_lookup_rc(viodasd_err_table, we.sub_result);
pr_warning("bad rc opening disk: %d:0x%04x (%s)\n",
(int)we.rc, we.sub_result, err->msg);
return -EIO;
}
return 0;
}
static int viodasd_unlocked_open(struct block_device *bdev, fmode_t mode)
{
int ret;
mutex_lock(&viodasd_mutex);
ret = viodasd_open(bdev, mode);
mutex_unlock(&viodasd_mutex);
return ret;
}
/*
* External release entry point.
*/
static int viodasd_release(struct gendisk *disk, fmode_t mode)
{
struct viodasd_device *d = disk->private_data;
HvLpEvent_Rc hvrc;
mutex_lock(&viodasd_mutex);
/* Send the event to OS/400. We DON'T expect a response */
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_blockio | vioblockclose,
HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
0, VIOVERSION << 16,
((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */,
0, 0, 0);
if (hvrc != 0)
pr_warning("HV close call failed %d\n", (int)hvrc);
mutex_unlock(&viodasd_mutex);
return 0;
}
/*
 * External getgeo entry point.
 */
static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct gendisk *disk = bdev->bd_disk;
struct viodasd_device *d = disk->private_data;
geo->sectors = d->sectors ? d->sectors : 32;
geo->heads = d->tracks ? d->tracks : 64;
geo->cylinders = d->cylinders ? d->cylinders :
get_capacity(disk) / (geo->sectors * geo->heads);
return 0;
}
/*
* Our file operations table
*/
static const struct block_device_operations viodasd_fops = {
.owner = THIS_MODULE,
.open = viodasd_unlocked_open,
.release = viodasd_release,
.getgeo = viodasd_getgeo,
};
/*
* End a request
*/
static void viodasd_end_request(struct request *req, int error,
int num_sectors)
{
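/* __blk_end_request() takes a byte count, so convert 512-byte sectors */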
__blk_end_request(req, error, num_sectors << 9);
}
/*
* Send an actual I/O request to OS/400
*/
static int send_request(struct request *req)
{
u64 start;
int direction;
int nsg;
u16 viocmd;
HvLpEvent_Rc hvrc;
struct vioblocklpevent *bevent;
struct HvLpEvent *hev;
struct scatterlist sg[VIOMAXBLOCKDMA];
int sgindex;
struct viodasd_device *d;
unsigned long flags;
start = (u64)blk_rq_pos(req) << 9;
if (rq_data_dir(req) == READ) {
direction = DMA_FROM_DEVICE;
viocmd = viomajorsubtype_blockio | vioblockread;
} else {
direction = DMA_TO_DEVICE;
viocmd = viomajorsubtype_blockio | vioblockwrite;
}
d = req->rq_disk->private_data;
/* Now build the scatter-gather list */
sg_init_table(sg, VIOMAXBLOCKDMA);
nsg = blk_rq_map_sg(req->q, req, sg);
nsg = dma_map_sg(d->dev, sg, nsg, direction);
spin_lock_irqsave(&viodasd_spinlock, flags);
num_req_outstanding++;
/* This optimization handles a single DMA block */
if (nsg == 1)
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo, viocmd,
HvLpEvent_AckInd_DoAck,
HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)req, VIOVERSION << 16,
((u64)DEVICE_NO(d) << 48), start,
((u64)sg_dma_address(&sg[0])) << 32,
sg_dma_len(&sg[0]));
else {
bevent = (struct vioblocklpevent *)
vio_get_event_buffer(viomajorsubtype_blockio);
if (bevent == NULL) {
pr_warning("error allocating disk event buffer\n");
goto error_ret;
}
/*
* Now build up the actual request. Note that we store
* the pointer to the request in the correlation
* token so we can match the response up later
*/
memset(bevent, 0, sizeof(struct vioblocklpevent));
hev = &bevent->event;
hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK |
HV_LP_EVENT_INT;
hev->xType = HvLpEvent_Type_VirtualIo;
hev->xSubtype = viocmd;
hev->xSourceLp = HvLpConfig_getLpIndex();
hev->xTargetLp = viopath_hostLp;
hev->xSizeMinus1 =
offsetof(struct vioblocklpevent, u.rw_data.dma_info) +
(sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1;
hev->xSourceInstanceId = viopath_sourceinst(viopath_hostLp);
hev->xTargetInstanceId = viopath_targetinst(viopath_hostLp);
hev->xCorrelationToken = (u64)req;
bevent->version = VIOVERSION;
bevent->disk = DEVICE_NO(d);
bevent->u.rw_data.offset = start;
/*
* Copy just the dma information from the sg list
* into the request
*/
for (sgindex = 0; sgindex < nsg; sgindex++) {
bevent->u.rw_data.dma_info[sgindex].token =
sg_dma_address(&sg[sgindex]);
bevent->u.rw_data.dma_info[sgindex].len =
sg_dma_len(&sg[sgindex]);
}
/* Send the request */
hvrc = HvCallEvent_signalLpEvent(&bevent->event);
vio_free_event_buffer(viomajorsubtype_blockio, bevent);
}
if (hvrc != HvLpEvent_Rc_Good) {
pr_warning("error sending disk event to OS/400 (rc %d)\n",
(int)hvrc);
goto error_ret;
}
spin_unlock_irqrestore(&viodasd_spinlock, flags);
return 0;
error_ret:
num_req_outstanding--;
spin_unlock_irqrestore(&viodasd_spinlock, flags);
dma_unmap_sg(d->dev, sg, nsg, direction);
return -1;
}
/*
* This is the external request processing routine
*/
static void do_viodasd_request(struct request_queue *q)
{
struct request *req;
/*
* If we already have the maximum number of requests
* outstanding to OS/400 just bail out. We'll come
* back later.
*/
while (num_req_outstanding < VIOMAXREQ) {
req = blk_fetch_request(q);
if (req == NULL)
return;
/* check that request contains a valid command */
if (req->cmd_type != REQ_TYPE_FS) {
viodasd_end_request(req, -EIO, blk_rq_sectors(req));
continue;
}
/* Try sending the request */
if (send_request(req) != 0)
viodasd_end_request(req, -EIO, blk_rq_sectors(req));
}
}
/*
* Probe a single disk and fill in the viodasd_device structure
* for it.
*/
static int probe_disk(struct viodasd_device *d)
{
HvLpEvent_Rc hvrc;
struct viodasd_waitevent we;
int dev_no = DEVICE_NO(d);
struct gendisk *g;
struct request_queue *q;
u16 flags = 0;
retry:
init_completion(&we.com);
/* Send the open event to OS/400 */
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_blockio | vioblockopen,
HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)&we, VIOVERSION << 16,
((u64)dev_no << 48) | ((u64)flags << 32),
0, 0, 0);
if (hvrc != 0) {
pr_warning("bad rc on HV open %d\n", (int)hvrc);
return 0;
}
wait_for_completion(&we.com);
if (we.rc != 0) {
if (flags != 0)
return 0;
/* try again with read only flag set */
flags = vioblockflags_ro;
goto retry;
}
if (we.max_disk > (MAX_DISKNO - 1)) {
printk_once(KERN_INFO pr_fmt("Only examining the first %d of %d disks connected\n"),
MAX_DISKNO, we.max_disk + 1);
}
/* Send the close event to OS/400. We DON'T expect a response */
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_blockio | vioblockclose,
HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
0, VIOVERSION << 16,
((u64)dev_no << 48) | ((u64)flags << 32),
0, 0, 0);
if (hvrc != 0) {
pr_warning("bad rc sending event to OS/400 %d\n", (int)hvrc);
return 0;
}
if (d->dev == NULL) {
/* this is when we reprobe for new disks */
if (vio_create_viodasd(dev_no) == NULL) {
pr_warning("cannot allocate virtual device for disk %d\n",
dev_no);
return 0;
}
/*
* The vio_create_viodasd will have recursed into this
* routine with d->dev set to the new vio device and
* will finish the setup of the disk below.
*/
return 1;
}
/* create the request queue for the disk */
spin_lock_init(&d->q_lock);
q = blk_init_queue(do_viodasd_request, &d->q_lock);
if (q == NULL) {
pr_warning("cannot allocate queue for disk %d\n", dev_no);
return 0;
}
g = alloc_disk(1 << PARTITION_SHIFT);
if (g == NULL) {
pr_warning("cannot allocate disk structure for disk %d\n",
dev_no);
blk_cleanup_queue(q);
return 0;
}
d->disk = g;
blk_queue_max_segments(q, VIOMAXBLOCKDMA);
blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
g->major = VIODASD_MAJOR;
g->first_minor = dev_no << PARTITION_SHIFT;
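/* Disks 0-25 are "iseries/vda".."iseries/vdz"; disk 26 and up get
* two-letter suffixes, e.g. disk 26 is "iseries/vdaa". */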
if (dev_no >= 26)
snprintf(g->disk_name, sizeof(g->disk_name),
VIOD_GENHD_NAME "%c%c",
'a' + (dev_no / 26) - 1, 'a' + (dev_no % 26));
else
snprintf(g->disk_name, sizeof(g->disk_name),
VIOD_GENHD_NAME "%c", 'a' + (dev_no % 26));
g->fops = &viodasd_fops;
g->queue = q;
g->private_data = d;
g->driverfs_dev = d->dev;
set_capacity(g, d->size >> 9);
pr_info("disk %d: %lu sectors (%lu MB) CHS=%d/%d/%d sector size %d%s\n",
dev_no, (unsigned long)(d->size >> 9),
(unsigned long)(d->size >> 20),
(int)d->cylinders, (int)d->tracks,
(int)d->sectors, (int)d->bytes_per_sector,
d->read_only ? " (RO)" : "");
/* register us in the global list */
add_disk(g);
return 1;
}
/* returns the total number of scatterlist elements converted */
static int block_event_to_scatterlist(const struct vioblocklpevent *bevent,
struct scatterlist *sg, int *total_len)
{
int i, numsg;
const struct rw_data *rw_data = &bevent->u.rw_data;
static const int offset =
offsetof(struct vioblocklpevent, u.rw_data.dma_info);
static const int element_size = sizeof(rw_data->dma_info[0]);
numsg = ((bevent->event.xSizeMinus1 + 1) - offset) / element_size;
if (numsg > VIOMAXBLOCKDMA)
numsg = VIOMAXBLOCKDMA;
*total_len = 0;
sg_init_table(sg, VIOMAXBLOCKDMA);
for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) {
sg_dma_address(&sg[i]) = rw_data->dma_info[i].token;
sg_dma_len(&sg[i]) = rw_data->dma_info[i].len;
*total_len += rw_data->dma_info[i].len;
}
return i;
}
/*
* Restart all queues, starting with the one _after_ the disk given,
* thus reducing the chance of starvation of higher numbered disks.
*/
static void viodasd_restart_all_queues_starting_from(int first_index)
{
int i;
for (i = first_index + 1; i < MAX_DISKNO; ++i)
if (viodasd_devices[i].disk)
blk_run_queue(viodasd_devices[i].disk->queue);
for (i = 0; i <= first_index; ++i)
if (viodasd_devices[i].disk)
blk_run_queue(viodasd_devices[i].disk->queue);
}
/*
* For read and write requests, decrement the number of outstanding
* requests and free the DMA buffers we allocated.
*/
static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
{
int num_sg, num_sect, pci_direction, total_len;
struct request *req;
struct scatterlist sg[VIOMAXBLOCKDMA];
struct HvLpEvent *event = &bevent->event;
unsigned long irq_flags;
struct viodasd_device *d;
int error;
spinlock_t *qlock;
num_sg = block_event_to_scatterlist(bevent, sg, &total_len);
num_sect = total_len >> 9;
if (event->xSubtype == (viomajorsubtype_blockio | vioblockread))
pci_direction = DMA_FROM_DEVICE;
else
pci_direction = DMA_TO_DEVICE;
req = (struct request *)bevent->event.xCorrelationToken;
d = req->rq_disk->private_data;
dma_unmap_sg(d->dev, sg, num_sg, pci_direction);
/*
* Since this is running in interrupt mode, we need to make sure
* we're not stepping on any global I/O operations
*/
spin_lock_irqsave(&viodasd_spinlock, irq_flags);
num_req_outstanding--;
spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);
error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO;
if (error) {
const struct vio_error_entry *err;
err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
pr_warning("read/write error %d:0x%04x (%s)\n",
event->xRc, bevent->sub_result, err->msg);
num_sect = blk_rq_sectors(req);
}
qlock = req->q->queue_lock;
spin_lock_irqsave(qlock, irq_flags);
viodasd_end_request(req, error, num_sect);
spin_unlock_irqrestore(qlock, irq_flags);
/* Finally, try to get more requests off of this device's queue */
viodasd_restart_all_queues_starting_from(DEVICE_NO(d));
return 0;
}
/* This routine handles incoming block LP events */
static void handle_block_event(struct HvLpEvent *event)
{
struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
struct viodasd_waitevent *pwe;
if (event == NULL)
/* Notification that a partition went away! */
return;
/* First, we should NEVER get an int here...only acks */
if (hvlpevent_is_int(event)) {
pr_warning("Yikes! got an int in viodasd event handler!\n");
if (hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
}
switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
case vioblockopen:
/*
* Handle a response to an open request. We get all the
* disk information in the response, so update it. The
* correlation token contains a pointer to a waitevent
* structure that has a completion in it. Update the
* return code in the waitevent structure and post the
* completion to wake up the guy who sent the request.
*/
pwe = (struct viodasd_waitevent *)event->xCorrelationToken;
pwe->rc = event->xRc;
pwe->sub_result = bevent->sub_result;
if (event->xRc == HvLpEvent_Rc_Good) {
const struct open_data *data = &bevent->u.open_data;
struct viodasd_device *device =
&viodasd_devices[bevent->disk];
device->read_only =
bevent->flags & vioblockflags_ro;
device->size = data->disk_size;
device->cylinders = data->cylinders;
device->tracks = data->tracks;
device->sectors = data->sectors;
device->bytes_per_sector = data->bytes_per_sector;
pwe->max_disk = data->max_disk;
}
complete(&pwe->com);
break;
case vioblockclose:
break;
case vioblockread:
case vioblockwrite:
viodasd_handle_read_write(bevent);
break;
default:
pr_warning("invalid subtype!");
if (hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
}
}
/*
* Get the driver to reprobe for more disks.
*/
static ssize_t probe_disks(struct device_driver *drv, const char *buf,
size_t count)
{
struct viodasd_device *d;
for (d = viodasd_devices; d < &viodasd_devices[MAX_DISKNO]; d++) {
if (d->disk == NULL)
probe_disk(d);
}
return count;
}
static DRIVER_ATTR(probe, S_IWUSR, NULL, probe_disks);
static int viodasd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct viodasd_device *d = &viodasd_devices[vdev->unit_address];
d->dev = &vdev->dev;
if (!probe_disk(d))
return -ENODEV;
return 0;
}
static int viodasd_remove(struct vio_dev *vdev)
{
struct viodasd_device *d;
d = &viodasd_devices[vdev->unit_address];
if (d->disk) {
del_gendisk(d->disk);
blk_cleanup_queue(d->disk->queue);
put_disk(d->disk);
d->disk = NULL;
}
d->dev = NULL;
return 0;
}
/**
* viodasd_device_table: Used by vio.c to match devices that we
* support.
*/
static struct vio_device_id viodasd_device_table[] __devinitdata = {
{ "block", "IBM,iSeries-viodasd" },
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, viodasd_device_table);
static struct vio_driver viodasd_driver = {
.id_table = viodasd_device_table,
.probe = viodasd_probe,
.remove = viodasd_remove,
.driver = {
.name = "viodasd",
.owner = THIS_MODULE,
}
};
static int need_delete_probe;
/*
* Initialize the whole device driver. Handle module and non-module
* versions
*/
static int __init viodasd_init(void)
{
int rc;
if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
rc = -ENODEV;
goto early_fail;
}
/* Try to open to our host lp */
if (viopath_hostLp == HvLpIndexInvalid)
vio_set_hostlp();
if (viopath_hostLp == HvLpIndexInvalid) {
pr_warning("invalid hosting partition\n");
rc = -EIO;
goto early_fail;
}
pr_info("vers " VIOD_VERS ", hosting partition %d\n", viopath_hostLp);
/* register the block device */
rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
if (rc) {
pr_warning("Unable to get major number %d for %s\n",
VIODASD_MAJOR, VIOD_GENHD_NAME);
goto early_fail;
}
/* Actually open the path to the hosting partition */
rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
VIOMAXREQ + 2);
if (rc) {
pr_warning("error opening path to host partition %d\n",
viopath_hostLp);
goto unregister_blk;
}
/* Initialize our request handler */
vio_setHandler(viomajorsubtype_blockio, handle_block_event);
rc = vio_register_driver(&viodasd_driver);
if (rc) {
pr_warning("vio_register_driver failed\n");
goto unset_handler;
}
/*
* If this call fails, it just means that we cannot dynamically
* add virtual disks, but the driver will still work fine for
* all existing disk, so ignore the failure.
*/
if (!driver_create_file(&viodasd_driver.driver, &driver_attr_probe))
need_delete_probe = 1;
return 0;
unset_handler:
vio_clearHandler(viomajorsubtype_blockio);
viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
unregister_blk:
unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
early_fail:
return rc;
}
module_init(viodasd_init);
void __exit viodasd_exit(void)
{
if (need_delete_probe)
driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
vio_unregister_driver(&viodasd_driver);
vio_clearHandler(viomajorsubtype_blockio);
viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
}
module_exit(viodasd_exit);
/* -*- linux-c -*-
* drivers/cdrom/viocd.c
*
* iSeries Virtual CD Rom
*
* Authors: Dave Boutcher <boutcher@us.ibm.com>
* Ryan Arnold <ryanarn@us.ibm.com>
* Colin Devilbiss <devilbis@us.ibm.com>
* Stephen Rothwell
*
* (C) Copyright 2000-2004 IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* This routine provides access to CD ROM drives owned and managed by an
* OS/400 partition running on the same box as this Linux partition.
*
* All operations are performed by sending messages back and forth to
* the OS/400 partition.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/cdrom.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <asm/vio.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/vio.h>
#include <asm/firmware.h>
#define VIOCD_DEVICE "iseries/vcd"
#define VIOCD_VERS "1.06"
/*
* Should probably make this a module parameter....sigh
*/
#define VIOCD_MAX_CD HVMAXARCHITECTEDVIRTUALCDROMS
static DEFINE_MUTEX(viocd_mutex);
static const struct vio_error_entry viocd_err_table[] = {
{0x0201, EINVAL, "Invalid Range"},
{0x0202, EINVAL, "Invalid Token"},
{0x0203, EIO, "DMA Error"},
{0x0204, EIO, "Use Error"},
{0x0205, EIO, "Release Error"},
{0x0206, EINVAL, "Invalid CD"},
{0x020C, EROFS, "Read Only Device"},
{0x020D, ENOMEDIUM, "Changed or Missing Volume (or Varied Off?)"},
{0x020E, EIO, "Optical System Error (Varied Off?)"},
{0x02FF, EIO, "Internal Error"},
{0x3010, EIO, "Changed Volume"},
{0xC100, EIO, "Optical System Error"},
{0x0000, 0, NULL},
};
/*
* This is the structure we use to exchange info between driver and interrupt
* handler
*/
struct viocd_waitevent {
struct completion com;
int rc;
u16 sub_result;
int changed;
};
/* this is a lookup table for the true capabilities of a device */
struct capability_entry {
char *type;
int capability;
};
static struct capability_entry capability_table[] __initdata = {
{ "6330", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
{ "6331", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
{ "6333", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
{ "632A", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
{ "6321", CDC_LOCK },
{ "632B", 0 },
{ NULL , CDC_LOCK },
};
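/* The NULL-typed sentinel doubles as the default entry, so device
* types not listed above are assumed to support at least CDC_LOCK. */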
/* These are our internal structures for keeping track of devices */
static int viocd_numdev;
struct disk_info {
struct gendisk *viocd_disk;
struct cdrom_device_info viocd_info;
struct device *dev;
const char *rsrcname;
const char *type;
const char *model;
};
static struct disk_info viocd_diskinfo[VIOCD_MAX_CD];
#define DEVICE_NR(di) ((di) - &viocd_diskinfo[0])
static spinlock_t viocd_reqlock;
#define MAX_CD_REQ 1
/* procfs support */
static int proc_viocd_show(struct seq_file *m, void *v)
{
int i;
for (i = 0; i < viocd_numdev; i++) {
seq_printf(m, "viocd device %d is iSeries resource %10.10s"
"type %4.4s, model %3.3s\n",
i, viocd_diskinfo[i].rsrcname,
viocd_diskinfo[i].type,
viocd_diskinfo[i].model);
}
return 0;
}
static int proc_viocd_open(struct inode *inode, struct file *file)
{
return single_open(file, proc_viocd_show, NULL);
}
static const struct file_operations proc_viocd_operations = {
.owner = THIS_MODULE,
.open = proc_viocd_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int viocd_blk_open(struct block_device *bdev, fmode_t mode)
{
struct disk_info *di = bdev->bd_disk->private_data;
int ret;
mutex_lock(&viocd_mutex);
ret = cdrom_open(&di->viocd_info, bdev, mode);
mutex_unlock(&viocd_mutex);
return ret;
}
static int viocd_blk_release(struct gendisk *disk, fmode_t mode)
{
struct disk_info *di = disk->private_data;
mutex_lock(&viocd_mutex);
cdrom_release(&di->viocd_info, mode);
mutex_unlock(&viocd_mutex);
return 0;
}
static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
struct disk_info *di = bdev->bd_disk->private_data;
int ret;
mutex_lock(&viocd_mutex);
ret = cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg);
mutex_unlock(&viocd_mutex);
return ret;
}
static unsigned int viocd_blk_check_events(struct gendisk *disk,
unsigned int clearing)
{
struct disk_info *di = disk->private_data;
return cdrom_check_events(&di->viocd_info, clearing);
}
static const struct block_device_operations viocd_fops = {
.owner = THIS_MODULE,
.open = viocd_blk_open,
.release = viocd_blk_release,
.ioctl = viocd_blk_ioctl,
.check_events = viocd_blk_check_events,
};
static int viocd_open(struct cdrom_device_info *cdi, int purpose)
{
struct disk_info *diskinfo = cdi->handle;
int device_no = DEVICE_NR(diskinfo);
HvLpEvent_Rc hvrc;
struct viocd_waitevent we;
init_completion(&we.com);
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_cdio | viocdopen,
HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
0, 0, 0);
if (hvrc != 0) {
pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
(int)hvrc);
return -EIO;
}
wait_for_completion(&we.com);
if (we.rc) {
const struct vio_error_entry *err =
vio_lookup_rc(viocd_err_table, we.sub_result);
pr_warning("bad rc %d:0x%04X on open: %s\n",
we.rc, we.sub_result, err->msg);
return -err->errno;
}
return 0;
}
static void viocd_release(struct cdrom_device_info *cdi)
{
int device_no = DEVICE_NR((struct disk_info *)cdi->handle);
HvLpEvent_Rc hvrc;
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_cdio | viocdclose,
HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp), 0,
VIOVERSION << 16, ((u64)device_no << 48), 0, 0, 0);
if (hvrc != 0)
pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
(int)hvrc);
}
/* Send a read or write request to OS/400 */
static int send_request(struct request *req)
{
HvLpEvent_Rc hvrc;
struct disk_info *diskinfo = req->rq_disk->private_data;
u64 len;
dma_addr_t dmaaddr;
int direction;
u16 cmd;
struct scatterlist sg;
BUG_ON(req->nr_phys_segments > 1);
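/*
* viocd_probe() limits the queue to a single segment via
* blk_queue_max_segments(q, 1), so one sg entry is always enough.
*/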
if (rq_data_dir(req) == READ) {
direction = DMA_FROM_DEVICE;
cmd = viomajorsubtype_cdio | viocdread;
} else {
direction = DMA_TO_DEVICE;
cmd = viomajorsubtype_cdio | viocdwrite;
}
sg_init_table(&sg, 1);
if (blk_rq_map_sg(req->q, req, &sg) == 0) {
pr_warning("error setting up scatter/gather list\n");
return -1;
}
if (dma_map_sg(diskinfo->dev, &sg, 1, direction) == 0) {
pr_warning("error allocating sg tce\n");
return -1;
}
dmaaddr = sg_dma_address(&sg);
len = sg_dma_len(&sg);
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo, cmd,
HvLpEvent_AckInd_DoAck,
HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)req, VIOVERSION << 16,
((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
(u64)blk_rq_pos(req) * 512, len, 0);
if (hvrc != HvLpEvent_Rc_Good) {
pr_warning("hv error on op %d\n", (int)hvrc);
return -1;
}
return 0;
}
static int rwreq;
static void do_viocd_request(struct request_queue *q)
{
struct request *req;
while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
if (req->cmd_type != REQ_TYPE_FS)
__blk_end_request_all(req, -EIO);
else if (send_request(req) < 0) {
pr_warning("unable to send message to OS/400!\n");
__blk_end_request_all(req, -EIO);
} else
rwreq++;
}
}
static unsigned int viocd_check_events(struct cdrom_device_info *cdi,
unsigned int clearing, int disc_nr)
{
struct viocd_waitevent we;
HvLpEvent_Rc hvrc;
int device_no = DEVICE_NR((struct disk_info *)cdi->handle);
init_completion(&we.com);
/* Send the open event to OS/400 */
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_cdio | viocdcheck,
HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
0, 0, 0);
if (hvrc != 0) {
pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
(int)hvrc);
return 0;
}
wait_for_completion(&we.com);
/* Check the return code. If bad, assume no change */
if (we.rc) {
const struct vio_error_entry *err =
vio_lookup_rc(viocd_err_table, we.sub_result);
pr_warning("bad rc %d:0x%04X on check_change: %s; Assuming no change\n",
we.rc, we.sub_result, err->msg);
return 0;
}
return we.changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}
static int viocd_lock_door(struct cdrom_device_info *cdi, int locking)
{
HvLpEvent_Rc hvrc;
u64 device_no = DEVICE_NR((struct disk_info *)cdi->handle);
/* NOTE: flags is 1 or 0 so it won't overwrite the device_no */
u64 flags = !!locking;
struct viocd_waitevent we;
init_completion(&we.com);
/* Send the lockdoor event to OS/400 */
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_cdio | viocdlockdoor,
HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)&we, VIOVERSION << 16,
(device_no << 48) | (flags << 32), 0, 0, 0);
if (hvrc != 0) {
pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
(int)hvrc);
return -EIO;
}
wait_for_completion(&we.com);
if (we.rc != 0)
return -EIO;
return 0;
}
static int viocd_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
unsigned int buflen = cgc->buflen;
int ret = -EIO;
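/*
* The real drive lives in the OS/400 partition, so emulate the few
* MMC packet commands the cdrom layer needs and answer everything
* else with an "illegal request" sense code (0x05/0x20/0x00).
*/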
switch (cgc->cmd[0]) {
case GPCMD_READ_DISC_INFO:
{
disc_information *di = (disc_information *)cgc->buffer;
if (buflen >= 2) {
di->disc_information_length = cpu_to_be16(1);
ret = 0;
}
if (buflen >= 3)
di->erasable =
(cdi->ops->capability & ~cdi->mask
& (CDC_DVD_RAM | CDC_RAM)) != 0;
}
break;
case GPCMD_GET_CONFIGURATION:
if (cgc->cmd[3] == CDF_RWRT) {
struct rwrt_feature_desc *rfd = (struct rwrt_feature_desc *)
(cgc->buffer + sizeof(struct feature_header));
if ((buflen >=
(sizeof(struct feature_header) + sizeof(*rfd))) &&
(cdi->ops->capability & ~cdi->mask
& (CDC_DVD_RAM | CDC_RAM))) {
rfd->feature_code = cpu_to_be16(CDF_RWRT);
rfd->curr = 1;
ret = 0;
}
}
break;
default:
if (cgc->sense) {
/* indicate Unknown code */
cgc->sense->sense_key = 0x05;
cgc->sense->asc = 0x20;
cgc->sense->ascq = 0x00;
}
break;
}
cgc->stat = ret;
return ret;
}
static void restart_all_queues(int first_index)
{
int i;
for (i = first_index + 1; i < viocd_numdev; i++)
if (viocd_diskinfo[i].viocd_disk)
blk_run_queue(viocd_diskinfo[i].viocd_disk->queue);
for (i = 0; i <= first_index; i++)
if (viocd_diskinfo[i].viocd_disk)
blk_run_queue(viocd_diskinfo[i].viocd_disk->queue);
}
/* This routine handles incoming CD LP events */
static void vio_handle_cd_event(struct HvLpEvent *event)
{
struct viocdlpevent *bevent;
struct viocd_waitevent *pwe;
struct disk_info *di;
unsigned long flags;
struct request *req;
if (event == NULL)
/* Notification that a partition went away! */
return;
/* First, we should NEVER get an int here...only acks */
if (hvlpevent_is_int(event)) {
pr_warning("Yikes! got an int in viocd event handler!\n");
if (hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
}
bevent = (struct viocdlpevent *)event;
switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
case viocdopen:
if (event->xRc == 0) {
di = &viocd_diskinfo[bevent->disk];
blk_queue_logical_block_size(di->viocd_disk->queue,
bevent->block_size);
set_capacity(di->viocd_disk,
bevent->media_size *
bevent->block_size / 512);
}
/* FALLTHROUGH !! */
case viocdlockdoor:
pwe = (struct viocd_waitevent *)event->xCorrelationToken;
return_complete:
pwe->rc = event->xRc;
pwe->sub_result = bevent->sub_result;
complete(&pwe->com);
break;
case viocdcheck:
pwe = (struct viocd_waitevent *)event->xCorrelationToken;
pwe->changed = bevent->flags;
goto return_complete;
case viocdclose:
break;
case viocdwrite:
case viocdread:
/*
* Since this is running in interrupt mode, we need to
* make sure we're not stepping on any global I/O operations
*/
di = &viocd_diskinfo[bevent->disk];
spin_lock_irqsave(&viocd_reqlock, flags);
dma_unmap_single(di->dev, bevent->token, bevent->len,
((event->xSubtype & VIOMINOR_SUBTYPE_MASK) == viocdread)
? DMA_FROM_DEVICE : DMA_TO_DEVICE);
req = (struct request *)bevent->event.xCorrelationToken;
rwreq--;
if (event->xRc != HvLpEvent_Rc_Good) {
const struct vio_error_entry *err =
vio_lookup_rc(viocd_err_table,
bevent->sub_result);
pr_warning("request %p failed with rc %d:0x%04X: %s\n",
req, event->xRc,
bevent->sub_result, err->msg);
__blk_end_request_all(req, -EIO);
} else
__blk_end_request_all(req, 0);
/* restart handling of incoming requests */
spin_unlock_irqrestore(&viocd_reqlock, flags);
restart_all_queues(bevent->disk);
break;
default:
pr_warning("message with invalid subtype %0x04X!\n",
event->xSubtype & VIOMINOR_SUBTYPE_MASK);
if (hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
}
}
static int viocd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
void *arg)
{
return -EINVAL;
}
static struct cdrom_device_ops viocd_dops = {
.open = viocd_open,
.release = viocd_release,
.check_events = viocd_check_events,
.lock_door = viocd_lock_door,
.generic_packet = viocd_packet,
.audio_ioctl = viocd_audio_ioctl,
.capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION |
CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET |
CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW |
CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM
};
static int find_capability(const char *type)
{
struct capability_entry *entry;
for (entry = capability_table; entry->type; ++entry)
if (!strncmp(entry->type, type, 4))
break;
return entry->capability;
}
static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct gendisk *gendisk;
int deviceno;
struct disk_info *d;
struct cdrom_device_info *c;
struct request_queue *q;
struct device_node *node = vdev->dev.of_node;
deviceno = vdev->unit_address;
if (deviceno >= VIOCD_MAX_CD)
return -ENODEV;
if (!node)
return -ENODEV;
if (deviceno >= viocd_numdev)
viocd_numdev = deviceno + 1;
d = &viocd_diskinfo[deviceno];
d->rsrcname = of_get_property(node, "linux,vio_rsrcname", NULL);
d->type = of_get_property(node, "linux,vio_type", NULL);
d->model = of_get_property(node, "linux,vio_model", NULL);
c = &d->viocd_info;
c->ops = &viocd_dops;
c->speed = 4;
c->capacity = 1;
c->handle = d;
c->mask = ~find_capability(d->type);
sprintf(c->name, VIOCD_DEVICE "%c", 'a' + deviceno);
if (register_cdrom(c) != 0) {
pr_warning("Cannot register viocd CD-ROM %s!\n", c->name);
goto out;
}
pr_info("cd %s is iSeries resource %10.10s type %4.4s, model %3.3s\n",
c->name, d->rsrcname, d->type, d->model);
q = blk_init_queue(do_viocd_request, &viocd_reqlock);
if (q == NULL) {
pr_warning("Cannot allocate queue for %s!\n", c->name);
goto out_unregister_cdrom;
}
gendisk = alloc_disk(1);
if (gendisk == NULL) {
pr_warning("Cannot create gendisk for %s!\n", c->name);
goto out_cleanup_queue;
}
gendisk->major = VIOCD_MAJOR;
gendisk->first_minor = deviceno;
strncpy(gendisk->disk_name, c->name,
sizeof(gendisk->disk_name));
blk_queue_max_segments(q, 1);
blk_queue_max_hw_sectors(q, 4096 / 512);
gendisk->queue = q;
gendisk->fops = &viocd_fops;
gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
set_capacity(gendisk, 0);
gendisk->private_data = d;
d->viocd_disk = gendisk;
d->dev = &vdev->dev;
gendisk->driverfs_dev = d->dev;
add_disk(gendisk);
return 0;
out_cleanup_queue:
blk_cleanup_queue(q);
out_unregister_cdrom:
unregister_cdrom(c);
out:
return -ENODEV;
}
static int viocd_remove(struct vio_dev *vdev)
{
struct disk_info *d = &viocd_diskinfo[vdev->unit_address];
unregister_cdrom(&d->viocd_info);
del_gendisk(d->viocd_disk);
blk_cleanup_queue(d->viocd_disk->queue);
put_disk(d->viocd_disk);
return 0;
}
/**
* viocd_device_table: Used by vio.c to match devices that we
* support.
*/
static struct vio_device_id viocd_device_table[] __devinitdata = {
{ "block", "IBM,iSeries-viocd" },
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, viocd_device_table);
static struct vio_driver viocd_driver = {
.id_table = viocd_device_table,
.probe = viocd_probe,
.remove = viocd_remove,
.driver = {
.name = "viocd",
.owner = THIS_MODULE,
}
};
static int __init viocd_init(void)
{
int ret = 0;
if (!firmware_has_feature(FW_FEATURE_ISERIES))
return -ENODEV;
if (viopath_hostLp == HvLpIndexInvalid) {
vio_set_hostlp();
/* If we don't have a host, bail out */
if (viopath_hostLp == HvLpIndexInvalid)
return -ENODEV;
}
pr_info("vers " VIOCD_VERS ", hosting partition %d\n", viopath_hostLp);
if (register_blkdev(VIOCD_MAJOR, VIOCD_DEVICE) != 0) {
pr_warning("Unable to get major %d for %s\n",
VIOCD_MAJOR, VIOCD_DEVICE);
return -EIO;
}
ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio,
MAX_CD_REQ + 2);
if (ret) {
pr_warning("error opening path to host partition %d\n",
viopath_hostLp);
goto out_unregister;
}
/* Initialize our request handler */
vio_setHandler(viomajorsubtype_cdio, vio_handle_cd_event);
spin_lock_init(&viocd_reqlock);
ret = vio_register_driver(&viocd_driver);
if (ret)
goto out_free_info;
proc_create("iSeries/viocd", S_IFREG|S_IRUGO, NULL,
&proc_viocd_operations);
return 0;
out_free_info:
vio_clearHandler(viomajorsubtype_cdio);
viopath_close(viopath_hostLp, viomajorsubtype_cdio, MAX_CD_REQ + 2);
out_unregister:
unregister_blkdev(VIOCD_MAJOR, VIOCD_DEVICE);
return ret;
}
static void __exit viocd_exit(void)
{
remove_proc_entry("iSeries/viocd", NULL);
vio_unregister_driver(&viocd_driver);
viopath_close(viopath_hostLp, viomajorsubtype_cdio, MAX_CD_REQ + 2);
vio_clearHandler(viomajorsubtype_cdio);
unregister_blkdev(VIOCD_MAJOR, VIOCD_DEVICE);
}
module_init(viocd_init);
module_exit(viocd_exit);
MODULE_LICENSE("GPL");
/* -*- linux-c -*-
* drivers/char/viotape.c
*
* iSeries Virtual Tape
*
* Authors: Dave Boutcher <boutcher@us.ibm.com>
* Ryan Arnold <ryanarn@us.ibm.com>
* Colin Devilbiss <devilbis@us.ibm.com>
* Stephen Rothwell
*
* (C) Copyright 2000-2004 IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* This routine provides access to tape drives owned and managed by an OS/400
* partition running on the same box as this Linux partition.
*
* All tape operations are performed by sending messages back and forth to
* the OS/400 partition. The format of the messages is defined in
* iseries/vio.h
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/mtio.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/major.h>
#include <linux/completion.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/firmware.h>
#include <asm/vio.h>
#include <asm/iseries/vio.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_call_event.h>
#include <asm/iseries/hv_lp_config.h>
#define VIOTAPE_VERSION "1.2"
#define VIOTAPE_MAXREQ 1
#define VIOTAPE_KERN_WARN KERN_WARNING "viotape: "
#define VIOTAPE_KERN_INFO KERN_INFO "viotape: "
static DEFINE_MUTEX(proc_viotape_mutex);
static int viotape_numdev;
/*
* The minor number follows the conventions of the SCSI tape drives. The
* rewind and mode are encoded in the minor #. We use this struct to break
* them out.
*/
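/*
* Layout (decoded by get_dev_info() below): bits 0-4 hold the device
* number, bits 5-6 the mode, and bit 7, when set, suppresses the
* automatic rewind on close.
*/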
struct viot_devinfo_struct {
int devno;
int mode;
int rewind;
};
#define VIOTAPOP_RESET 0
#define VIOTAPOP_FSF 1
#define VIOTAPOP_BSF 2
#define VIOTAPOP_FSR 3
#define VIOTAPOP_BSR 4
#define VIOTAPOP_WEOF 5
#define VIOTAPOP_REW 6
#define VIOTAPOP_NOP 7
#define VIOTAPOP_EOM 8
#define VIOTAPOP_ERASE 9
#define VIOTAPOP_SETBLK 10
#define VIOTAPOP_SETDENSITY 11
#define VIOTAPOP_SETPOS 12
#define VIOTAPOP_GETPOS 13
#define VIOTAPOP_SETPART 14
#define VIOTAPOP_UNLOAD 15
enum viotaperc {
viotape_InvalidRange = 0x0601,
viotape_InvalidToken = 0x0602,
viotape_DMAError = 0x0603,
viotape_UseError = 0x0604,
viotape_ReleaseError = 0x0605,
viotape_InvalidTape = 0x0606,
viotape_InvalidOp = 0x0607,
viotape_TapeErr = 0x0608,
viotape_AllocTimedOut = 0x0640,
viotape_BOTEnc = 0x0641,
viotape_BlankTape = 0x0642,
viotape_BufferEmpty = 0x0643,
viotape_CleanCartFound = 0x0644,
viotape_CmdNotAllowed = 0x0645,
viotape_CmdNotSupported = 0x0646,
viotape_DataCheck = 0x0647,
viotape_DecompressErr = 0x0648,
viotape_DeviceTimeout = 0x0649,
viotape_DeviceUnavail = 0x064a,
viotape_DeviceBusy = 0x064b,
viotape_EndOfMedia = 0x064c,
viotape_EndOfTape = 0x064d,
viotape_EquipCheck = 0x064e,
viotape_InsufficientRs = 0x064f,
viotape_InvalidLogBlk = 0x0650,
viotape_LengthError = 0x0651,
viotape_LibDoorOpen = 0x0652,
viotape_LoadFailure = 0x0653,
viotape_NotCapable = 0x0654,
viotape_NotOperational = 0x0655,
viotape_NotReady = 0x0656,
viotape_OpCancelled = 0x0657,
viotape_PhyLinkErr = 0x0658,
viotape_RdyNotBOT = 0x0659,
viotape_TapeMark = 0x065a,
viotape_WriteProt = 0x065b
};
static const struct vio_error_entry viotape_err_table[] = {
{ viotape_InvalidRange, EIO, "Internal error" },
{ viotape_InvalidToken, EIO, "Internal error" },
{ viotape_DMAError, EIO, "DMA error" },
{ viotape_UseError, EIO, "Internal error" },
{ viotape_ReleaseError, EIO, "Internal error" },
{ viotape_InvalidTape, EIO, "Invalid tape device" },
{ viotape_InvalidOp, EIO, "Invalid operation" },
{ viotape_TapeErr, EIO, "Tape error" },
{ viotape_AllocTimedOut, EBUSY, "Allocate timed out" },
{ viotape_BOTEnc, EIO, "Beginning of tape encountered" },
{ viotape_BlankTape, EIO, "Blank tape" },
{ viotape_BufferEmpty, EIO, "Buffer empty" },
{ viotape_CleanCartFound, ENOMEDIUM, "Cleaning cartridge found" },
{ viotape_CmdNotAllowed, EIO, "Command not allowed" },
{ viotape_CmdNotSupported, EIO, "Command not supported" },
{ viotape_DataCheck, EIO, "Data check" },
{ viotape_DecompressErr, EIO, "Decompression error" },
{ viotape_DeviceTimeout, EBUSY, "Device timeout" },
{ viotape_DeviceUnavail, EIO, "Device unavailable" },
{ viotape_DeviceBusy, EBUSY, "Device busy" },
{ viotape_EndOfMedia, ENOSPC, "End of media" },
{ viotape_EndOfTape, ENOSPC, "End of tape" },
{ viotape_EquipCheck, EIO, "Equipment check" },
{ viotape_InsufficientRs, EOVERFLOW, "Insufficient tape resources" },
{ viotape_InvalidLogBlk, EIO, "Invalid logical block location" },
{ viotape_LengthError, EOVERFLOW, "Length error" },
{ viotape_LibDoorOpen, EBUSY, "Door open" },
{ viotape_LoadFailure, ENOMEDIUM, "Load failure" },
{ viotape_NotCapable, EIO, "Not capable" },
{ viotape_NotOperational, EIO, "Not operational" },
{ viotape_NotReady, EIO, "Not ready" },
{ viotape_OpCancelled, EIO, "Operation cancelled" },
{ viotape_PhyLinkErr, EIO, "Physical link error" },
{ viotape_RdyNotBOT, EIO, "Ready but not beginning of tape" },
{ viotape_TapeMark, EIO, "Tape mark" },
{ viotape_WriteProt, EROFS, "Write protection error" },
{ 0, 0, NULL },
};
/* Maximum number of tapes we support */
#define VIOTAPE_MAX_TAPE HVMAXARCHITECTEDVIRTUALTAPES
#define MAX_PARTITIONS 4
/* defines for current tape state */
#define VIOT_IDLE 0
#define VIOT_READING 1
#define VIOT_WRITING 2
/* Our info on the tapes */
static struct {
const char *rsrcname;
const char *type;
const char *model;
} viotape_unitinfo[VIOTAPE_MAX_TAPE];
static struct mtget viomtget[VIOTAPE_MAX_TAPE];
static struct class *tape_class;
static struct device *tape_device[VIOTAPE_MAX_TAPE];
/*
* maintain the current state of each tape (and partition)
* so that we know when to write EOF marks.
*/
static struct {
unsigned char cur_part;
unsigned char part_stat_rwi[MAX_PARTITIONS];
} state[VIOTAPE_MAX_TAPE];
/* We single-thread */
static struct semaphore reqSem;
/*
* When we send a request, we use this struct to get the response back
* from the interrupt handler
*/
struct op_struct {
void *buffer;
dma_addr_t dmaaddr;
size_t count;
int rc;
int non_blocking;
struct completion com;
struct device *dev;
struct op_struct *next;
};
static spinlock_t op_struct_list_lock;
static struct op_struct *op_struct_list;
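/*
* Free op structs are kept on a simple LIFO list headed by
* op_struct_list and guarded by op_struct_list_lock; see
* get_op_struct() and free_op_struct() below.
*/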
/* forward declaration to resolve interdependence */
static int chg_state(int index, unsigned char new_state, struct file *file);
/* procfs support */
static int proc_viotape_show(struct seq_file *m, void *v)
{
int i;
seq_printf(m, "viotape driver version " VIOTAPE_VERSION "\n");
for (i = 0; i < viotape_numdev; i++) {
seq_printf(m, "viotape device %d is iSeries resource %10.10s"
"type %4.4s, model %3.3s\n",
i, viotape_unitinfo[i].rsrcname,
viotape_unitinfo[i].type,
viotape_unitinfo[i].model);
}
return 0;
}
static int proc_viotape_open(struct inode *inode, struct file *file)
{
return single_open(file, proc_viotape_show, NULL);
}
static const struct file_operations proc_viotape_operations = {
.owner = THIS_MODULE,
.open = proc_viotape_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/* Decode the device minor number into its parts */
void get_dev_info(struct inode *ino, struct viot_devinfo_struct *devi)
{
devi->devno = iminor(ino) & 0x1F;
devi->mode = (iminor(ino) & 0x60) >> 5;
/* if bit is set in the minor, do _not_ rewind automatically */
devi->rewind = (iminor(ino) & 0x80) == 0;
}
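/*
* Example (illustrative): minor 0xA3 decodes to devno 3 (0xA3 & 0x1F)
* and mode 1 ((0xA3 & 0x60) >> 5), with no auto-rewind since bit 7
* is set.
*/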
/* This is called only from the exit and init paths, so no need for locking */
static void clear_op_struct_pool(void)
{
while (op_struct_list) {
struct op_struct *toFree = op_struct_list;
op_struct_list = op_struct_list->next;
kfree(toFree);
}
}
/* Likewise, this is only called from the init path */
static int add_op_structs(int structs)
{
int i;
for (i = 0; i < structs; ++i) {
struct op_struct *new_struct =
kmalloc(sizeof(*new_struct), GFP_KERNEL);
if (!new_struct) {
clear_op_struct_pool();
return -ENOMEM;
}
new_struct->next = op_struct_list;
op_struct_list = new_struct;
}
return 0;
}
/* Allocate an op structure from our pool */
static struct op_struct *get_op_struct(void)
{
struct op_struct *retval;
unsigned long flags;
spin_lock_irqsave(&op_struct_list_lock, flags);
retval = op_struct_list;
if (retval)
op_struct_list = retval->next;
spin_unlock_irqrestore(&op_struct_list_lock, flags);
if (retval) {
memset(retval, 0, sizeof(*retval));
init_completion(&retval->com);
}
return retval;
}
/* Return an op structure to our pool */
static void free_op_struct(struct op_struct *op_struct)
{
unsigned long flags;
spin_lock_irqsave(&op_struct_list_lock, flags);
op_struct->next = op_struct_list;
op_struct_list = op_struct;
spin_unlock_irqrestore(&op_struct_list_lock, flags);
}
/* Map our tape return codes to errno values */
int tape_rc_to_errno(int tape_rc, char *operation, int tapeno)
{
const struct vio_error_entry *err;
if (tape_rc == 0)
return 0;
err = vio_lookup_rc(viotape_err_table, tape_rc);
printk(VIOTAPE_KERN_WARN "error(%s) 0x%04x on Device %d (%-10s): %s\n",
operation, tape_rc, tapeno,
viotape_unitinfo[tapeno].rsrcname, err->msg);
return -err->errno;
}
/* Write */
static ssize_t viotap_write(struct file *file, const char *buf,
size_t count, loff_t * ppos)
{
HvLpEvent_Rc hvrc;
unsigned short flags = file->f_flags;
int noblock = ((flags & O_NONBLOCK) != 0);
ssize_t ret;
struct viot_devinfo_struct devi;
struct op_struct *op = get_op_struct();
if (op == NULL)
return -ENOMEM;
get_dev_info(file->f_path.dentry->d_inode, &devi);
/*
* We need to make sure we can send a request. We use
* a semaphore to keep track of # requests in use. If
* we are non-blocking, make sure we don't block on the
* semaphore
*/
if (noblock) {
if (down_trylock(&reqSem)) {
ret = -EWOULDBLOCK;
goto free_op;
}
} else
down(&reqSem);
/* Allocate a DMA buffer */
op->dev = tape_device[devi.devno];
op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr,
GFP_ATOMIC);
if (op->buffer == NULL) {
printk(VIOTAPE_KERN_WARN
"error allocating dma buffer for len %ld\n",
count);
ret = -EFAULT;
goto up_sem;
}
/* Copy the data into the buffer */
if (copy_from_user(op->buffer, buf, count)) {
printk(VIOTAPE_KERN_WARN "tape: error on copy from user\n");
ret = -EFAULT;
goto free_dma;
}
op->non_blocking = noblock;
init_completion(&op->com);
op->count = count;
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_tape | viotapewrite,
HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)op, VIOVERSION << 16,
((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0);
if (hvrc != HvLpEvent_Rc_Good) {
printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
(int)hvrc);
ret = -EIO;
goto free_dma;
}
if (noblock)
return count;
wait_for_completion(&op->com);
if (op->rc)
ret = tape_rc_to_errno(op->rc, "write", devi.devno);
else {
chg_state(devi.devno, VIOT_WRITING, file);
ret = op->count;
}
free_dma:
dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr);
up_sem:
up(&reqSem);
free_op:
free_op_struct(op);
return ret;
}
/* read */
static ssize_t viotap_read(struct file *file, char *buf, size_t count,
loff_t *ptr)
{
HvLpEvent_Rc hvrc;
unsigned short flags = file->f_flags;
struct op_struct *op = get_op_struct();
int noblock = ((flags & O_NONBLOCK) != 0);
ssize_t ret;
struct viot_devinfo_struct devi;
if (op == NULL)
return -ENOMEM;
get_dev_info(file->f_path.dentry->d_inode, &devi);
/*
* We need to make sure we can send a request. We use
* a semaphore to keep track of # requests in use. If
* we are non-blocking, make sure we don't block on the
* semaphore
*/
if (noblock) {
if (down_trylock(&reqSem)) {
ret = -EWOULDBLOCK;
goto free_op;
}
} else
down(&reqSem);
chg_state(devi.devno, VIOT_READING, file);
/* Allocate a DMA buffer */
op->dev = tape_device[devi.devno];
op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr,
GFP_ATOMIC);
if (op->buffer == NULL) {
ret = -EFAULT;
goto up_sem;
}
op->count = count;
init_completion(&op->com);
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_tape | viotaperead,
HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)op, VIOVERSION << 16,
((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0);
if (hvrc != HvLpEvent_Rc_Good) {
printk(VIOTAPE_KERN_WARN "tape hv error on op %d\n",
(int)hvrc);
ret = -EIO;
goto free_dma;
}
wait_for_completion(&op->com);
if (op->rc)
ret = tape_rc_to_errno(op->rc, "read", devi.devno);
else {
ret = op->count;
if (ret && copy_to_user(buf, op->buffer, ret)) {
printk(VIOTAPE_KERN_WARN "error on copy_to_user\n");
ret = -EFAULT;
}
}
free_dma:
dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr);
up_sem:
up(&reqSem);
free_op:
free_op_struct(op);
return ret;
}
/* ioctl */
static int viotap_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
HvLpEvent_Rc hvrc;
int ret;
struct viot_devinfo_struct devi;
struct mtop mtc;
u32 myOp;
struct op_struct *op = get_op_struct();
if (op == NULL)
return -ENOMEM;
get_dev_info(file->f_path.dentry->d_inode, &devi);
down(&reqSem);
ret = -EINVAL;
switch (cmd) {
case MTIOCTOP:
ret = -EFAULT;
/*
* inode is null if and only if we (the kernel)
* made the request
*/
if (inode == NULL)
memcpy(&mtc, (void *) arg, sizeof(struct mtop));
else if (copy_from_user((char *)&mtc, (char *)arg,
sizeof(struct mtop)))
goto free_op;
ret = -EIO;
switch (mtc.mt_op) {
case MTRESET:
myOp = VIOTAPOP_RESET;
break;
case MTFSF:
myOp = VIOTAPOP_FSF;
break;
case MTBSF:
myOp = VIOTAPOP_BSF;
break;
case MTFSR:
myOp = VIOTAPOP_FSR;
break;
case MTBSR:
myOp = VIOTAPOP_BSR;
break;
case MTWEOF:
myOp = VIOTAPOP_WEOF;
break;
case MTREW:
myOp = VIOTAPOP_REW;
break;
case MTNOP:
myOp = VIOTAPOP_NOP;
break;
case MTEOM:
myOp = VIOTAPOP_EOM;
break;
case MTERASE:
myOp = VIOTAPOP_ERASE;
break;
case MTSETBLK:
myOp = VIOTAPOP_SETBLK;
break;
case MTSETDENSITY:
myOp = VIOTAPOP_SETDENSITY;
break;
case MTTELL:
myOp = VIOTAPOP_GETPOS;
break;
case MTSEEK:
myOp = VIOTAPOP_SETPOS;
break;
case MTSETPART:
myOp = VIOTAPOP_SETPART;
break;
case MTOFFL:
myOp = VIOTAPOP_UNLOAD;
break;
default:
printk(VIOTAPE_KERN_WARN "MTIOCTOP called "
"with invalid op 0x%x\n", mtc.mt_op);
goto free_op;
}
/*
* if we moved the head, we are no longer
* reading or writing
*/
switch (mtc.mt_op) {
case MTFSF:
case MTBSF:
case MTFSR:
case MTBSR:
case MTTELL:
case MTSEEK:
case MTREW:
chg_state(devi.devno, VIOT_IDLE, file);
}
init_completion(&op->com);
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_tape | viotapeop,
HvLpEvent_AckInd_DoAck,
HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)op,
VIOVERSION << 16,
((u64)devi.devno << 48), 0,
(((u64)myOp) << 32) | mtc.mt_count, 0);
if (hvrc != HvLpEvent_Rc_Good) {
printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
(int)hvrc);
goto free_op;
}
wait_for_completion(&op->com);
ret = tape_rc_to_errno(op->rc, "tape operation", devi.devno);
goto free_op;
case MTIOCGET:
ret = -EIO;
init_completion(&op->com);
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_tape | viotapegetstatus,
HvLpEvent_AckInd_DoAck,
HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)op, VIOVERSION << 16,
((u64)devi.devno << 48), 0, 0, 0);
if (hvrc != HvLpEvent_Rc_Good) {
printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
(int)hvrc);
goto free_op;
}
wait_for_completion(&op->com);
/* Operation is complete - grab the error code */
ret = tape_rc_to_errno(op->rc, "get status", devi.devno);
free_op_struct(op);
up(&reqSem);
if ((ret == 0) && copy_to_user((void *)arg,
&viomtget[devi.devno],
sizeof(viomtget[0])))
ret = -EFAULT;
return ret;
case MTIOCPOS:
printk(VIOTAPE_KERN_WARN "Got an (unsupported) MTIOCPOS\n");
break;
default:
printk(VIOTAPE_KERN_WARN "got an unsupported ioctl 0x%0x\n",
cmd);
break;
}
free_op:
free_op_struct(op);
up(&reqSem);
return ret;
}
static long viotap_unlocked_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
long rc;
mutex_lock(&proc_viotape_mutex);
rc = viotap_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
mutex_unlock(&proc_viotape_mutex);
return rc;
}
static int viotap_open(struct inode *inode, struct file *file)
{
HvLpEvent_Rc hvrc;
struct viot_devinfo_struct devi;
int ret;
struct op_struct *op = get_op_struct();
if (op == NULL)
return -ENOMEM;
mutex_lock(&proc_viotape_mutex);
get_dev_info(file->f_path.dentry->d_inode, &devi);
/* Note: We currently only support one mode! */
if ((devi.devno >= viotape_numdev) || (devi.mode)) {
ret = -ENODEV;
goto free_op;
}
init_completion(&op->com);
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_tape | viotapeopen,
HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)op, VIOVERSION << 16,
((u64)devi.devno << 48), 0, 0, 0);
if (hvrc != HvLpEvent_Rc_Good) {
printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n",
(int) hvrc);
ret = -EIO;
goto free_op;
}
wait_for_completion(&op->com);
ret = tape_rc_to_errno(op->rc, "open", devi.devno);
free_op:
free_op_struct(op);
mutex_unlock(&proc_viotape_mutex);
return ret;
}
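/*
 * Last close: chg_state() writes a trailing EOF if we were mid-write,
 * the tape is rewound when the device info flags rewind-on-close, and
 * the host is then told the unit is closed.
 */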
static int viotap_release(struct inode *inode, struct file *file)
{
HvLpEvent_Rc hvrc;
struct viot_devinfo_struct devi;
int ret = 0;
struct op_struct *op = get_op_struct();
if (op == NULL)
return -ENOMEM;
init_completion(&op->com);
get_dev_info(file->f_path.dentry->d_inode, &devi);
if (devi.devno >= viotape_numdev) {
ret = -ENODEV;
goto free_op;
}
chg_state(devi.devno, VIOT_IDLE, file);
if (devi.rewind) {
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_tape | viotapeop,
HvLpEvent_AckInd_DoAck,
HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)op, VIOVERSION << 16,
((u64)devi.devno << 48), 0,
((u64)VIOTAPOP_REW) << 32, 0);
/* Don't wait on a completion that will never arrive */
if (hvrc == HvLpEvent_Rc_Good) {
wait_for_completion(&op->com);
tape_rc_to_errno(op->rc, "rewind", devi.devno);
}
}
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
HvLpEvent_Type_VirtualIo,
viomajorsubtype_tape | viotapeclose,
HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)op, VIOVERSION << 16,
((u64)devi.devno << 48), 0, 0, 0);
if (hvrc != HvLpEvent_Rc_Good) {
printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n",
(int) hvrc);
ret = -EIO;
goto free_op;
}
wait_for_completion(&op->com);
if (op->rc)
printk(VIOTAPE_KERN_WARN "close failed\n");
free_op:
free_op_struct(op);
return ret;
}
const struct file_operations viotap_fops = {
.owner = THIS_MODULE,
.read = viotap_read,
.write = viotap_write,
.unlocked_ioctl = viotap_unlocked_ioctl,
.open = viotap_open,
.release = viotap_release,
.llseek = noop_llseek,
};
/* Handle interrupt events for tape */
static void vioHandleTapeEvent(struct HvLpEvent *event)
{
int tapeminor;
struct op_struct *op;
struct viotapelpevent *tevent = (struct viotapelpevent *)event;
if (event == NULL) {
/* Notification that a partition went away! */
if (!viopath_isactive(viopath_hostLp)) {
/* TODO! Clean up */
}
return;
}
tapeminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
op = (struct op_struct *)event->xCorrelationToken;
switch (tapeminor) {
case viotapeopen:
case viotapeclose:
op->rc = tevent->sub_type_result;
complete(&op->com);
break;
case viotaperead:
op->rc = tevent->sub_type_result;
op->count = tevent->len;
complete(&op->com);
break;
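/*
 * A non-blocking write already returned to the caller, so the ack
 * only needs to free the DMA bounce buffer and give back the
 * request slot.
 */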
case viotapewrite:
if (op->non_blocking) {
dma_free_coherent(op->dev, op->count,
op->buffer, op->dmaaddr);
free_op_struct(op);
up(&reqSem);
} else {
op->rc = tevent->sub_type_result;
op->count = tevent->len;
complete(&op->com);
}
break;
case viotapeop:
case viotapegetpos:
case viotapesetpos:
case viotapegetstatus:
if (op) {
op->count = tevent->u.op.count;
op->rc = tevent->sub_type_result;
if (!op->non_blocking)
complete(&op->com);
}
break;
default:
printk(VIOTAPE_KERN_WARN "weird ack\n");
}
}
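/*
 * Probe one virtual tape unit: record the resource name, type and
 * model published in the device tree, mark every partition idle, and
 * create the rewinding (vt%d) and non-rewinding (nvt%d) device nodes.
 */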
static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
int i = vdev->unit_address;
int j;
struct device_node *node = vdev->dev.of_node;
if (i >= VIOTAPE_MAX_TAPE)
return -ENODEV;
if (!node)
return -ENODEV;
if (i >= viotape_numdev)
viotape_numdev = i + 1;
tape_device[i] = &vdev->dev;
viotape_unitinfo[i].rsrcname = of_get_property(node,
"linux,vio_rsrcname", NULL);
viotape_unitinfo[i].type = of_get_property(node, "linux,vio_type",
NULL);
viotape_unitinfo[i].model = of_get_property(node, "linux,vio_model",
NULL);
state[i].cur_part = 0;
for (j = 0; j < MAX_PARTITIONS; ++j)
state[i].part_stat_rwi[j] = VIOT_IDLE;
device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i), NULL,
"iseries!vt%d", i);
device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80), NULL,
"iseries!nvt%d", i);
printk(VIOTAPE_KERN_INFO "tape iseries/vt%d is iSeries "
"resource %10.10s type %4.4s, model %3.3s\n",
i, viotape_unitinfo[i].rsrcname,
viotape_unitinfo[i].type, viotape_unitinfo[i].model);
return 0;
}
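/* Tear down the two device nodes created by viotape_probe() */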
static int viotape_remove(struct vio_dev *vdev)
{
int i = vdev->unit_address;
device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i | 0x80));
device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i));
return 0;
}
/**
* viotape_device_table: Used by vio.c to match devices that we
* support.
*/
static struct vio_device_id viotape_device_table[] __devinitdata = {
{ "byte", "IBM,iSeries-viotape" },
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, viotape_device_table);
static struct vio_driver viotape_driver = {
.id_table = viotape_device_table,
.probe = viotape_probe,
.remove = viotape_remove,
.driver = {
.name = "viotape",
.owner = THIS_MODULE,
}
};
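/*
 * Module init: build the request pool, open the HV path to the hosting
 * partition, then register the chrdev, the sysfs class, the vio driver
 * and the /proc entry.  Failures unwind in reverse order through the
 * labels below.
 */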
int __init viotap_init(void)
{
int ret;
if (!firmware_has_feature(FW_FEATURE_ISERIES))
return -ENODEV;
op_struct_list = NULL;
ret = add_op_structs(VIOTAPE_MAXREQ);
if (ret < 0) {
printk(VIOTAPE_KERN_WARN "couldn't allocate op structs\n");
return ret;
}
spin_lock_init(&op_struct_list_lock);
sema_init(&reqSem, VIOTAPE_MAXREQ);
if (viopath_hostLp == HvLpIndexInvalid) {
vio_set_hostlp();
if (viopath_hostLp == HvLpIndexInvalid) {
ret = -ENODEV;
goto clear_op;
}
}
ret = viopath_open(viopath_hostLp, viomajorsubtype_tape,
VIOTAPE_MAXREQ + 2);
if (ret) {
printk(VIOTAPE_KERN_WARN
"error on viopath_open to hostlp %d\n", ret);
ret = -EIO;
goto clear_op;
}
printk(VIOTAPE_KERN_INFO "vers " VIOTAPE_VERSION
", hosting partition %d\n", viopath_hostLp);
vio_setHandler(viomajorsubtype_tape, vioHandleTapeEvent);
ret = register_chrdev(VIOTAPE_MAJOR, "viotape", &viotap_fops);
if (ret < 0) {
printk(VIOTAPE_KERN_WARN "Error registering viotape device\n");
goto clear_handler;
}
tape_class = class_create(THIS_MODULE, "tape");
if (IS_ERR(tape_class)) {
printk(VIOTAPE_KERN_WARN "Unable to allocat class\n");
ret = PTR_ERR(tape_class);
goto unreg_chrdev;
}
ret = vio_register_driver(&viotape_driver);
if (ret)
goto unreg_class;
proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL,
&proc_viotape_operations);
return 0;
unreg_class:
class_destroy(tape_class);
unreg_chrdev:
unregister_chrdev(VIOTAPE_MAJOR, "viotape");
clear_handler:
vio_clearHandler(viomajorsubtype_tape);
viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
clear_op:
clear_op_struct_pool();
return ret;
}
/* Give a new state to the tape object */
static int chg_state(int index, unsigned char new_state, struct file *file)
{
unsigned char *cur_state =
&state[index].part_stat_rwi[state[index].cur_part];
int rc = 0;
/* if the same state, don't bother */
if (*cur_state == new_state)
return 0;
/* write an EOF if changing from writing to some other state */
if (*cur_state == VIOT_WRITING) {
struct mtop write_eof = { MTWEOF, 1 };
rc = viotap_ioctl(NULL, file, MTIOCTOP,
(unsigned long)&write_eof);
}
*cur_state = new_state;
return rc;
}
/* Module cleanup: undo everything viotap_init() set up, in reverse order */
static void __exit viotap_exit(void)
{
remove_proc_entry("iSeries/viotape", NULL);
vio_unregister_driver(&viotape_driver);
class_destroy(tape_class);
unregister_chrdev(VIOTAPE_MAJOR, "viotape");
viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
vio_clearHandler(viomajorsubtype_tape);
clear_op_struct_pool();
}
MODULE_LICENSE("GPL");
module_init(viotap_init);
module_exit(viotap_exit);