Commit d7d12ef2 authored by Peter Oberparleiter, committed by Martin Schwidefsky

[S390] cio: make steal lock procedure more robust

An Unconditional Reserve + Release operation (steal lock) for a
boxed device may fail when encountering special error cases
(e.g. unit checks or path errors). Fix this by using the more
robust ccw_request infrastructure for performing the steal lock
CCW program.
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent 52ef0608
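For orientation before the diff: a condensed sketch of the new control flow, in which the steal-lock channel program is handed to the ccw_request layer and the caller sleeps on a completion instead of waiting for the IRB activity-control bits to clear. All identifiers below are taken from the patch itself; only the wrapper stlck_flow_sketch() and its argument list are illustrative, not part of the kernel code.

/* Condensed, annotated sketch of the steal-lock flow introduced by this
 * patch; stlck_flow_sketch() is a hypothetical wrapper for illustration. */
struct stlck_data {
        struct completion done; /* completed by ccw_device_stlck_done() */
        int rc;                 /* result reported by the request layer */
};

static int stlck_flow_sketch(struct ccw_device *cdev, struct subchannel *sch,
                             u8 *buffer)
{
        struct stlck_data data;
        int rc;

        init_completion(&data.done);
        data.rc = -EIO;
        spin_lock_irq(sch->lock);
        rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
        if (rc)
                goto out_unlock;
        /* Hand the chained STLCK + RELEASE program to the ccw_request
         * infrastructure; retries, timeouts and path errors are handled
         * there (see stlck_build_cp() in device_pgid.c below). */
        cdev->private->state = DEV_STATE_STEAL_LOCK;
        ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
        spin_unlock_irq(sch->lock);
        /* Sleep until stlck_callback() -> ccw_device_stlck_done() fires. */
        if (wait_for_completion_interruptible(&data.done)) {
                /* Interrupted by a signal: cancel the request and wait for
                 * the cancellation to complete. */
                spin_lock_irq(sch->lock);
                ccw_request_cancel(cdev);
                spin_unlock_irq(sch->lock);
                wait_for_completion(&data.done);
        }
        rc = data.rc;
        spin_lock_irq(sch->lock);
        cio_disable_subchannel(sch);
        cdev->private->state = DEV_STATE_BOXED;
out_unlock:
        spin_unlock_irq(sch->lock);
        return rc;
}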
@@ -28,6 +28,7 @@ enum dev_state {
 	DEV_STATE_DISCONNECTED_SENSE_ID,
 	DEV_STATE_CMFCHANGE,
 	DEV_STATE_CMFUPDATE,
+	DEV_STATE_STEAL_LOCK,
 	/* last element! */
 	NR_DEV_STATES
 };
@@ -116,6 +117,9 @@ void ccw_device_verify_done(struct ccw_device *, int);
 void ccw_device_disband_start(struct ccw_device *);
 void ccw_device_disband_done(struct ccw_device *, int);
 
+void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
+void ccw_device_stlck_done(struct ccw_device *, void *, int);
+
 int ccw_device_call_handler(struct ccw_device *);
 int ccw_device_stlck(struct ccw_device *);
@@ -640,6 +640,23 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
 	ccw_device_verify_start(cdev);
 }
 
+/*
+ * Handle path verification event in boxed state.
+ */
+static void ccw_device_boxed_verify(struct ccw_device *cdev,
+				    enum dev_event dev_event)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	if (cdev->online) {
+		if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
+			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		else
+			ccw_device_online_verify(cdev, dev_event);
+	} else
+		css_schedule_eval(sch->schid);
+}
+
 /*
  * Got an interrupt for a normal io (state online).
  */
@@ -816,32 +833,6 @@ ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
 	cdev->private->flags.doverify = 1;
 }
 
-static void
-ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
-{
-	struct irb *irb;
-
-	switch (dev_event) {
-	case DEV_EVENT_INTERRUPT:
-		irb = (struct irb *) __LC_IRB;
-		/* Check for unsolicited interrupt. */
-		if ((scsw_stctl(&irb->scsw) ==
-		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
-		    (!scsw_cc(&irb->scsw)))
-			/* FIXME: we should restart stlck here, but this
-			 * is extremely unlikely ... */
-			goto out_wakeup;
-
-		ccw_device_accumulate_irb(cdev, irb);
-		/* We don't care about basic sense etc. */
-		break;
-	default: /* timeout */
-		break;
-	}
-out_wakeup:
-	wake_up(&cdev->private->wait_q);
-}
-
 static void
 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
 {
@@ -1010,9 +1001,9 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 	},
 	[DEV_STATE_BOXED] = {
 		[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
-		[DEV_EVENT_INTERRUPT] = ccw_device_stlck_done,
-		[DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
-		[DEV_EVENT_VERIFY] = ccw_device_nop,
+		[DEV_EVENT_INTERRUPT] = ccw_device_nop,
+		[DEV_EVENT_TIMEOUT] = ccw_device_nop,
+		[DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
 	},
 	/* states to wait for i/o completion before doing something */
 	[DEV_STATE_TIMEOUT_KILL] = {
@@ -1052,6 +1043,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
 		[DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
 	},
+	[DEV_STATE_STEAL_LOCK] = {
+		[DEV_EVENT_NOTOPER] = ccw_device_request_event,
+		[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+		[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+		[DEV_EVENT_VERIFY] = ccw_device_nop,
+	},
 };
 
 EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
@@ -11,6 +11,7 @@
 #include <linux/list.h>
 #include <linux/device.h>
 #include <linux/delay.h>
+#include <linux/completion.h>
 
 #include <asm/ccwdev.h>
 #include <asm/idals.h>
@@ -504,74 +505,65 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
 	return sch->lpm;
 }
 
-/*
- * Try to break the lock on a boxed device.
- */
-int
-ccw_device_stlck(struct ccw_device *cdev)
-{
-	void *buf, *buf2;
-	unsigned long flags;
-	struct subchannel *sch;
-	int ret;
-
-	if (!cdev)
-		return -ENODEV;
-
-	if (cdev->drv && !cdev->private->options.force)
-		return -EINVAL;
-
-	sch = to_subchannel(cdev->dev.parent);
-
-	CIO_TRACE_EVENT(2, "stl lock");
-	CIO_TRACE_EVENT(2, dev_name(&cdev->dev));
-
-	buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
-	if (!buf2) {
-		kfree(buf);
-		return -ENOMEM;
-	}
-	spin_lock_irqsave(sch->lock, flags);
-	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
-	if (ret)
-		goto out_unlock;
-	/*
-	 * Setup ccw. We chain an unconditional reserve and a release so we
-	 * only break the lock.
-	 */
-	cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
-	cdev->private->iccws[0].cda = (__u32) __pa(buf);
-	cdev->private->iccws[0].count = 32;
-	cdev->private->iccws[0].flags = CCW_FLAG_CC;
-	cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
-	cdev->private->iccws[1].cda = (__u32) __pa(buf2);
-	cdev->private->iccws[1].count = 32;
-	cdev->private->iccws[1].flags = 0;
-	ret = cio_start(sch, cdev->private->iccws, 0);
-	if (ret) {
-		cio_disable_subchannel(sch); //FIXME: return code?
-		goto out_unlock;
-	}
-	cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
-	spin_unlock_irqrestore(sch->lock, flags);
-	wait_event(cdev->private->wait_q,
-		   cdev->private->irb.scsw.cmd.actl == 0);
-	spin_lock_irqsave(sch->lock, flags);
-	cio_disable_subchannel(sch); //FIXME: return code?
-	if ((cdev->private->irb.scsw.cmd.dstat !=
-	     (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
-	    (cdev->private->irb.scsw.cmd.cstat != 0))
-		ret = -EIO;
-	/* Clear irb. */
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
-out_unlock:
-	kfree(buf);
-	kfree(buf2);
-	spin_unlock_irqrestore(sch->lock, flags);
-	return ret;
-}
+struct stlck_data {
+	struct completion done;
+	int rc;
+};
+
+void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
+{
+	struct stlck_data *sdata = data;
+
+	sdata->rc = rc;
+	complete(&sdata->done);
+}
+
+/*
+ * Perform unconditional reserve + release.
+ */
+int ccw_device_stlck(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct stlck_data data;
+	u8 *buffer;
+	int rc;
+
+	/* Check if steal lock operation is valid for this device. */
+	if (cdev->drv) {
+		if (!cdev->private->options.force)
+			return -EINVAL;
+	}
+	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+	init_completion(&data.done);
+	data.rc = -EIO;
+	spin_lock_irq(sch->lock);
+	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
+	if (rc)
+		goto out_unlock;
+	/* Perform operation. */
+	cdev->private->state = DEV_STATE_STEAL_LOCK;
+	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
+	spin_unlock_irq(sch->lock);
+	/* Wait for operation to finish. */
+	if (wait_for_completion_interruptible(&data.done)) {
+		/* Got a signal. */
+		spin_lock_irq(sch->lock);
+		ccw_request_cancel(cdev);
+		spin_unlock_irq(sch->lock);
+		wait_for_completion(&data.done);
+	}
+	rc = data.rc;
+	/* Check results. */
+	spin_lock_irq(sch->lock);
+	cio_disable_subchannel(sch);
+	cdev->private->state = DEV_STATE_BOXED;
+out_unlock:
+	spin_unlock_irq(sch->lock);
+	kfree(buffer);
+	return rc;
+}
 
 void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
@@ -507,3 +507,55 @@ void ccw_device_disband_start(struct ccw_device *cdev)
 	spid_build_cp(cdev, fn);
 	ccw_request_start(cdev);
 }
+
+static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+
+	cp[0].cmd_code = CCW_CMD_STLCK;
+	cp[0].cda = (u32) (addr_t) buf1;
+	cp[0].count = 32;
+	cp[0].flags = CCW_FLAG_CC;
+	cp[1].cmd_code = CCW_CMD_RELEASE;
+	cp[1].cda = (u32) (addr_t) buf2;
+	cp[1].count = 32;
+	cp[1].flags = 0;
+	req->cp = cp;
+}
+
+static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	ccw_device_stlck_done(cdev, data, rc);
+}
+
+/**
+ * ccw_device_stlck_start - perform unconditional release
+ * @cdev: ccw device
+ * @data: data pointer to be passed to ccw_device_stlck_done
+ * @buf1: data pointer used in channel program
+ * @buf2: data pointer used in channel program
+ *
+ * Execute a channel program on @cdev to release an existing PGID reservation.
+ * When finished, call ccw_device_stlck_done with a return code specifying the
+ * result.
+ */
+void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
+			    void *buf2)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	CIO_TRACE_EVENT(4, "stlck");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Request setup. */
+	memset(req, 0, sizeof(*req));
+	req->timeout = PGID_TIMEOUT;
+	req->maxretries = PGID_RETRIES;
+	req->lpm = sch->schib.pmcw.pam & sch->opm;
+	req->data = data;
+	req->callback = stlck_callback;
+	stlck_build_cp(cdev, buf1, buf2);
+	ccw_request_start(cdev);
+}