Commit 45fabbb7 authored by Elias Oltmanns, committed by Jeff Garzik

libata: Implement disk shock protection support

On user request (through sysfs), the IDLE IMMEDIATE command with UNLOAD
FEATURE as specified in ATA-7 is issued to the device and processing of
the request queue is stopped thereafter until the specified timeout
expires or user space asks to resume normal operation. This is supposed
to prevent the heads of a hard drive from accidentally crashing onto the
platter when a heavy shock is anticipated (like a falling laptop
expected to hit the floor). In fact, the whole port stops processing
commands until the timeout has expired in order to avoid any resets due
to failed commands on another device.
Signed-off-by: Elias Oltmanns <eo@nebensachen.de>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
parent ea6ce53c
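The feature is controlled through a per-device unload_heads sysfs attribute added below (see ata_scsi_park_store() and ata_scsi_park_show()): writing a non-negative number parks the heads and freezes the port for that many milliseconds (capped at ATA_TMOUT_MAX_PARK), writing 0 resumes normal operation immediately, writing -1 or -2 clears or sets the ATA_DFLAG_NO_UNLOAD flag, and reading reports the milliseconds left until the port resumes. The following user-space sketch illustrates that interface; it is not part of the patch, and the sysfs path is an assumption that depends on how the disk is enumerated on a particular system.

#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative only, not part of the patch. The attribute path below is an
 * assumption (adjust it for the disk in question); the semantics follow
 * ata_scsi_park_store()/ata_scsi_park_show() in the diff: a non-negative
 * value is an unload timeout in milliseconds, reading returns the time left.
 */
#define UNLOAD_ATTR "/sys/block/sda/device/unload_heads" /* assumed path */

static int park_heads(unsigned int msecs)
{
        FILE *f = fopen(UNLOAD_ATTR, "w");

        if (!f)
                return -1;
        fprintf(f, "%u\n", msecs);      /* unload heads, freeze port for msecs */
        return fclose(f);
}

static long remaining_park_time(void)
{
        FILE *f = fopen(UNLOAD_ATTR, "r");
        long msecs = -1;

        if (!f)
                return -1;
        if (fscanf(f, "%ld", &msecs) != 1)
                msecs = -1;
        fclose(f);
        return msecs;
}

int main(void)
{
        if (park_heads(2000))           /* ask for a 2 second emergency park */
                return EXIT_FAILURE;
        printf("ms until normal operation resumes: %ld\n",
               remaining_park_time());
        return EXIT_SUCCESS;
}

Writing 0 to the attribute before the timeout expires ends the freeze early, which is how a user-space daemon would typically clear the condition once the shock risk has passed.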
@@ -316,6 +316,7 @@ static struct device_attribute *ahci_shost_attrs[] = {

static struct device_attribute *ahci_sdev_attrs[] = {
        &dev_attr_sw_activity,
        &dev_attr_unload_heads,
        NULL
};
...
@@ -5378,6 +5378,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
        INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
        INIT_LIST_HEAD(&ap->eh_done_q);
        init_waitqueue_head(&ap->eh_wait_q);
        init_completion(&ap->park_req_pending);
        init_timer_deferrable(&ap->fastdrain_timer);
        ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
        ap->fastdrain_timer.data = (unsigned long)ap;
...
@@ -2536,6 +2536,80 @@ int ata_eh_reset(struct ata_link *link, int classify,
        goto retry;
}

static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
        struct ata_link *link;
        struct ata_device *dev;
        unsigned long flags;

        /*
         * This function can be thought of as an extended version of
         * ata_eh_about_to_do() specially crafted to accommodate the
         * requirements of ATA_EH_PARK handling. Since the EH thread
         * does not leave the do {} while () loop in ata_eh_recover as
         * long as the timeout for a park request to *one* device on
         * the port has not expired, and since we still want to pick
         * up park requests to other devices on the same port or
         * timeout updates for the same device, we have to pull
         * ATA_EH_PARK actions from eh_info into eh_context.i
         * ourselves at the beginning of each pass over the loop.
         *
         * Additionally, all write accesses to &ap->park_req_pending
         * through INIT_COMPLETION() (see below) or complete_all()
         * (see ata_scsi_park_store()) are protected by the host lock.
         * As a result we have that park_req_pending.done is zero on
         * exit from this function, i.e. when ATA_EH_PARK actions for
         * *all* devices on port ap have been pulled into the
         * respective eh_context structs. If, and only if,
         * park_req_pending.done is non-zero by the time we reach
         * wait_for_completion_timeout(), another ATA_EH_PARK action
         * has been scheduled for at least one of the devices on port
         * ap and we have to cycle over the do {} while () loop in
         * ata_eh_recover() again.
         */

        spin_lock_irqsave(ap->lock, flags);
        INIT_COMPLETION(ap->park_req_pending);
        ata_port_for_each_link(link, ap) {
                ata_link_for_each_dev(dev, link) {
                        struct ata_eh_info *ehi = &link->eh_info;

                        link->eh_context.i.dev_action[dev->devno] |=
                                ehi->dev_action[dev->devno] & ATA_EH_PARK;
                        ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
                }
        }
        spin_unlock_irqrestore(ap->lock, flags);
}

static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;
        struct ata_taskfile tf;
        unsigned int err_mask;

        ata_tf_init(dev, &tf);
        if (park) {
                ehc->unloaded_mask |= 1 << dev->devno;
                tf.command = ATA_CMD_IDLEIMMEDIATE;
                tf.feature = 0x44;
                tf.lbal = 0x4c;
                tf.lbam = 0x4e;
                tf.lbah = 0x55;
        } else {
                ehc->unloaded_mask &= ~(1 << dev->devno);
                tf.command = ATA_CMD_CHK_POWER;
        }

        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
        tf.protocol |= ATA_PROT_NODATA;
        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
        if (park && (err_mask || tf.lbal != 0xc4)) {
                ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
                ehc->unloaded_mask &= ~(1 << dev->devno);
        }
}

static int ata_eh_revalidate_and_attach(struct ata_link *link,
                struct ata_device **r_failed_dev)
{
...
@@ -2845,7 +2919,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
        struct ata_device *dev;
        int nr_failed_devs;
        int rc;
        unsigned long flags, deadline;

        DPRINTK("ENTER\n");
...
@@ -2919,6 +2993,56 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
                }
        }

        do {
                unsigned long now;

                /*
                 * clears ATA_EH_PARK in eh_info and resets
                 * ap->park_req_pending
                 */
                ata_eh_pull_park_action(ap);

                deadline = jiffies;
                ata_port_for_each_link(link, ap) {
                        ata_link_for_each_dev(dev, link) {
                                struct ata_eh_context *ehc = &link->eh_context;
                                unsigned long tmp;

                                if (dev->class != ATA_DEV_ATA)
                                        continue;
                                if (!(ehc->i.dev_action[dev->devno] &
                                      ATA_EH_PARK))
                                        continue;
                                tmp = dev->unpark_deadline;
                                if (time_before(deadline, tmp))
                                        deadline = tmp;
                                else if (time_before_eq(tmp, jiffies))
                                        continue;
                                if (ehc->unloaded_mask & (1 << dev->devno))
                                        continue;

                                ata_eh_park_issue_cmd(dev, 1);
                        }
                }

                now = jiffies;
                if (time_before_eq(deadline, now))
                        break;

                deadline = wait_for_completion_timeout(&ap->park_req_pending,
                                                       deadline - now);
        } while (deadline);
        ata_port_for_each_link(link, ap) {
                ata_link_for_each_dev(dev, link) {
                        if (!(link->eh_context.unloaded_mask &
                              (1 << dev->devno)))
                                continue;

                        ata_eh_park_issue_cmd(dev, 0);
                        ata_eh_done(link, dev, ATA_EH_PARK);
                }
        }

        /* the rest */
        ata_port_for_each_link(link, ap) {
                struct ata_eh_context *ehc = &link->eh_context;
...
@@ -183,6 +183,105 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
            ata_scsi_lpm_show, ata_scsi_lpm_put);
EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);

static ssize_t ata_scsi_park_show(struct device *device,
                struct device_attribute *attr, char *buf)
{
        struct scsi_device *sdev = to_scsi_device(device);
        struct ata_port *ap;
        struct ata_link *link;
        struct ata_device *dev;
        unsigned long flags;
        unsigned int uninitialized_var(msecs);
        int rc = 0;

        ap = ata_shost_to_port(sdev->host);

        spin_lock_irqsave(ap->lock, flags);
        dev = ata_scsi_find_dev(ap, sdev);
        if (!dev) {
                rc = -ENODEV;
                goto unlock;
        }
        if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
                rc = -EOPNOTSUPP;
                goto unlock;
        }

        link = dev->link;
        if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
            link->eh_context.unloaded_mask & (1 << dev->devno) &&
            time_after(dev->unpark_deadline, jiffies))
                msecs = jiffies_to_msecs(dev->unpark_deadline - jiffies);
        else
                msecs = 0;

unlock:
        spin_unlock_irq(ap->lock);

        return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
}

static ssize_t ata_scsi_park_store(struct device *device,
                struct device_attribute *attr,
                const char *buf, size_t len)
{
        struct scsi_device *sdev = to_scsi_device(device);
        struct ata_port *ap;
        struct ata_device *dev;
        long int input;
        unsigned long flags;
        int rc;

        rc = strict_strtol(buf, 10, &input);
        if (rc || input < -2)
                return -EINVAL;
        if (input > ATA_TMOUT_MAX_PARK) {
                rc = -EOVERFLOW;
                input = ATA_TMOUT_MAX_PARK;
        }

        ap = ata_shost_to_port(sdev->host);

        spin_lock_irqsave(ap->lock, flags);
        dev = ata_scsi_find_dev(ap, sdev);
        if (unlikely(!dev)) {
                rc = -ENODEV;
                goto unlock;
        }
        if (dev->class != ATA_DEV_ATA) {
                rc = -EOPNOTSUPP;
                goto unlock;
        }

        if (input >= 0) {
                if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
                        rc = -EOPNOTSUPP;
                        goto unlock;
                }

                dev->unpark_deadline = ata_deadline(jiffies, input);
                dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
                ata_port_schedule_eh(ap);
                complete(&ap->park_req_pending);
        } else {
                switch (input) {
                case -1:
                        dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
                        break;
                case -2:
                        dev->flags |= ATA_DFLAG_NO_UNLOAD;
                        break;
                }
        }
unlock:
        spin_unlock_irqrestore(ap->lock, flags);

        return rc ? rc : len;
}

DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
            ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);

static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
        cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
...
@@ -269,6 +368,12 @@ DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show,
            ata_scsi_activity_store);
EXPORT_SYMBOL_GPL(dev_attr_sw_activity);

struct device_attribute *ata_common_sdev_attrs[] = {
        &dev_attr_unload_heads,
        NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);

static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
                void (*done)(struct scsi_cmnd *))
{
...
@@ -954,6 +1059,9 @@ static int atapi_drain_needed(struct request *rq)
static int ata_scsi_dev_config(struct scsi_device *sdev,
                struct ata_device *dev)
{
        if (!ata_id_has_unload(dev->id))
                dev->flags |= ATA_DFLAG_NO_UNLOAD;

        /* configure max sectors */
        blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
...
@@ -146,6 +146,7 @@ enum {
        ATA_DFLAG_SPUNDOWN      = (1 << 14), /* XXX: for spindown_compat */
        ATA_DFLAG_SLEEPING      = (1 << 15), /* device is sleeping */
        ATA_DFLAG_DUBIOUS_XFER  = (1 << 16), /* data transfer not verified */
        ATA_DFLAG_NO_UNLOAD     = (1 << 17), /* device doesn't support unload */
        ATA_DFLAG_INIT_MASK     = (1 << 24) - 1,

        ATA_DFLAG_DETACH        = (1 << 24),
...
@@ -244,6 +245,7 @@ enum {
        ATA_TMOUT_BOOT          = 30000,        /* heuristic */
        ATA_TMOUT_BOOT_QUICK    = 7000,         /* heuristic */
        ATA_TMOUT_INTERNAL_QUICK = 5000,
        ATA_TMOUT_MAX_PARK      = 30000,

        /* FIXME: GoVault needs 2s but we can't afford that without
         * parallel probing.  800ms is enough for iVDR disk
...
@@ -319,8 +321,9 @@ enum {
        ATA_EH_RESET            = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
        ATA_EH_ENABLE_LINK      = (1 << 3),
        ATA_EH_LPM              = (1 << 4),  /* link power management action */
        ATA_EH_PARK             = (1 << 5), /* unload heads and stop I/O */

        ATA_EH_PERDEV_MASK      = ATA_EH_REVALIDATE | ATA_EH_PARK,
        ATA_EH_ALL_ACTIONS      = ATA_EH_REVALIDATE | ATA_EH_RESET |
                                  ATA_EH_ENABLE_LINK | ATA_EH_LPM,
...
@@ -454,6 +457,7 @@ enum link_pm {
        MEDIUM_POWER,
};
extern struct device_attribute dev_attr_link_power_management_policy;
extern struct device_attribute dev_attr_unload_heads;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
extern struct device_attribute dev_attr_sw_activity;
...
@@ -566,6 +570,7 @@ struct ata_device {
        /* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
        u64                     n_sectors;      /* size of device, if ATA */
        unsigned int            class;          /* ATA_DEV_xxx */
        unsigned long           unpark_deadline;

        u8                      pio_mode;
        u8                      dma_mode;
...
@@ -623,6 +628,7 @@ struct ata_eh_context {
                                        [ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
        unsigned int            classes[ATA_MAX_DEVICES];
        unsigned int            did_probe_mask;
        unsigned int            unloaded_mask;
        unsigned int            saved_ncq_enabled;
        u8                      saved_xfer_mode[ATA_MAX_DEVICES];
        /* timestamp for the last reset attempt or success */
...
@@ -712,6 +718,7 @@ struct ata_port {
        struct list_head        eh_done_q;
        wait_queue_head_t       eh_wait_q;
        int                     eh_tries;
        struct completion       park_req_pending;

        pm_message_t            pm_mesg;
        int                     *pm_result;
...
@@ -1102,6 +1109,7 @@ extern void ata_std_error_handler(struct ata_port *ap);
 */
extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
extern struct device_attribute *ata_common_sdev_attrs[];

#define ATA_BASE_SHT(drv_name)                                  \
        .module                 = THIS_MODULE,                  \
...
@@ -1116,7 +1124,8 @@ extern const struct ata_port_operations sata_port_ops;
        .proc_name              = drv_name,                     \
        .slave_configure        = ata_scsi_slave_config,        \
        .slave_destroy          = ata_scsi_slave_destroy,       \
        .bios_param             = ata_std_bios_param,           \
        .sdev_attrs             = ata_common_sdev_attrs

#define ATA_NCQ_SHT(drv_name)                                   \
        ATA_BASE_SHT(drv_name),                                 \
...