Commit 029f5468 authored by Jeff Garzik

Merge branch 'upstream'

Conflicts:

	drivers/scsi/libata-core.c
	drivers/scsi/pdc_adma.c
	drivers/scsi/sata_mv.c
	drivers/scsi/sata_nv.c
	drivers/scsi/sata_promise.c
	drivers/scsi/sata_qstor.c
	drivers/scsi/sata_sx4.c
	drivers/scsi/sata_vsc.c
	include/linux/libata.h
@@ -165,7 +165,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs		:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
-libata-objs		:= libata-core.o libata-scsi.o libata-bmdma.o
+libata-objs		:= libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
 oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
This diff has been collapsed.
/*
* libata-eh.c - libata error handling
*
* Maintained by: Jeff Garzik <jgarzik@pobox.com>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
* Copyright 2006 Tejun Heo <htejun@gmail.com>
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
* USA.
*
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/DocBook/libata.*
*
* Hardware documentation available from http://www.t13.org/ and
* http://www.sata-io.org/
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "libata.h"
/**
* ata_scsi_timed_out - SCSI layer time out callback
* @cmd: timed out SCSI command
*
* Handles SCSI layer timeout. We race with normal completion of
* the qc for @cmd. If the qc is already gone, we lose and let
* the scsi command finish (EH_HANDLED). Otherwise, the qc has
* timed out and EH should be invoked. Prevent ata_qc_complete()
* from finishing it by setting EH_SCHEDULED and return
* EH_NOT_HANDLED.
*
* LOCKING:
* Called from timer context
*
* RETURNS:
* EH_HANDLED or EH_NOT_HANDLED
*/
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum scsi_eh_timer_return ret = EH_HANDLED;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(&ap->host_set->lock, flags);
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
/**
* ata_scsi_error - SCSI layer error handler callback
* @host: SCSI host on which error occurred
*
* Handles SCSI-layer-thrown error events.
*
* LOCKING:
* Inherited from SCSI layer (none, can sleep)
*
* RETURNS:
* Zero.
*/
int ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = (struct ata_port *)&host->hostdata[0];

	DPRINTK("ENTER\n");

	/* synchronize with IRQ handler and port task */
	spin_unlock_wait(&ap->host_set->lock);
	ata_port_flush_task(ap);

	WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);

	ap->ops->eng_timeout(ap);

	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	DPRINTK("EXIT\n");
	return 0;
}
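
/*
 * For context (not part of this file): the two entry points above reach
 * the SCSI midlayer through libata's shared transport template rather
 * than through each LLDD's scsi_host_template.  A minimal sketch of that
 * wiring follows; the field names (eh_strategy_handler, eh_timed_out)
 * reflect the contemporary SCSI API and should be read as assumptions,
 * not as a verbatim excerpt from libata-scsi.c.
 */
struct scsi_transport_template ata_scsi_transport_template = {
	.eh_strategy_handler	= ata_scsi_error,	/* runs in the EH thread */
	.eh_timed_out		= ata_scsi_timed_out,	/* runs in timer context */
};
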
/**
* ata_qc_timeout - Handle timeout of queued command
* @qc: Command that timed out
*
* Some part of the kernel (currently, only the SCSI layer)
* has noticed that the active command on port @ap has not
* completed after a specified length of time. Handle this
* condition by disabling DMA (if necessary) and completing
* transactions, with error if necessary.
*
* This also handles the case of the "lost interrupt", where
* for some reason (possibly hardware bug, possibly driver bug)
* an interrupt was not delivered to the driver, even though the
* transaction completed successfully.
*
* LOCKING:
* Inherited from SCSI layer (none, can sleep)
*/
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host_set *host_set = ap->host_set;
	u8 host_stat = 0, drv_stat;
	unsigned long flags;

	DPRINTK("ENTER\n");

	ap->hsm_task_state = HSM_ST_IDLE;

	spin_lock_irqsave(&host_set->lock, flags);

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		host_stat = ap->ops->bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	default:
		ata_altstatus(ap);
		drv_stat = ata_chk_status(ap);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
		       ap->id, qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		qc->err_mask |= ac_err_mask(drv_stat);
		break;
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	ata_eh_qc_complete(qc);

	DPRINTK("EXIT\n");
}
/**
* ata_eng_timeout - Handle timeout of queued command
* @ap: Port on which timed-out command is active
*
* Some part of the kernel (currently, only the SCSI layer)
* has noticed that the active command on port @ap has not
* completed after a specified length of time. Handle this
* condition by disabling DMA (if necessary) and completing
* transactions, with error if necessary.
*
* This also handles the case of the "lost interrupt", where
* for some reason (possibly hardware bug, possibly driver bug)
* an interrupt was not delivered to the driver, even though the
* transaction completed successfully.
*
* LOCKING:
* Inherited from SCSI layer (none, can sleep)
*/
void ata_eng_timeout(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));

	DPRINTK("EXIT\n");
}
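
/*
 * For context (not part of this file): low-level drivers reach
 * ata_eng_timeout() through the eng_timeout hook in their port
 * operations.  example_port_ops is a made-up name used purely for
 * illustration; only the .eng_timeout assignment reflects real usage.
 */
static const struct ata_port_operations example_port_ops = {
	/* taskfile, bmdma and irq hooks omitted for brevity */
	.eng_timeout	= ata_eng_timeout,	/* invoked from ata_scsi_error() */
};
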
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
/**
* ata_eh_qc_complete - Complete an active ATA command from EH
* @qc: Command to complete
*
* Indicate to the mid and upper layers that an ATA command has
* completed. To be used from EH.
*/
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}
/**
* ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
* @qc: Command to retry
*
* Indicate to the mid and upper layers that an ATA command
* should be retried. To be used from EH.
*
* SCSI midlayer limits the number of retries to scmd->allowed.
* scmd->retries is decremented for commands which get retried
* due to unrelated failures (qc->err_mask is zero).
*/
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}
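
/*
 * For context (not part of this file): taken together, the two helpers
 * above give EH code a simple disposition policy.  example_eh_finish is
 * a hypothetical helper, not a libata function.
 */
static void example_eh_finish(struct ata_queued_cmd *qc)
{
	if (qc->err_mask)
		ata_eh_qc_complete(qc);	/* report the failure upward */
	else
		ata_eh_qc_retry(qc);	/* collateral victim: reissue without
					 * consuming one of scmd->allowed retries */
}
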
@@ -53,7 +53,6 @@
 typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
 static struct ata_device *
 ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev);
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 
 #define RW_RECOVERY_MPAGE 0x1
 #define RW_RECOVERY_MPAGE_LEN 12
@@ -545,17 +544,12 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
-	/*
-	 * Read the controller registers.
-	 */
-	WARN_ON(qc->ap->ops->tf_read == NULL);
-	qc->ap->ops->tf_read(qc->ap, tf);
-
 	/*
 	 * Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
-	if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+	if (qc->err_mask ||
+	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
 				   &sb[1], &sb[2], &sb[3]);
 		sb[1] &= 0x0f;
@@ -620,17 +614,12 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
-	/*
-	 * Read the controller registers.
-	 */
-	WARN_ON(qc->ap->ops->tf_read == NULL);
-	qc->ap->ops->tf_read(qc->ap, tf);
-
 	/*
 	 * Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
-	if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+	if (qc->err_mask ||
+	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
 				   &sb[2], &sb[12], &sb[13]);
 		sb[2] &= 0x0f;
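
The two hunks above drop the implicit register read from the sense generators: callers must now latch the result taskfile into qc->tf themselves, and qc->err_mask alone is enough to trigger sense generation. A minimal sketch of the new caller contract, with a hypothetical helper name:

static void example_fill_sense(struct ata_queued_cmd *qc)
{
	qc->ap->ops->tf_read(qc->ap, &qc->tf);	/* latch the result taskfile */
	ata_gen_ata_desc_sense(qc);		/* map it, plus err_mask, to sense data */
}
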
@@ -723,141 +712,6 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
 	return 0;	/* scsi layer doesn't check return value, sigh */
 }
 
-/**
- * ata_scsi_timed_out - SCSI layer time out callback
- * @cmd: timed out SCSI command
- *
- * Handles SCSI layer timeout.  We race with normal completion of
- * the qc for @cmd.  If the qc is already gone, we lose and let
- * the scsi command finish (EH_HANDLED).  Otherwise, the qc has
- * timed out and EH should be invoked.  Prevent ata_qc_complete()
- * from finishing it by setting EH_SCHEDULED and return
- * EH_NOT_HANDLED.
- *
- * LOCKING:
- * Called from timer context
- *
- * RETURNS:
- * EH_HANDLED or EH_NOT_HANDLED
- */
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
-{
-	struct Scsi_Host *host = cmd->device->host;
-	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
-	unsigned long flags;
-	struct ata_queued_cmd *qc;
-	enum scsi_eh_timer_return ret = EH_HANDLED;
-
-	DPRINTK("ENTER\n");
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-	if (qc) {
-		WARN_ON(qc->scsicmd != cmd);
-		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
-		qc->err_mask |= AC_ERR_TIMEOUT;
-		ret = EH_NOT_HANDLED;
-	}
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	DPRINTK("EXIT, ret=%d\n", ret);
-	return ret;
-}
-
-/**
- * ata_scsi_error - SCSI layer error handler callback
- * @host: SCSI host on which error occurred
- *
- * Handles SCSI-layer-thrown error events.
- *
- * LOCKING:
- * Inherited from SCSI layer (none, can sleep)
- *
- * RETURNS:
- * Zero.
- */
-int ata_scsi_error(struct Scsi_Host *host)
-{
-	struct ata_port *ap;
-	unsigned long flags;
-
-	DPRINTK("ENTER\n");
-
-	ap = (struct ata_port *) &host->hostdata[0];
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	WARN_ON(ap->flags & ATA_FLAG_IN_EH);
-	ap->flags |= ATA_FLAG_IN_EH;
-	WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	ata_port_flush_task(ap);
-
-	ap->ops->eng_timeout(ap);
-
-	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
-
-	scsi_eh_flush_done_q(&ap->eh_done_q);
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	ap->flags &= ~ATA_FLAG_IN_EH;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	DPRINTK("EXIT\n");
-	return 0;
-}
-
-static void ata_eh_scsidone(struct scsi_cmnd *scmd)
-{
-	/* nada */
-}
-
-static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct scsi_cmnd *scmd = qc->scsicmd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	qc->scsidone = ata_eh_scsidone;
-	__ata_qc_complete(qc);
-	WARN_ON(ata_tag_valid(qc->tag));
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
-}
-
-/**
- * ata_eh_qc_complete - Complete an active ATA command from EH
- * @qc: Command to complete
- *
- * Indicate to the mid and upper layers that an ATA command has
- * completed.  To be used from EH.
- */
-void ata_eh_qc_complete(struct ata_queued_cmd *qc)
-{
-	struct scsi_cmnd *scmd = qc->scsicmd;
-	scmd->retries = scmd->allowed;
-	__ata_eh_qc_complete(qc);
-}
-
-/**
- * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
- * @qc: Command to retry
- *
- * Indicate to the mid and upper layers that an ATA command
- * should be retried.  To be used from EH.
- *
- * SCSI midlayer limits the number of retries to scmd->allowed.
- * This function might need to adjust scmd->retries for commands
- * which get retried due to unrelated NCQ failures.
- */
-void ata_eh_qc_retry(struct ata_queued_cmd *qc)
-{
-	__ata_eh_qc_complete(qc);
-}
-
 /**
  * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
  * @qc: Storage for translated ATA taskfile
@@ -1197,6 +1051,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
 	u64 block;
 	u32 n_block;
 
+	qc->flags |= ATA_QCFLAG_IO;
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 
 	if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
@@ -1343,11 +1198,14 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 	 */
 	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
 	    ((cdb[2] & 0x20) || need_sense)) {
+		qc->ap->ops->tf_read(qc->ap, &qc->tf);
 		ata_gen_ata_desc_sense(qc);
 	} else {
 		if (!need_sense) {
 			cmd->result = SAM_STAT_GOOD;
 		} else {
+			qc->ap->ops->tf_read(qc->ap, &qc->tf);
+
 			/* TODO: decide which descriptor format to use
 			 * for 48b LBA devices and call that here
 			 * instead of the fixed desc, which is only
@@ -2139,13 +1997,15 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
 
 static void atapi_sense_complete(struct ata_queued_cmd *qc)
 {
-	if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0))
+	if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
 		/* FIXME: not quite right; we don't want the
 		 * translation of taskfile registers into
 		 * a sense descriptors, since that's only
 		 * correct for ATA, not ATAPI
 		 */
+		qc->ap->ops->tf_read(qc->ap, &qc->tf);
 		ata_gen_ata_desc_sense(qc);
+	}
 
 	qc->scsidone(qc->scsicmd);
 	ata_qc_free(qc);
@@ -2213,17 +2073,15 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
 		cmd->result = SAM_STAT_CHECK_CONDITION;
 		atapi_request_sense(qc);
 		return;
-	}
-	else if (unlikely(err_mask))
+	} else if (unlikely(err_mask)) {
 		/* FIXME: not quite right; we don't want the
 		 * translation of taskfile registers into
 		 * a sense descriptors, since that's only
 		 * correct for ATA, not ATAPI
 		 */
+		qc->ap->ops->tf_read(qc->ap, &qc->tf);
 		ata_gen_ata_desc_sense(qc);
-
-	else {
+	} else {
 		u8 *scsicmd = cmd->cmnd;
 
 		if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
@@ -2737,7 +2595,7 @@ void ata_scsi_scan_host(struct ata_port *ap)
 	struct ata_device *dev;
 	unsigned int i;
 
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+	if (ap->flags & ATA_FLAG_DISABLED)
 		return;
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
@@ -45,7 +45,20 @@ extern int libata_fua;
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 					      struct ata_device *dev);
 extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
+extern void ata_dev_disable(struct ata_port *ap, struct ata_device *dev);
 extern void ata_port_flush_task(struct ata_port *ap);
+extern unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
+				  struct ata_taskfile *tf, const u8 *cdb,
+				  int dma_dir, void *buf, unsigned int buflen);
+extern int ata_down_sata_spd_limit(struct ata_port *ap);
+extern int ata_set_sata_spd_needed(struct ata_port *ap);
+extern int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
+				   int force_pio0);
+extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
+extern int ata_do_reset(struct ata_port *ap,
+			ata_reset_fn_t reset,
+			ata_postreset_fn_t postreset,
+			int verbose, unsigned int *classes);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
 extern void ata_qc_issue(struct ata_queued_cmd *qc);
 extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
@@ -60,7 +73,6 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
 extern struct scsi_transport_template ata_scsi_transport_template;
 
 extern void ata_scsi_scan_host(struct ata_port *ap);
-extern int ata_scsi_error(struct Scsi_Host *host);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
 				       unsigned int buflen);
 
@@ -90,4 +102,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
 			unsigned int (*actor) (struct ata_scsi_args *args,
 					       u8 *rbuf, unsigned int buflen));
 
+/* libata-eh.c */
+extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
+
 #endif /* __LIBATA_H__ */
@@ -456,7 +456,7 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
 			continue;
 		handled = 1;
 		adma_enter_reg_mode(ap);
-		if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		if (ap->flags & ATA_FLAG_DISABLED)
 			continue;
 		pp = ap->private_data;
 		if (!pp || pp->state != adma_state_pkt)
@@ -481,7 +481,7 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
 	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
-		if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+		if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
 			struct ata_queued_cmd *qc;
 			struct adma_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != adma_state_mmio)
@@ -1397,7 +1397,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 			}
 		}
 
-		if (ap && (ap->flags & ATA_FLAG_PORT_DISABLED))
+		if (ap && (ap->flags & ATA_FLAG_DISABLED))
 			continue;
 
 		err_mask = ac_err_mask(ata_status);
@@ -280,7 +280,7 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -535,7 +535,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
 		ap = host_set->ports[i];
 		tmp = mask & (1 << (i + 1));
 		if (tmp && ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -395,7 +395,7 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
 			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
 				sff1, sff0, port_no, sHST, sDST);
 			handled = 1;
-			if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
 				struct ata_queued_cmd *qc;
 				struct qs_port_priv *pp = ap->private_data;
 				if (!pp || pp->state != qs_state_pkt)
@@ -428,7 +428,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
 		if (ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 			struct qs_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != qs_state_mmio)
@@ -770,7 +770,7 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
 	for (i = 0; i < host_set->n_ports; i++)
 		if (status & (1 << i)) {
 			struct ata_port *ap = host_set->ports[i];
-			if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
 				sil24_host_intr(host_set->ports[i]);
 				handled++;
 			} else
@@ -834,7 +834,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
 		tmp = mask & (1 << i);
 		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
 		if (tmp && ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -229,7 +229,7 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
 				handled++;
 			}
 
-			if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
 				struct ata_queued_cmd *qc;
 
 				qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -120,10 +120,12 @@ enum {
 	ATA_SHT_USE_CLUSTERING	= 1,
 
 	/* struct ata_device stuff */
-	ATA_DFLAG_LBA48		= (1 << 0), /* device supports LBA48 */
-	ATA_DFLAG_PIO		= (1 << 1), /* device currently in PIO mode */
-	ATA_DFLAG_LBA		= (1 << 2), /* device supports LBA */
-	ATA_DFLAG_CDB_INTR	= (1 << 3), /* device asserts INTRQ when ready for CDB */
+	ATA_DFLAG_LBA		= (1 << 0), /* device supports LBA */
+	ATA_DFLAG_LBA48		= (1 << 1), /* device supports LBA48 */
+	ATA_DFLAG_CDB_INTR	= (1 << 2), /* device asserts INTRQ when ready for CDB */
+	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,
+
+	ATA_DFLAG_PIO		= (1 << 8), /* device currently in PIO mode */
 
 	ATA_DEV_UNKNOWN		= 0,	/* unknown device */
 	ATA_DEV_ATA		= 1,	/* ATA device */
@@ -133,33 +135,35 @@ enum {
 	ATA_DEV_NONE		= 5,	/* no device */
 
 	/* struct ata_port flags */
-	ATA_FLAG_SLAVE_POSS	= (1 << 1), /* host supports slave dev */
+	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
 					    /* (doesn't imply presence) */
-	ATA_FLAG_PORT_DISABLED	= (1 << 2), /* port is disabled, ignore it */
-	ATA_FLAG_SATA		= (1 << 3),
-	ATA_FLAG_NO_LEGACY	= (1 << 4), /* no legacy mode check */
-	ATA_FLAG_SRST		= (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */
-	ATA_FLAG_MMIO		= (1 << 6), /* use MMIO, not PIO */
-	ATA_FLAG_SATA_RESET	= (1 << 7), /* (obsolete) use COMRESET */
-	ATA_FLAG_PIO_DMA	= (1 << 8), /* PIO cmds via DMA */
-	ATA_FLAG_PIO_POLLING	= (1 << 9), /* use polling PIO if LLD
-					     * doesn't handle PIO interrupts */
-	ATA_FLAG_DEBUGMSG	= (1 << 10),
-	ATA_FLAG_NO_ATAPI	= (1 << 11), /* No ATAPI support */
-	ATA_FLAG_SUSPENDED	= (1 << 12), /* port is suspended */
-
-	ATA_FLAG_PIO_LBA48	= (1 << 13), /* Host DMA engine is LBA28 only */
-	ATA_FLAG_IRQ_MASK	= (1 << 14), /* Mask IRQ in PIO xfers */
-
-	ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* Flush port task */
-	ATA_FLAG_IN_EH		= (1 << 16), /* EH in progress */
-
-	ATA_QCFLAG_ACTIVE	= (1 << 1), /* cmd not yet ack'd to scsi lyer */
-	ATA_QCFLAG_SG		= (1 << 3), /* have s/g table? */
-	ATA_QCFLAG_SINGLE	= (1 << 4), /* no s/g, just a single buffer */
+	ATA_FLAG_SATA		= (1 << 1),
+	ATA_FLAG_NO_LEGACY	= (1 << 2), /* no legacy mode check */
+	ATA_FLAG_MMIO		= (1 << 3), /* use MMIO, not PIO */
+	ATA_FLAG_SRST		= (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
+	ATA_FLAG_SATA_RESET	= (1 << 5), /* (obsolete) use COMRESET */
+	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
+	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */
+	ATA_FLAG_PIO_LBA48	= (1 << 8), /* Host DMA engine is LBA28 only */
+	ATA_FLAG_IRQ_MASK	= (1 << 9), /* Mask IRQ in PIO xfers */
+	ATA_FLAG_PIO_POLLING	= (1 << 10), /* use polling PIO if LLD
+					      * doesn't handle PIO interrupts */
+
+	ATA_FLAG_DEBUGMSG	= (1 << 17),
+	ATA_FLAG_FLUSH_PORT_TASK = (1 << 18), /* flush port task */
+
+	ATA_FLAG_DISABLED	= (1 << 19), /* port is disabled, ignore it */
+	ATA_FLAG_SUSPENDED	= (1 << 20), /* port is suspended */
+
+	/* bits 24:31 of ap->flags are reserved for LLDD specific flags */
+
+	/* struct ata_queued_cmd flags */
+	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to scsi lyer */
+	ATA_QCFLAG_SG		= (1 << 1), /* have s/g table? */
+	ATA_QCFLAG_SINGLE	= (1 << 2), /* no s/g, just a single buffer */
 	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
-	ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */
+	ATA_QCFLAG_IO		= (1 << 3), /* standard IO command */
+	ATA_QCFLAG_EH_SCHEDULED = (1 << 4), /* EH scheduled */
 
 	/* host set flags */
 	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host_set only */
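
The flag renumbering above splits dev->flags into IDENTIFY-derived configuration bits (everything below ATA_DFLAG_CFG_MASK) and runtime bits such as ATA_DFLAG_PIO. A hedged sketch of why the split is useful; the helper name is made up and only the mask expression reflects the header above:

static void example_reset_cfg_flags(struct ata_device *dev)
{
	dev->flags &= ~ATA_DFLAG_CFG_MASK;	/* forget IDENTIFY-derived bits (LBA, LBA48, CDB_INTR) */
	/* runtime bits at (1 << 8) and above, e.g. ATA_DFLAG_PIO, survive */
}
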
@@ -205,10 +209,13 @@
 	/* size of buffer to pad xfers ending on unaligned boundaries */
 	ATA_DMA_PAD_SZ		= 4,
 	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
 
-	/* Masks for port functions */
+	/* masks for port functions */
 	ATA_PORT_PRIMARY	= (1 << 0),
 	ATA_PORT_SECONDARY	= (1 << 1),
+
+	/* how hard are we gonna try to probe/recover devices */
+	ATA_PROBE_MAX_TRIES	= 3,
 };
 
 enum hsm_task_states {
@@ -394,6 +401,7 @@ struct ata_port {
 	unsigned int		mwdma_mask;
 	unsigned int		udma_mask;
 	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
+	unsigned int		sata_spd_limit; /* SATA PHY speed limit */
 
 	struct ata_device	device[ATA_MAX_DEVICES];
@@ -519,9 +527,6 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
 extern int ata_scsi_detect(struct scsi_host_template *sht);
 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
-extern int ata_scsi_error(struct Scsi_Host *host);
-extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
-extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
 extern int ata_scsi_release(struct Scsi_Host *host);
 extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
 extern int ata_scsi_device_resume(struct scsi_device *);
@@ -570,7 +575,6 @@ extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
 extern u8 ata_bmdma_status(struct ata_port *ap);
 extern void ata_bmdma_irq_clear(struct ata_port *ap);
 extern void __ata_qc_complete(struct ata_queued_cmd *qc);
-extern void ata_eng_timeout(struct ata_port *ap);
 extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
 			      struct scsi_cmnd *cmd,
 			      void (*done)(struct scsi_cmnd *));
@@ -625,6 +629,14 @@ extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bit
 extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
 #endif /* CONFIG_PCI */
 
+/*
+ * EH
+ */
+extern int ata_scsi_error(struct Scsi_Host *host);
+extern void ata_eng_timeout(struct ata_port *ap);
+extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
+extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
+
 static inline int
 ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
@@ -678,6 +690,11 @@ static inline unsigned int ata_class_disabled(unsigned int class)
 	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
 }
 
+static inline unsigned int ata_class_absent(unsigned int class)
+{
+	return !ata_class_enabled(class) && !ata_class_disabled(class);
+}
+
 static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
 {
 	return ata_class_enabled(dev->class);
@@ -688,6 +705,11 @@ static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
 	return ata_class_disabled(dev->class);
 }
 
+static inline unsigned int ata_dev_absent(const struct ata_device *dev)
+{
+	return ata_class_absent(dev->class);
+}
+
 static inline u8 ata_chk_status(struct ata_port *ap)
 {
 	return ap->ops->check_status(ap);