Commit a1480a16 authored by Linus Torvalds

Merge branch 'for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata

Pull libata updates from Tejun Heo:

 - Hannes's patchset implements support for better error reporting
   introduced by the new ATA command spec.

 - the deprecated pci_ DMA API usages have been replaced by dma_ ones (a
   sketch of the conversion pattern follows this list).

 - a bunch of hardware specific updates and some cleanups.
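
For reference, the pci_set_dma_mask()/pci_set_consistent_dma_mask() calls removed
throughout this series map onto the generic DMA API as in the minimal sketch below.
The helper name is illustrative and not taken from any driver touched here; the
pattern itself mirrors the hunks later in this diff.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical helper showing the pci_* -> dma_* DMA-mask conversion. */
static int example_configure_dma(struct pci_dev *pdev)
{
	int rc;

	/* Was: pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) */
	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		/* Was: pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) */
		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* Was: pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) */
	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}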

* 'for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata:
  ata: remove deprecated use of pci api
  ahci: st: st_configure_oob must be called after IP is clocked.
  ahci: st: Update the ahci_st DT documentation
  ahci: st: Update the DT example for how to obtain the PHY.
  sata_dwc_460ex: indent an if statement
  libata: Add tracepoints
  libata-eh: Set 'information' field for autosense
  libata: Implement support for sense data reporting
  libata: Implement NCQ autosense
  libata: use status bit definitions in ata_dump_status()
  ide,ata: Rename ATA_IDX to ATA_SENSE
  libata: whitespace fixes in ata_to_sense_error()
  libata: whitespace cleanup in ata_get_cmd_descript()
  libata: use READ_LOG_DMA_EXT
  libata: remove ATA_FLAG_LOWTAG
  sata_dwc_460ex: re-use hsdev->dev instead of dwc_dev
  sata_dwc_460ex: move to generic DMA driver
  sata_dwc_460ex: join messages back
  sata: xgene: add ACPI support for APM X-Gene SATA ports
  ata: sata_mv: add proper definitions for LP_PHY_CTL register values
@@ -3,29 +3,48 @@ STMicroelectronics STi SATA controller
 
 This binding describes a SATA device.
 
 Required properties:
- - compatible		: Must be "st,sti-ahci"
+ - compatible		: Must be "st,ahci"
  - reg			: Physical base addresses and length of register sets
  - interrupts		: Interrupt associated with the SATA device
  - interrupt-names	: Associated name must be; "hostc"
- - resets		: The power-down and soft-reset lines of SATA IP
- - reset-names		: Associated names must be; "pwr-dwn" and "sw-rst"
  - clocks		: The phandle for the clock
  - clock-names		: Associated name must be; "ahci_clk"
- - phys			: The phandle for the PHY device
+ - phys			: The phandle for the PHY port
  - phy-names		: Associated name must be; "ahci_phy"
 
+Optional properties:
+ - resets		: The power-down, soft-reset and power-reset lines of SATA IP
+ - reset-names		: Associated names must be; "pwr-dwn", "sw-rst" and "pwr-rst"
+
 Example:
 
+	/* Example for stih416 */
 	sata0: sata@fe380000 {
-		compatible	= "st,sti-ahci";
+		compatible	= "st,ahci";
 		reg		= <0xfe380000 0x1000>;
 		interrupts	= <GIC_SPI 157 IRQ_TYPE_NONE>;
 		interrupt-names	= "hostc";
-		phys		= <&miphy365x_phy MIPHY_PORT_0 MIPHY_TYPE_SATA>;
+		phys		= <&phy_port0 PHY_TYPE_SATA>;
 		phy-names	= "ahci_phy";
 		resets		= <&powerdown STIH416_SATA0_POWERDOWN>,
 				  <&softreset STIH416_SATA0_SOFTRESET>;
 		reset-names	= "pwr-dwn", "sw-rst";
 		clocks		= <&clk_s_a0_ls CLK_ICN_REG>;
 		clock-names	= "ahci_clk";
 	};
+
+	/* Example for stih407 family silicon */
+	sata0: sata@9b20000 {
+		compatible	= "st,ahci";
+		reg		= <0x9b20000 0x1000>;
+		interrupts	= <GIC_SPI 159 IRQ_TYPE_NONE>;
+		interrupt-names	= "hostc";
+		phys		= <&phy_port0 PHY_TYPE_SATA>;
+		phy-names	= "ahci_phy";
+		resets		= <&powerdown STIH407_SATA0_POWERDOWN>,
+				  <&softreset STIH407_SATA0_SOFTRESET>,
+				  <&softreset STIH407_SATA0_PWR_SOFTRESET>;
+		reset-names	= "pwr-dwn", "sw-rst", "pwr-rst";
+		clocks		= <&clk_s_c0_flexgen CLK_ICN_REG>;
+		clock-names	= "ahci_clk";
+	};
@@ -111,7 +111,8 @@ obj-$(CONFIG_ATA_GENERIC) += ata_generic.o
 # Should be last libata driver
 obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o
 
-libata-y	:= libata-core.o libata-scsi.o libata-eh.o libata-transport.o
+libata-y	:= libata-core.o libata-scsi.o libata-eh.o \
+	libata-transport.o libata-trace.o
 libata-$(CONFIG_ATA_SFF)	+= libata-sff.o
 libata-$(CONFIG_SATA_PMP)	+= libata-pmp.o
 libata-$(CONFIG_ATA_ACPI)	+= libata-acpi.o
......
@@ -181,10 +181,10 @@ static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
 	int rc;
 
 	if (using_dac &&
-	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 			if (rc) {
 				dev_err(&pdev->dev,
 					"64-bit DMA enable failed\n");
@@ -192,12 +192,12 @@ static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
 			}
 		}
 	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc) {
 			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 			return rc;
 		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc) {
 			dev_err(&pdev->dev,
 				"32-bit consistent DMA enable failed\n");
......
@@ -738,10 +738,10 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
 		return 0;
 
 	if (using_dac &&
-	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 			if (rc) {
 				dev_err(&pdev->dev,
 					"64-bit DMA enable failed\n");
@@ -749,12 +749,12 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
 			}
 		}
 	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc) {
 			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 			return rc;
 		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc) {
 			dev_err(&pdev->dev,
 				"32-bit consistent DMA enable failed\n");
......
@@ -68,8 +68,6 @@ static int st_ahci_deassert_resets(struct device *dev)
 		}
 	}
 
-	st_ahci_configure_oob(drv_data->hpriv->mmio);
-
 	if (drv_data->sw_rst) {
 		err = reset_control_deassert(drv_data->sw_rst);
 		if (err) {
@@ -172,6 +170,8 @@ static int st_ahci_probe(struct platform_device *pdev)
 	if (err)
 		return err;
 
+	st_ahci_configure_oob(drv_data->hpriv->mmio);
+
 	err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
 				      &ahci_platform_sht);
 	if (err) {
@@ -222,6 +222,8 @@ static int st_ahci_resume(struct device *dev)
 			return err;
 		}
 
+	st_ahci_configure_oob(drv_data->hpriv->mmio);
+
 	return ahci_platform_resume_host(dev);
 }
 #endif
......
@@ -22,6 +22,7 @@
  * NOTE: PM support is not currently available.
  *
  */
+#include <linux/acpi.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/ahci_platform.h>
@@ -718,6 +719,14 @@ static int xgene_ahci_probe(struct platform_device *pdev)
 	return rc;
 }
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_ahci_acpi_match[] = {
+	{ "APMC0D0D", },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
+#endif
+
 static const struct of_device_id xgene_ahci_of_match[] = {
 	{.compatible = "apm,xgene-ahci"},
 	{},
@@ -730,6 +739,7 @@ static struct platform_driver xgene_ahci_driver = {
 	.driver = {
 		.name = DRV_NAME,
 		.of_match_table = xgene_ahci_of_match,
+		.acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match),
 	},
 };
......
@@ -70,6 +70,9 @@
 #include <linux/pm_runtime.h>
 #include <linux/platform_device.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/libata.h>
+
 #include "libata.h"
 #include "libata-transport.h"
@@ -691,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
  * RETURNS:
  *	Block address read from @tf.
  */
-u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
 {
 	u64 block = 0;
 
-	if (tf->flags & ATA_TFLAG_LBA) {
+	if (!dev || tf->flags & ATA_TFLAG_LBA) {
 		if (tf->flags & ATA_TFLAG_LBA48) {
 			block |= (u64)tf->hob_lbah << 40;
 			block |= (u64)tf->hob_lbam << 32;
@@ -2144,6 +2147,24 @@ static int ata_dev_config_ncq(struct ata_device *dev,
 	return 0;
 }
 
+static void ata_dev_config_sense_reporting(struct ata_device *dev)
+{
+	unsigned int err_mask;
+
+	if (!ata_id_has_sense_reporting(dev->id))
+		return;
+
+	if (ata_id_sense_reporting_enabled(dev->id))
+		return;
+
+	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
+	if (err_mask) {
+		ata_dev_dbg(dev,
+			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
+			    err_mask);
+	}
+}
+
 /**
  *	ata_dev_configure - Configure the specified ATA/ATAPI device
  *	@dev: Target device to configure
@@ -2366,7 +2387,7 @@ int ata_dev_configure(struct ata_device *dev)
 				dev->devslp_timing[i] = sata_setting[j];
 		}
 	}
-
+	ata_dev_config_sense_reporting(dev);
 	dev->cdb_len = 16;
 }
@@ -4897,6 +4918,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 	 */
 	if (unlikely(ata_tag_internal(qc->tag))) {
 		fill_result_tf(qc);
+		trace_ata_qc_complete_internal(qc);
 		__ata_qc_complete(qc);
 		return;
 	}
@@ -4907,6 +4929,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 	 */
 	if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
 		fill_result_tf(qc);
+		trace_ata_qc_complete_failed(qc);
 		ata_qc_schedule_eh(qc);
 		return;
 	}
@@ -4917,6 +4940,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 	if (qc->flags & ATA_QCFLAG_RESULT_TF)
 		fill_result_tf(qc);
 
+	trace_ata_qc_complete_done(qc);
 	/* Some commands need post-processing after successful
 	 * completion.
 	 */
@@ -5064,7 +5088,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 	}
 
 	ap->ops->qc_prep(qc);
-
+	trace_ata_qc_issue(qc);
 	qc->err_mask |= ap->ops->qc_issue(qc);
 	if (unlikely(qc->err_mask))
 		goto err;
......
@@ -46,6 +46,7 @@
 
 #include <linux/libata.h>
 
+#include <trace/events/libata.h>
 #include "libata.h"
 
 enum {
@@ -1510,13 +1511,18 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
 
 	ata_tf_init(dev, &tf);
-	tf.command = ATA_CMD_READ_LOG_EXT;
+	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id)) {
+		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
+		tf.protocol = ATA_PROT_DMA;
+	} else {
+		tf.command = ATA_CMD_READ_LOG_EXT;
+		tf.protocol = ATA_PROT_PIO;
+	}
 	tf.lbal = log;
 	tf.lbam = page;
 	tf.nsect = sectors;
 	tf.hob_nsect = sectors >> 8;
 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
-	tf.protocol = ATA_PROT_PIO;
 
 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 				     buf, sectors * ATA_SECT_SIZE, 0);
@@ -1575,6 +1581,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
 	tf->hob_lbah = buf[10];
 	tf->nsect = buf[12];
 	tf->hob_nsect = buf[13];
+	if (ata_id_has_ncq_autosense(dev->id))
+		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
 
 	return 0;
 }
@@ -1610,6 +1618,70 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
 	return err_mask;
 }
+/**
+ *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
+ *	@dev: device to perform REQUEST_SENSE_DATA_EXT to
+ *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
+ *	@dfl_sense_key: default sense key to use
+ *
+ *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
+ *	SENSE.  This function is an EH helper.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	encoded sense data on success, 0 on failure or if sense data
+ *	is not available.
+ */
+static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
+				struct scsi_cmnd *cmd)
+{
+	struct ata_device *dev = qc->dev;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	if (!cmd)
+		return 0;
+
+	DPRINTK("ATA request sense\n");
+	ata_dev_warn(dev, "request sense\n");
+	if (!ata_id_sense_reporting_enabled(dev->id)) {
+		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
+		return 0;
+	}
+	ata_tf_init(dev, &tf);
+
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+	tf.command = ATA_CMD_REQ_SENSE_DATA;
+	tf.protocol = ATA_PROT_NODATA;
+
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	/*
+	 * ACS-4 states:
+	 * The device may set the SENSE DATA AVAILABLE bit to one in the
+	 * STATUS field and clear the ERROR bit to zero in the STATUS field
+	 * to indicate that the command returned completion without an error
+	 * and the sense data described in table 306 is available.
+	 *
+	 * IOW the 'ATA_SENSE' bit might not be set even though valid
+	 * sense data is available.
+	 * So check for both.
+	 */
+	if ((tf.command & ATA_SENSE) ||
+	    tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
+		ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
+		qc->flags |= ATA_QCFLAG_SENSE_VALID;
+		ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
+			     tf.lbah, tf.lbam, tf.lbal);
+	} else {
+		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
+			     tf.command, err_mask);
+	}
+	return err_mask;
+}
+
 /**
  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  *	@dev: device to perform REQUEST_SENSE to
@@ -1772,6 +1844,19 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
 	memcpy(&qc->result_tf, &tf, sizeof(tf));
 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
+	if (qc->result_tf.auxiliary) {
+		char sense_key, asc, ascq;
+
+		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
+		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
+		ascq = qc->result_tf.auxiliary & 0xff;
+		ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
+			    sense_key, asc, ascq);
+		ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
+		ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
+		qc->flags |= ATA_QCFLAG_SENSE_VALID;
+	}
+
 	ehc->i.err_mask &= ~AC_ERR_DEV;
 }
 
@@ -1801,6 +1886,27 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
 		return ATA_EH_RESET;
 	}
 
+	/*
+	 * Sense data reporting does not work if the
+	 * device fault bit is set.
+	 */
+	if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
+	    !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
+		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
+			tmp = ata_eh_request_sense(qc, qc->scsicmd);
+			if (tmp)
+				qc->err_mask |= tmp;
+			else
+				ata_scsi_set_sense_information(qc->scsicmd, tf);
+		} else {
+			ata_dev_warn(qc->dev, "sense data available but port frozen\n");
+		}
+	}
+
+	/* Set by NCQ autosense or request sense above */
+	if (qc->flags & ATA_QCFLAG_SENSE_VALID)
+		return 0;
+
 	if (stat & (ATA_ERR | ATA_DF))
 		qc->err_mask |= AC_ERR_DEV;
 	else
@@ -2186,6 +2292,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 		all_err_mask |= qc->err_mask;
 		if (qc->flags & ATA_QCFLAG_IO)
 			eflags |= ATA_EFLAG_IS_IO;
+		trace_ata_eh_link_autopsy_qc(qc);
 	}
 
 	/* enforce default EH actions */
@@ -2220,7 +2327,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
 	}
-
+	trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
 	DPRINTK("EXIT\n");
 }
@@ -2289,27 +2396,27 @@ const char *ata_get_cmd_descript(u8 command)
 		const char *text;
 	} cmd_descr[] = {
 		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
 		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
 		{ ATA_CMD_STANDBY,		"STANDBY" },
 		{ ATA_CMD_IDLE,			"IDLE" },
 		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
 		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
 		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
 		{ ATA_CMD_NOP,			"NOP" },
 		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
 		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
 		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
 		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
 		{ ATA_CMD_SERVICE,		"SERVICE" },
 		{ ATA_CMD_READ,			"READ DMA" },
 		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
 		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
 		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
 		{ ATA_CMD_READ_STREAM_DMA_EXT,	"READ STREAM DMA EXT" },
 		{ ATA_CMD_WRITE,		"WRITE DMA" },
 		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
 		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
 		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
 		{ ATA_CMD_WRITE_STREAM_DMA_EXT,	"WRITE STREAM DMA EXT" },
 		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT,	"WRITE DMA QUEUED FUA EXT" },
@@ -2325,7 +2432,7 @@ const char *ata_get_cmd_descript(u8 command)
 		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
 		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
 		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
 		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
 		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
 		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
 		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
@@ -2342,12 +2449,12 @@ const char *ata_get_cmd_descript(u8 command)
 		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
 		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
 		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
 		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
 		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
 		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
@@ -2364,12 +2471,12 @@ const char *ata_get_cmd_descript(u8 command)
 		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
 		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
 		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
 		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
 		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
 		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
 		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
 		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
 		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
@@ -2543,14 +2650,15 @@ static void ata_eh_link_report(struct ata_link *link)
 #ifdef CONFIG_ATA_VERBOSE_ERROR
 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
-				    ATA_ERR)) {
+				    ATA_SENSE | ATA_ERR)) {
 			if (res->command & ATA_BUSY)
 				ata_dev_err(qc->dev, "status: { Busy }\n");
 			else
-				ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
+				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
 				  res->command & ATA_DRDY ? "DRDY " : "",
 				  res->command & ATA_DF ? "DF " : "",
 				  res->command & ATA_DRQ ? "DRQ " : "",
+				  res->command & ATA_SENSE ? "SENSE " : "",
 				  res->command & ATA_ERR ? "ERR " : "");
 		}
......
@@ -270,13 +270,28 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
 	    ata_scsi_park_show, ata_scsi_park_store);
 EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
 
-static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
 {
+	if (!cmd)
+		return;
+
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
 	scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
 }
 
+void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
+				    const struct ata_taskfile *tf)
+{
+	u64 information;
+
+	if (!cmd)
+		return;
+
+	information = ata_tf_read_block(tf, NULL);
+	scsi_set_sense_information(cmd->sense_buffer, information);
+}
+
 static ssize_t
 ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
 			  const char *buf, size_t count)
@@ -799,26 +814,27 @@ static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
 	if (stat & ATA_BUSY) {
 		printk("Busy }\n");	/* Data is not valid in this case */
 	} else {
-		if (stat & 0x40)	printk("DriveReady ");
-		if (stat & 0x20)	printk("DeviceFault ");
-		if (stat & 0x10)	printk("SeekComplete ");
-		if (stat & 0x08)	printk("DataRequest ");
-		if (stat & 0x04)	printk("CorrectedError ");
-		if (stat & 0x02)	printk("Index ");
-		if (stat & 0x01)	printk("Error ");
+		if (stat & ATA_DRDY)	printk("DriveReady ");
+		if (stat & ATA_DF)	printk("DeviceFault ");
+		if (stat & ATA_DSC)	printk("SeekComplete ");
+		if (stat & ATA_DRQ)	printk("DataRequest ");
+		if (stat & ATA_CORR)	printk("CorrectedError ");
+		if (stat & ATA_SENSE)	printk("Sense ");
+		if (stat & ATA_ERR)	printk("Error ");
 		printk("}\n");
 
 		if (err) {
 			printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
-			if (err & 0x04)		printk("DriveStatusError ");
-			if (err & 0x80) {
-				if (err & 0x04)	printk("BadCRC ");
+			if (err & ATA_ABORTED)	printk("DriveStatusError ");
+			if (err & ATA_ICRC) {
+				if (err & ATA_ABORTED)
+					printk("BadCRC ");
 				else		printk("Sector ");
 			}
-			if (err & 0x40)		printk("UncorrectableError ");
-			if (err & 0x10)		printk("SectorIdNotFound ");
-			if (err & 0x02)		printk("TrackZeroNotFound ");
-			if (err & 0x01)		printk("AddrMarkNotFound ");
+			if (err & ATA_UNC)	printk("UncorrectableError ");
+			if (err & ATA_IDNF)	printk("SectorIdNotFound ");
+			if (err & ATA_TRK0NF)	printk("TrackZeroNotFound ");
+			if (err & ATA_AMNF)	printk("AddrMarkNotFound ");
 			printk("}\n");
 		}
 	}
@@ -849,40 +865,59 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
 	/* Based on the 3ware driver translation table */
 	static const unsigned char sense_table[][4] = {
 		/* BBD|ECC|ID|MAR */
-		{0xd1,	ABORTED_COMMAND, 0x00, 0x00},	// Device busy	Aborted command
+		{0xd1,	ABORTED_COMMAND, 0x00, 0x00},
+			// Device busy	Aborted command
 		/* BBD|ECC|ID */
-		{0xd0,	ABORTED_COMMAND, 0x00, 0x00},	// Device busy	Aborted command
+		{0xd0,	ABORTED_COMMAND, 0x00, 0x00},
+			// Device busy	Aborted command
 		/* ECC|MC|MARK */
-		{0x61,	HARDWARE_ERROR, 0x00, 0x00},	// Device fault	Hardware error
+		{0x61,	HARDWARE_ERROR, 0x00, 0x00},
+			// Device fault	Hardware error
 		/* ICRC|ABRT */		/* NB: ICRC & !ABRT is BBD */
-		{0x84,	ABORTED_COMMAND, 0x47, 0x00},	// Data CRC error	SCSI parity error
+		{0x84,	ABORTED_COMMAND, 0x47, 0x00},
+			// Data CRC error	SCSI parity error
 		/* MC|ID|ABRT|TRK0|MARK */
-		{0x37,	NOT_READY, 0x04, 0x00},		// Unit offline	Not ready
+		{0x37,	NOT_READY, 0x04, 0x00},
+			// Unit offline	Not ready
 		/* MCR|MARK */
-		{0x09,	NOT_READY, 0x04, 0x00},		// Unrecovered disk error	Not ready
+		{0x09,	NOT_READY, 0x04, 0x00},
+			// Unrecovered disk error	Not ready
 		/* Bad address mark */
-		{0x01,	MEDIUM_ERROR, 0x13, 0x00},	// Address mark not found	Address mark not found for data field
-		/* TRK0 */
-		{0x02,	HARDWARE_ERROR, 0x00, 0x00},	// Track 0 not found	Hardware error
+		{0x01,	MEDIUM_ERROR, 0x13, 0x00},
+			// Address mark not found for data field
+		/* TRK0 - Track 0 not found */
+		{0x02,	HARDWARE_ERROR, 0x00, 0x00},
+			// Hardware error
 		/* Abort: 0x04 is not translated here, see below */
 		/* Media change request */
-		{0x08,	NOT_READY, 0x04, 0x00},		// Media change request	FIXME: faking offline
-		/* SRV/IDNF */
-		{0x10,	ILLEGAL_REQUEST, 0x21, 0x00},	// ID not found	Logical address out of range
-		/* MC */
-		{0x20,	UNIT_ATTENTION, 0x28, 0x00},	// Media Changed	Not ready to ready change, medium may have changed
-		/* ECC */
-		{0x40,	MEDIUM_ERROR, 0x11, 0x04},	// Uncorrectable ECC error	Unrecovered read error
+		{0x08,	NOT_READY, 0x04, 0x00},
+			// FIXME: faking offline
+		/* SRV/IDNF - ID not found */
+		{0x10,	ILLEGAL_REQUEST, 0x21, 0x00},
+			// Logical address out of range
+		/* MC - Media Changed */
+		{0x20,	UNIT_ATTENTION, 0x28, 0x00},
+			// Not ready to ready change, medium may have changed
+		/* ECC - Uncorrectable ECC error */
+		{0x40,	MEDIUM_ERROR, 0x11, 0x04},
+			// Unrecovered read error
 		/* BBD - block marked bad */
-		{0x80,	MEDIUM_ERROR, 0x11, 0x04},	// Block marked bad	Medium error, unrecovered read error
+		{0x80,	MEDIUM_ERROR, 0x11, 0x04},
+			// Block marked bad	Medium error, unrecovered read error
 		{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
 	};
 	static const unsigned char stat_table[][4] = {
 		/* Must be first because BUSY means no other bits valid */
-		{0x80,	ABORTED_COMMAND, 0x47, 0x00},	// Busy, fake parity for now
-		{0x20,	HARDWARE_ERROR,  0x44, 0x00},	// Device fault, internal target failure
-		{0x08,	ABORTED_COMMAND, 0x47, 0x00},	// Timed out in xfer, fake parity for now
-		{0x04,	RECOVERED_ERROR, 0x11, 0x00},	// Recovered ECC error	Medium error, recovered
+		{0x80,	ABORTED_COMMAND, 0x47, 0x00},
+			// Busy, fake parity for now
+		{0x40,	ILLEGAL_REQUEST, 0x21, 0x04},
+			// Device ready, unaligned write command
+		{0x20,	HARDWARE_ERROR,  0x44, 0x00},
+			// Device fault, internal target failure
+		{0x08,	ABORTED_COMMAND, 0x47, 0x00},
+			// Timed out in xfer, fake parity for now
+		{0x04,	RECOVERED_ERROR, 0x11, 0x00},
+			// Recovered ECC error	Medium error, recovered
 		{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
 	};
@@ -1757,7 +1792,9 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 		    ((cdb[2] & 0x20) || need_sense)) {
 			ata_gen_passthru_sense(qc);
 		} else {
-			if (!need_sense) {
+			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
+				cmd->result = SAM_STAT_CHECK_CONDITION;
+			} else if (!need_sense) {
 				cmd->result = SAM_STAT_GOOD;
 			} else {
 				/* TODO: decide which descriptor format to use
@@ -4240,10 +4277,7 @@ int ata_sas_allocate_tag(struct ata_port *ap)
 	unsigned int i, tag;
 
 	for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) {
-		if (ap->flags & ATA_FLAG_LOWTAG)
-			tag = 1;
-		else
-			tag = tag < max_queue ? tag : 0;
+		tag = tag < max_queue ? tag : 0;
 
 		/* the last tag is reserved for internal command. */
 		if (tag == ATA_TAG_INTERNAL)
......
@@ -3220,11 +3220,11 @@ void ata_pci_bmdma_init(struct ata_host *host)
 	 * ->sff_irq_clear method.  Try to initialize bmdma_addr
 	 * regardless of dma masks.
 	 */
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		ata_bmdma_nodma(host, "failed to set dma mask");
 	if (!rc) {
-		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+		rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 		if (rc)
 			ata_bmdma_nodma(host,
 					"failed to set consistent dma mask");
......
/*
* libata-trace.c - trace functions for libata
*
* Copyright 2015 Hannes Reinecke
* Copyright 2015 SUSE Linux GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/trace_seq.h>
#include <trace/events/libata.h>
const char *
libata_trace_parse_status(struct trace_seq *p, unsigned char status)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, "{ ");
if (status & ATA_BUSY)
trace_seq_printf(p, "BUSY ");
if (status & ATA_DRDY)
trace_seq_printf(p, "DRDY ");
if (status & ATA_DF)
trace_seq_printf(p, "DF ");
if (status & ATA_DSC)
trace_seq_printf(p, "DSC ");
if (status & ATA_DRQ)
trace_seq_printf(p, "DRQ ");
if (status & ATA_CORR)
trace_seq_printf(p, "CORR ");
if (status & ATA_SENSE)
trace_seq_printf(p, "SENSE ");
if (status & ATA_ERR)
trace_seq_printf(p, "ERR ");
trace_seq_putc(p, '}');
trace_seq_putc(p, 0);
return ret;
}
const char *
libata_trace_parse_eh_action(struct trace_seq *p, unsigned int eh_action)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, "%x", eh_action);
if (eh_action) {
trace_seq_printf(p, "{ ");
if (eh_action & ATA_EH_REVALIDATE)
trace_seq_printf(p, "REVALIDATE ");
if (eh_action & (ATA_EH_SOFTRESET | ATA_EH_HARDRESET))
trace_seq_printf(p, "RESET ");
else if (eh_action & ATA_EH_SOFTRESET)
trace_seq_printf(p, "SOFTRESET ");
else if (eh_action & ATA_EH_HARDRESET)
trace_seq_printf(p, "HARDRESET ");
if (eh_action & ATA_EH_ENABLE_LINK)
trace_seq_printf(p, "ENABLE_LINK ");
if (eh_action & ATA_EH_PARK)
trace_seq_printf(p, "PARK ");
trace_seq_putc(p, '}');
}
trace_seq_putc(p, 0);
return ret;
}
const char *
libata_trace_parse_eh_err_mask(struct trace_seq *p, unsigned int eh_err_mask)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, "%x", eh_err_mask);
if (eh_err_mask) {
trace_seq_printf(p, "{ ");
if (eh_err_mask & AC_ERR_DEV)
trace_seq_printf(p, "DEV ");
if (eh_err_mask & AC_ERR_HSM)
trace_seq_printf(p, "HSM ");
if (eh_err_mask & AC_ERR_TIMEOUT)
trace_seq_printf(p, "TIMEOUT ");
if (eh_err_mask & AC_ERR_MEDIA)
trace_seq_printf(p, "MEDIA ");
if (eh_err_mask & AC_ERR_ATA_BUS)
trace_seq_printf(p, "ATA_BUS ");
if (eh_err_mask & AC_ERR_HOST_BUS)
trace_seq_printf(p, "HOST_BUS ");
if (eh_err_mask & AC_ERR_SYSTEM)
trace_seq_printf(p, "SYSTEM ");
if (eh_err_mask & AC_ERR_INVALID)
trace_seq_printf(p, "INVALID ");
if (eh_err_mask & AC_ERR_OTHER)
trace_seq_printf(p, "OTHER ");
if (eh_err_mask & AC_ERR_NODEV_HINT)
trace_seq_printf(p, "NODEV_HINT ");
if (eh_err_mask & AC_ERR_NCQ)
trace_seq_printf(p, "NCQ ");
trace_seq_putc(p, '}');
}
trace_seq_putc(p, 0);
return ret;
}
const char *
libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_printf(p, "%x", qc_flags);
if (qc_flags) {
trace_seq_printf(p, "{ ");
if (qc_flags & ATA_QCFLAG_ACTIVE)
trace_seq_printf(p, "ACTIVE ");
if (qc_flags & ATA_QCFLAG_DMAMAP)
trace_seq_printf(p, "DMAMAP ");
if (qc_flags & ATA_QCFLAG_IO)
trace_seq_printf(p, "IO ");
if (qc_flags & ATA_QCFLAG_RESULT_TF)
trace_seq_printf(p, "RESULT_TF ");
if (qc_flags & ATA_QCFLAG_CLEAR_EXCL)
trace_seq_printf(p, "CLEAR_EXCL ");
if (qc_flags & ATA_QCFLAG_QUIET)
trace_seq_printf(p, "QUIET ");
if (qc_flags & ATA_QCFLAG_RETRY)
trace_seq_printf(p, "RETRY ");
if (qc_flags & ATA_QCFLAG_FAILED)
trace_seq_printf(p, "FAILED ");
if (qc_flags & ATA_QCFLAG_SENSE_VALID)
trace_seq_printf(p, "SENSE_VALID ");
if (qc_flags & ATA_QCFLAG_EH_SCHEDULED)
trace_seq_printf(p, "EH_SCHEDULED ");
trace_seq_putc(p, '}');
}
trace_seq_putc(p, 0);
return ret;
}
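
The parse helpers above render into the trace_seq buffer and are meant to be called
from TP_printk() in the libata trace-event header. The fragment below only
illustrates that wiring; the event name, fields, and the __parse_status macro are
assumptions for this sketch, not text copied from include/trace/events/libata.h.

/* Illustrative sketch: wiring a parse helper into a tracepoint's TP_printk(). */
#define __parse_status(s)	libata_trace_parse_status(p, s)

TRACE_EVENT(ata_example_qc_complete,

	TP_PROTO(struct ata_queued_cmd *qc),

	TP_ARGS(qc),

	TP_STRUCT__entry(
		__field(unsigned char,	status)
		__field(unsigned int,	qc_flags)
	),

	TP_fast_assign(
		__entry->status   = qc->result_tf.command;	/* status register */
		__entry->qc_flags = qc->flags;
	),

	/* 'p' is the trace_seq pointer the trace output code makes available. */
	TP_printk("status=%s qc_flags=%s",
		  __parse_status(__entry->status),
		  libata_trace_parse_qc_flags(p, __entry->qc_flags))
);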
@@ -67,7 +67,8 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 			   u64 block, u32 n_block, unsigned int tf_flags,
 			   unsigned int tag);
-extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
+extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
+			     struct ata_device *dev);
 extern unsigned ata_exec_internal(struct ata_device *dev,
 				  struct ata_taskfile *tf, const u8 *cdb,
 				  int dma_dir, void *buf, unsigned int buflen,
@@ -137,6 +138,9 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
 			      struct scsi_host_template *sht);
 extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
+extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
+extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
+					   const struct ata_taskfile *tf);
 extern void ata_scsi_media_change_notify(struct ata_device *dev);
 extern void ata_scsi_hotplug(struct work_struct *work);
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
......
@@ -475,11 +475,11 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
 	atp867x_fixup(host);
 
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 	return rc;
 }
......
@@ -164,11 +164,11 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		return -ENODEV;
 	}
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
 		return -ENODEV;
 	}
-	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
 		return -ENODEV;
 	}
......
@@ -221,10 +221,10 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (rc)
 		return rc;
 	host->iomap = pcim_iomap_table(pdev);
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
......
@@ -122,10 +122,10 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 		return rc;
 	host->iomap = pcim_iomap_table(dev);
-	rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
+	rc = dma_set_mask(&dev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
-	rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
+	rc = dma_set_coherent_mask(&dev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
 	pci_set_master(dev);
......
@@ -730,11 +730,11 @@ static int pdc2027x_init_one(struct pci_dev *pdev,
 		return rc;
 	host->iomap = pcim_iomap_table(pdev);
 
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
......
@@ -1029,10 +1029,10 @@ static int scc_host_init(struct ata_host *host)
 	if (rc)
 		return rc;
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
......
@@ -374,10 +374,10 @@ static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	host->iomap = pcim_iomap_table(pdev);
 
 	/* Setup DMA masks */
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
 	pci_set_master(pdev);
......
@@ -593,12 +593,12 @@ static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
 {
 	int rc;
 
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc) {
 		dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 		return rc;
 	}
-	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc) {
 		dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
 		return rc;
......
This diff is collapsed.
@@ -856,13 +856,13 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	/* Set dma_mask.  This devices doesn't support 64bit addressing. */
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc) {
 		dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 		return rc;
 	}
 
-	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc) {
 		dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
 		return rc;
......
@@ -306,6 +306,11 @@ enum {
 	MV5_PHY_CTL		= 0x0C,
 	SATA_IFCFG		= 0x050,
 	LP_PHY_CTL		= 0x058,
+	LP_PHY_CTL_PIN_PU_PLL   = (1 << 0),
+	LP_PHY_CTL_PIN_PU_RX    = (1 << 1),
+	LP_PHY_CTL_PIN_PU_TX    = (1 << 2),
+	LP_PHY_CTL_GEN_TX_3G    = (1 << 5),
+	LP_PHY_CTL_GEN_RX_3G    = (1 << 9),
 
 	MV_M2_PREAMP_MASK	= 0x7e0,
@@ -1391,10 +1396,17 @@ static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
 			/*
 			 * Set PHY speed according to SControl speed.
 			 */
-			if ((val & 0xf0) == 0x10)
-				writelfl(0x7, lp_phy_addr);
-			else
-				writelfl(0x227, lp_phy_addr);
+			u32 lp_phy_val =
+				LP_PHY_CTL_PIN_PU_PLL |
+				LP_PHY_CTL_PIN_PU_RX  |
+				LP_PHY_CTL_PIN_PU_TX;
+
+			if ((val & 0xf0) != 0x10)
+				lp_phy_val |=
+					LP_PHY_CTL_GEN_TX_3G |
+					LP_PHY_CTL_GEN_RX_3G;
+
+			writelfl(lp_phy_val, lp_phy_addr);
 		}
 	}
 	writelfl(val, addr);
@@ -4308,10 +4320,10 @@ static int pci_go_64(struct pci_dev *pdev)
 {
 	int rc;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 			if (rc) {
 				dev_err(&pdev->dev,
 					"64-bit DMA enable failed\n");
@@ -4319,12 +4331,12 @@ static int pci_go_64(struct pci_dev *pdev)
 			}
 		}
 	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc) {
 			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 			return rc;
 		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc) {
 			dev_err(&pdev->dev,
 				"32-bit consistent DMA enable failed\n");
......
@@ -756,10 +756,10 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 			blk_queue_bounce_limit(sdev1->request_queue,
 					       ATA_DMA_MASK);
 
-		pci_set_dma_mask(pdev, ATA_DMA_MASK);
+		dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 	} else {
 		/** This shouldn't fail as it was set to this value before */
-		pci_set_dma_mask(pdev, pp->adma_dma_mask);
+		dma_set_mask(&pdev->dev, pp->adma_dma_mask);
 		if (sdev0)
 			blk_queue_bounce_limit(sdev0->request_queue,
 					       pp->adma_dma_mask);
@@ -1133,10 +1133,10 @@ static int nv_adma_port_start(struct ata_port *ap)
 	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
 	   pad buffers */
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc)
 		return rc;
-	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc)
 		return rc;
@@ -1161,8 +1161,8 @@ static int nv_adma_port_start(struct ata_port *ap)
 	   These are allowed to fail since we store the value that ends up
 	   being used to set as the bounce limit in slave_config later if
 	   needed. */
-	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 	pp->adma_dma_mask = *dev->dma_mask;
 
 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
......
@@ -1246,10 +1246,10 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
 	/* initialize adapter */
 	pdc_host_init(host);
 
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
......
@@ -557,10 +557,10 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
 	int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);
 
 	if (have_64bit_bus &&
-	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 			if (rc) {
 				dev_err(&pdev->dev,
 					"64-bit DMA enable failed\n");
@@ -568,12 +568,12 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
 			}
 		}
 	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc) {
 			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 			return rc;
 		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc) {
 			dev_err(&pdev->dev,
 				"32-bit consistent DMA enable failed\n");
......
@@ -770,10 +770,10 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return rc;
 	host->iomap = pcim_iomap_table(pdev);
 
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
 	if (rc)
 		return rc;
......
@@ -246,7 +246,7 @@ enum {
 	/* host flags */
 	SIL24_COMMON_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
 				  ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
-				  ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG,
+				  ATA_FLAG_AN | ATA_FLAG_PMP,
 	SIL24_FLAG_PCIX_IRQ_WOC	= (1 << 24), /* IRQ loss errata on PCI-X */
 
 	IRQ_STAT_4PORTS		= 0xf,
@@ -1312,10 +1312,10 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	host->iomap = iomap;
 
 	/* configure and activate the device */
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 			if (rc) {
 				dev_err(&pdev->dev,
 					"64-bit DMA enable failed\n");
@@ -1323,12 +1323,12 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			}
 		}
 	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc) {
 			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
 			return rc;
 		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc) {
 			dev_err(&pdev->dev,
 				"32-bit consistent DMA enable failed\n");
......
...@@ -496,10 +496,10 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en ...@@ -496,10 +496,10 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
ata_port_pbar_desc(ap, 5, offset, "port"); ata_port_pbar_desc(ap, 5, offset, "port");
} }
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
if (rc) if (rc)
return rc; return rc;
rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
if (rc) if (rc)
return rc; return rc;
......
...@@ -1476,10 +1476,10 @@ static int pdc_sata_init_one(struct pci_dev *pdev, ...@@ -1476,10 +1476,10 @@ static int pdc_sata_init_one(struct pci_dev *pdev,
} }
/* configure and activate */ /* configure and activate */
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
if (rc) if (rc)
return rc; return rc;
rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
if (rc) if (rc)
return rc; return rc;
......
...@@ -502,10 +502,10 @@ static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) ...@@ -502,10 +502,10 @@ static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
for (i = 0; i < host->n_ports; i++) for (i = 0; i < host->n_ports; i++)
vt6421_init_addrs(host->ports[i]); vt6421_init_addrs(host->ports[i]);
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
if (rc) if (rc)
return rc; return rc;
rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
if (rc) if (rc)
return rc; return rc;
......
...@@ -387,10 +387,10 @@ static int vsc_sata_init_one(struct pci_dev *pdev, ...@@ -387,10 +387,10 @@ static int vsc_sata_init_one(struct pci_dev *pdev,
/* /*
* Use 32 bit DMA mask, because 64 bit address support is poor. * Use 32 bit DMA mask, because 64 bit address support is poor.
*/ */
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc) if (rc)
return rc; return rc;
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc) if (rc)
return rc; return rc;
......
...@@ -148,8 +148,8 @@ u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat) ...@@ -148,8 +148,8 @@ u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
printk(KERN_CONT "DataRequest "); printk(KERN_CONT "DataRequest ");
if (stat & ATA_CORR) if (stat & ATA_CORR)
printk(KERN_CONT "CorrectedError "); printk(KERN_CONT "CorrectedError ");
if (stat & ATA_IDX) if (stat & ATA_SENSE)
printk(KERN_CONT "Index "); printk(KERN_CONT "Sense ");
if (stat & ATA_ERR) if (stat & ATA_ERR)
printk(KERN_CONT "Error "); printk(KERN_CONT "Error ");
} }
......
...@@ -273,7 +273,7 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id, int irq_ctx) ...@@ -273,7 +273,7 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id, int irq_ctx)
(hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) { (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) {
a = tp_ops->read_altstatus(hwif); a = tp_ops->read_altstatus(hwif);
s = tp_ops->read_status(hwif); s = tp_ops->read_status(hwif);
if ((a ^ s) & ~ATA_IDX) if ((a ^ s) & ~ATA_SENSE)
/* ancient Seagate drives, broken interfaces */ /* ancient Seagate drives, broken interfaces */
printk(KERN_INFO "%s: probing with STATUS(0x%02x) " printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
"instead of ALTSTATUS(0x%02x)\n", "instead of ALTSTATUS(0x%02x)\n",
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h> #include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h> #include <scsi/scsi_cmnd.h>
...@@ -2586,3 +2587,33 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq) ...@@ -2586,3 +2587,33 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
} }
} }
EXPORT_SYMBOL(scsi_build_sense_buffer); EXPORT_SYMBOL(scsi_build_sense_buffer);
/**
* scsi_set_sense_information - set the information field in a
* formatted sense data buffer
* @buf: Where to build sense data
* @info: 64-bit information value to be set
*
**/
void scsi_set_sense_information(u8 *buf, u64 info)
{
if ((buf[0] & 0x7f) == 0x72) {
u8 *ucp, len;
len = buf[7];
ucp = (u8 *)scsi_sense_desc_find(buf, len + 8, 0);
if (!ucp) {
buf[7] = len + 0xa;
ucp = buf + 8 + len;
}
ucp[0] = 0;
ucp[1] = 0xa;
ucp[2] = 0x80; /* Valid bit */
ucp[3] = 0;
put_unaligned_be64(info, &ucp[4]);
} else if ((buf[0] & 0x7f) == 0x70) {
buf[0] |= 0x80;
put_unaligned_be64(info, &buf[3]);
}
}
EXPORT_SYMBOL(scsi_set_sense_information);
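scsi_set_sense_information() fills the INFORMATION field of either descriptor-format (response code 0x72) or fixed-format (0x70) sense data; a caller that has just built sense data can use it to record, for example, the failing LBA. A hedged usage sketch pairing it with scsi_build_sense_buffer() (declared in scsi/scsi_eh.h below); the function name, LBA and ASC/ASCQ values are illustrative, not from this commit:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

/* Illustrative only: build descriptor-format sense data for an
 * unrecovered read error and record the failing LBA. */
static void example_fill_sense(u8 *sense_buf, u64 bad_lba)
{
	/* desc=1 selects descriptor format (response code 0x72) */
	scsi_build_sense_buffer(1, sense_buf, MEDIUM_ERROR, 0x11, 0x04);
	/* adds (or reuses) the information descriptor and sets the valid bit */
	scsi_set_sense_information(sense_buf, bad_lba);
}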
...@@ -94,6 +94,8 @@ enum { ...@@ -94,6 +94,8 @@ enum {
ATA_ID_SECTOR_SIZE = 106, ATA_ID_SECTOR_SIZE = 106,
ATA_ID_WWN = 108, ATA_ID_WWN = 108,
ATA_ID_LOGICAL_SECTOR_SIZE = 117, /* and 118 */ ATA_ID_LOGICAL_SECTOR_SIZE = 117, /* and 118 */
ATA_ID_COMMAND_SET_3 = 119,
ATA_ID_COMMAND_SET_4 = 120,
ATA_ID_LAST_LUN = 126, ATA_ID_LAST_LUN = 126,
ATA_ID_DLF = 128, ATA_ID_DLF = 128,
ATA_ID_CSFO = 129, ATA_ID_CSFO = 129,
...@@ -177,7 +179,7 @@ enum { ...@@ -177,7 +179,7 @@ enum {
ATA_DSC = (1 << 4), /* drive seek complete */ ATA_DSC = (1 << 4), /* drive seek complete */
ATA_DRQ = (1 << 3), /* data request i/o */ ATA_DRQ = (1 << 3), /* data request i/o */
ATA_CORR = (1 << 2), /* corrected data error */ ATA_CORR = (1 << 2), /* corrected data error */
ATA_IDX = (1 << 1), /* index */ ATA_SENSE = (1 << 1), /* sense code available */
ATA_ERR = (1 << 0), /* have an error */ ATA_ERR = (1 << 0), /* have an error */
ATA_SRST = (1 << 2), /* software reset */ ATA_SRST = (1 << 2), /* software reset */
ATA_ICRC = (1 << 7), /* interface CRC error */ ATA_ICRC = (1 << 7), /* interface CRC error */
...@@ -382,6 +384,8 @@ enum { ...@@ -382,6 +384,8 @@ enum {
SATA_SSP = 0x06, /* Software Settings Preservation */ SATA_SSP = 0x06, /* Software Settings Preservation */
SATA_DEVSLP = 0x09, /* Device Sleep */ SATA_DEVSLP = 0x09, /* Device Sleep */
SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
/* feature values for SET_MAX */ /* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00, ATA_SET_MAX_ADDR = 0x00,
ATA_SET_MAX_PASSWD = 0x01, ATA_SET_MAX_PASSWD = 0x01,
...@@ -525,6 +529,8 @@ struct ata_bmdma_prd { ...@@ -525,6 +529,8 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) #define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4)) #define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)) #define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
#define ata_id_has_ncq_autosense(id) \
((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id) static inline bool ata_id_has_hipm(const u16 *id)
{ {
...@@ -696,6 +702,27 @@ static inline bool ata_id_wcache_enabled(const u16 *id) ...@@ -696,6 +702,27 @@ static inline bool ata_id_wcache_enabled(const u16 *id)
return id[ATA_ID_CFS_ENABLE_1] & (1 << 5); return id[ATA_ID_CFS_ENABLE_1] & (1 << 5);
} }
static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
{
if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
return false;
return id[ATA_ID_COMMAND_SET_3] & (1 << 3);
}
static inline bool ata_id_has_sense_reporting(const u16 *id)
{
if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
return false;
return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
}
static inline bool ata_id_sense_reporting_enabled(const u16 *id)
{
if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
return false;
return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
}
/** /**
* ata_id_major_version - get ATA level of drive * ata_id_major_version - get ATA level of drive
* @id: Identify data * @id: Identify data
......
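The helpers above gate on word 86 bit 15 ("words 119-126 valid") before reading IDENTIFY words 119/120 (ATA_ID_COMMAND_SET_3/4). A minimal sketch of how a probe path might consult them; the function name is hypothetical and not part of this commit:

#include <linux/ata.h>
#include <linux/libata.h>

/* Hypothetical helper: log whether sense data reporting is supported
 * and whether the device currently has it enabled. */
static void example_report_sense_capability(struct ata_device *dev)
{
	const u16 *id = dev->id;

	if (!ata_id_has_sense_reporting(id))
		return;				/* word 119 bit 6 clear */

	ata_dev_dbg(dev, "sense data reporting %s\n",
		    ata_id_sense_reporting_enabled(id) ?
		    "enabled" : "supported but disabled");
}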
...@@ -231,8 +231,7 @@ enum { ...@@ -231,8 +231,7 @@ enum {
ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
* led */ * led */
ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ ATA_FLAG_SAS_HOST = (1 << 24), /* SAS host */
ATA_FLAG_SAS_HOST = (1 << 25), /* SAS host */
/* bits 24:31 of ap->flags are reserved for LLD specific flags */ /* bits 24:31 of ap->flags are reserved for LLD specific flags */
......
...@@ -59,6 +59,7 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, ...@@ -59,6 +59,7 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
u64 * info_out); u64 * info_out);
extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq); extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
extern void scsi_set_sense_information(u8 *buf, u64 info);
extern int scsi_ioctl_reset(struct scsi_device *, int __user *); extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
......
#undef TRACE_SYSTEM
#define TRACE_SYSTEM libata
#if !defined(_TRACE_LIBATA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_LIBATA_H
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#define ata_opcode_name(opcode) { opcode, #opcode }
#define show_opcode_name(val) \
__print_symbolic(val, \
ata_opcode_name(ATA_CMD_DEV_RESET), \
ata_opcode_name(ATA_CMD_CHK_POWER), \
ata_opcode_name(ATA_CMD_STANDBY), \
ata_opcode_name(ATA_CMD_IDLE), \
ata_opcode_name(ATA_CMD_EDD), \
ata_opcode_name(ATA_CMD_DOWNLOAD_MICRO), \
ata_opcode_name(ATA_CMD_DOWNLOAD_MICRO_DMA), \
ata_opcode_name(ATA_CMD_NOP), \
ata_opcode_name(ATA_CMD_FLUSH), \
ata_opcode_name(ATA_CMD_FLUSH_EXT), \
ata_opcode_name(ATA_CMD_ID_ATA), \
ata_opcode_name(ATA_CMD_ID_ATAPI), \
ata_opcode_name(ATA_CMD_SERVICE), \
ata_opcode_name(ATA_CMD_READ), \
ata_opcode_name(ATA_CMD_READ_EXT), \
ata_opcode_name(ATA_CMD_READ_QUEUED), \
ata_opcode_name(ATA_CMD_READ_STREAM_EXT), \
ata_opcode_name(ATA_CMD_READ_STREAM_DMA_EXT), \
ata_opcode_name(ATA_CMD_WRITE), \
ata_opcode_name(ATA_CMD_WRITE_EXT), \
ata_opcode_name(ATA_CMD_WRITE_QUEUED), \
ata_opcode_name(ATA_CMD_WRITE_STREAM_EXT), \
ata_opcode_name(ATA_CMD_WRITE_STREAM_DMA_EXT), \
ata_opcode_name(ATA_CMD_WRITE_FUA_EXT), \
ata_opcode_name(ATA_CMD_WRITE_QUEUED_FUA_EXT), \
ata_opcode_name(ATA_CMD_FPDMA_READ), \
ata_opcode_name(ATA_CMD_FPDMA_WRITE), \
ata_opcode_name(ATA_CMD_FPDMA_SEND), \
ata_opcode_name(ATA_CMD_FPDMA_RECV), \
ata_opcode_name(ATA_CMD_PIO_READ), \
ata_opcode_name(ATA_CMD_PIO_READ_EXT), \
ata_opcode_name(ATA_CMD_PIO_WRITE), \
ata_opcode_name(ATA_CMD_PIO_WRITE_EXT), \
ata_opcode_name(ATA_CMD_READ_MULTI), \
ata_opcode_name(ATA_CMD_READ_MULTI_EXT), \
ata_opcode_name(ATA_CMD_WRITE_MULTI), \
ata_opcode_name(ATA_CMD_WRITE_MULTI_EXT), \
ata_opcode_name(ATA_CMD_WRITE_MULTI_FUA_EXT), \
ata_opcode_name(ATA_CMD_SET_FEATURES), \
ata_opcode_name(ATA_CMD_SET_MULTI), \
ata_opcode_name(ATA_CMD_PACKET), \
ata_opcode_name(ATA_CMD_VERIFY), \
ata_opcode_name(ATA_CMD_VERIFY_EXT), \
ata_opcode_name(ATA_CMD_WRITE_UNCORR_EXT), \
ata_opcode_name(ATA_CMD_STANDBYNOW1), \
ata_opcode_name(ATA_CMD_IDLEIMMEDIATE), \
ata_opcode_name(ATA_CMD_SLEEP), \
ata_opcode_name(ATA_CMD_INIT_DEV_PARAMS), \
ata_opcode_name(ATA_CMD_READ_NATIVE_MAX), \
ata_opcode_name(ATA_CMD_READ_NATIVE_MAX_EXT), \
ata_opcode_name(ATA_CMD_SET_MAX), \
ata_opcode_name(ATA_CMD_SET_MAX_EXT), \
ata_opcode_name(ATA_CMD_READ_LOG_EXT), \
ata_opcode_name(ATA_CMD_WRITE_LOG_EXT), \
ata_opcode_name(ATA_CMD_READ_LOG_DMA_EXT), \
ata_opcode_name(ATA_CMD_WRITE_LOG_DMA_EXT), \
ata_opcode_name(ATA_CMD_TRUSTED_NONDATA), \
ata_opcode_name(ATA_CMD_TRUSTED_RCV), \
ata_opcode_name(ATA_CMD_TRUSTED_RCV_DMA), \
ata_opcode_name(ATA_CMD_TRUSTED_SND), \
ata_opcode_name(ATA_CMD_TRUSTED_SND_DMA), \
ata_opcode_name(ATA_CMD_PMP_READ), \
ata_opcode_name(ATA_CMD_PMP_READ_DMA), \
ata_opcode_name(ATA_CMD_PMP_WRITE), \
ata_opcode_name(ATA_CMD_PMP_WRITE_DMA), \
ata_opcode_name(ATA_CMD_CONF_OVERLAY), \
ata_opcode_name(ATA_CMD_SEC_SET_PASS), \
ata_opcode_name(ATA_CMD_SEC_UNLOCK), \
ata_opcode_name(ATA_CMD_SEC_ERASE_PREP), \
ata_opcode_name(ATA_CMD_SEC_ERASE_UNIT), \
ata_opcode_name(ATA_CMD_SEC_FREEZE_LOCK), \
ata_opcode_name(ATA_CMD_SEC_DISABLE_PASS), \
ata_opcode_name(ATA_CMD_CONFIG_STREAM), \
ata_opcode_name(ATA_CMD_SMART), \
ata_opcode_name(ATA_CMD_MEDIA_LOCK), \
ata_opcode_name(ATA_CMD_MEDIA_UNLOCK), \
ata_opcode_name(ATA_CMD_DSM), \
ata_opcode_name(ATA_CMD_CHK_MED_CRD_TYP), \
ata_opcode_name(ATA_CMD_CFA_REQ_EXT_ERR), \
ata_opcode_name(ATA_CMD_CFA_WRITE_NE), \
ata_opcode_name(ATA_CMD_CFA_TRANS_SECT), \
ata_opcode_name(ATA_CMD_CFA_ERASE), \
ata_opcode_name(ATA_CMD_CFA_WRITE_MULT_NE), \
ata_opcode_name(ATA_CMD_REQ_SENSE_DATA), \
ata_opcode_name(ATA_CMD_SANITIZE_DEVICE), \
ata_opcode_name(ATA_CMD_RESTORE), \
ata_opcode_name(ATA_CMD_READ_LONG), \
ata_opcode_name(ATA_CMD_READ_LONG_ONCE), \
ata_opcode_name(ATA_CMD_WRITE_LONG), \
ata_opcode_name(ATA_CMD_WRITE_LONG_ONCE))
#define ata_error_name(result) { result, #result }
#define show_error_name(val) \
__print_symbolic(val, \
ata_error_name(ATA_ICRC), \
ata_error_name(ATA_UNC), \
ata_error_name(ATA_MC), \
ata_error_name(ATA_IDNF), \
ata_error_name(ATA_MCR), \
ata_error_name(ATA_ABORTED), \
ata_error_name(ATA_TRK0NF), \
ata_error_name(ATA_AMNF))
#define ata_protocol_name(proto) { proto, #proto }
#define show_protocol_name(val) \
__print_symbolic(val, \
ata_protocol_name(ATA_PROT_UNKNOWN), \
ata_protocol_name(ATA_PROT_NODATA), \
ata_protocol_name(ATA_PROT_PIO), \
ata_protocol_name(ATA_PROT_DMA), \
ata_protocol_name(ATA_PROT_NCQ), \
ata_protocol_name(ATAPI_PROT_NODATA), \
ata_protocol_name(ATAPI_PROT_PIO), \
ata_protocol_name(ATAPI_PROT_DMA))
const char *libata_trace_parse_status(struct trace_seq*, unsigned char);
#define __parse_status(s) libata_trace_parse_status(p, s)
const char *libata_trace_parse_eh_action(struct trace_seq *, unsigned int);
#define __parse_eh_action(a) libata_trace_parse_eh_action(p, a)
const char *libata_trace_parse_eh_err_mask(struct trace_seq *, unsigned int);
#define __parse_eh_err_mask(m) libata_trace_parse_eh_err_mask(p, m)
const char *libata_trace_parse_qc_flags(struct trace_seq *, unsigned int);
#define __parse_qc_flags(f) libata_trace_parse_qc_flags(p, f)
TRACE_EVENT(ata_qc_issue,
TP_PROTO(struct ata_queued_cmd *qc),
TP_ARGS(qc),
TP_STRUCT__entry(
__field( unsigned int, ata_port )
__field( unsigned int, ata_dev )
__field( unsigned int, tag )
__field( unsigned char, cmd )
__field( unsigned char, dev )
__field( unsigned char, lbal )
__field( unsigned char, lbam )
__field( unsigned char, lbah )
__field( unsigned char, nsect )
__field( unsigned char, feature )
__field( unsigned char, hob_lbal )
__field( unsigned char, hob_lbam )
__field( unsigned char, hob_lbah )
__field( unsigned char, hob_nsect )
__field( unsigned char, hob_feature )
__field( unsigned char, ctl )
__field( unsigned char, proto )
__field( unsigned long, flags )
),
TP_fast_assign(
__entry->ata_port = qc->ap->print_id;
__entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
__entry->tag = qc->tag;
__entry->proto = qc->tf.protocol;
__entry->cmd = qc->tf.command;
__entry->dev = qc->tf.device;
__entry->lbal = qc->tf.lbal;
__entry->lbam = qc->tf.lbam;
__entry->lbah = qc->tf.lbah;
__entry->hob_lbal = qc->tf.hob_lbal;
__entry->hob_lbam = qc->tf.hob_lbam;
__entry->hob_lbah = qc->tf.hob_lbah;
__entry->feature = qc->tf.feature;
__entry->hob_feature = qc->tf.hob_feature;
__entry->nsect = qc->tf.nsect;
__entry->hob_nsect = qc->tf.hob_nsect;
),
TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s cmd=%s " \
" tf=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)",
__entry->ata_port, __entry->ata_dev, __entry->tag,
show_protocol_name(__entry->proto),
show_opcode_name(__entry->cmd),
__entry->cmd, __entry->feature, __entry->nsect,
__entry->lbal, __entry->lbam, __entry->lbah,
__entry->hob_feature, __entry->hob_nsect,
__entry->hob_lbal, __entry->hob_lbam, __entry->hob_lbah,
__entry->dev)
);
DECLARE_EVENT_CLASS(ata_qc_complete_template,
TP_PROTO(struct ata_queued_cmd *qc),
TP_ARGS(qc),
TP_STRUCT__entry(
__field( unsigned int, ata_port )
__field( unsigned int, ata_dev )
__field( unsigned int, tag )
__field( unsigned char, status )
__field( unsigned char, dev )
__field( unsigned char, lbal )
__field( unsigned char, lbam )
__field( unsigned char, lbah )
__field( unsigned char, nsect )
__field( unsigned char, error )
__field( unsigned char, hob_lbal )
__field( unsigned char, hob_lbam )
__field( unsigned char, hob_lbah )
__field( unsigned char, hob_nsect )
__field( unsigned char, hob_feature )
__field( unsigned char, ctl )
__field( unsigned long, flags )
),
TP_fast_assign(
__entry->ata_port = qc->ap->print_id;
__entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
__entry->tag = qc->tag;
__entry->status = qc->result_tf.command;
__entry->dev = qc->result_tf.device;
__entry->lbal = qc->result_tf.lbal;
__entry->lbam = qc->result_tf.lbam;
__entry->lbah = qc->result_tf.lbah;
__entry->hob_lbal = qc->result_tf.hob_lbal;
__entry->hob_lbam = qc->result_tf.hob_lbam;
__entry->hob_lbah = qc->result_tf.hob_lbah;
__entry->error = qc->result_tf.feature;
__entry->hob_feature = qc->result_tf.hob_feature;
__entry->nsect = qc->result_tf.nsect;
__entry->hob_nsect = qc->result_tf.hob_nsect;
),
TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \
" res=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)",
__entry->ata_port, __entry->ata_dev, __entry->tag,
__parse_qc_flags(__entry->flags),
__parse_status(__entry->status),
__entry->status, __entry->error, __entry->nsect,
__entry->lbal, __entry->lbam, __entry->lbah,
__entry->hob_feature, __entry->hob_nsect,
__entry->hob_lbal, __entry->hob_lbam, __entry->hob_lbah,
__entry->dev)
);
DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_internal,
TP_PROTO(struct ata_queued_cmd *qc),
TP_ARGS(qc));
DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_failed,
TP_PROTO(struct ata_queued_cmd *qc),
TP_ARGS(qc));
DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_done,
TP_PROTO(struct ata_queued_cmd *qc),
TP_ARGS(qc));
TRACE_EVENT(ata_eh_link_autopsy,
TP_PROTO(struct ata_device *dev, unsigned int eh_action, unsigned int eh_err_mask),
TP_ARGS(dev, eh_action, eh_err_mask),
TP_STRUCT__entry(
__field( unsigned int, ata_port )
__field( unsigned int, ata_dev )
__field( unsigned int, eh_action )
__field( unsigned int, eh_err_mask)
),
TP_fast_assign(
__entry->ata_port = dev->link->ap->print_id;
__entry->ata_dev = dev->link->pmp + dev->devno;
__entry->eh_action = eh_action;
__entry->eh_err_mask = eh_err_mask;
),
TP_printk("ata_port=%u ata_dev=%u eh_action=%s err_mask=%s",
__entry->ata_port, __entry->ata_dev,
__parse_eh_action(__entry->eh_action),
__parse_eh_err_mask(__entry->eh_err_mask))
);
TRACE_EVENT(ata_eh_link_autopsy_qc,
TP_PROTO(struct ata_queued_cmd *qc),
TP_ARGS(qc),
TP_STRUCT__entry(
__field( unsigned int, ata_port )
__field( unsigned int, ata_dev )
__field( unsigned int, tag )
__field( unsigned int, qc_flags )
__field( unsigned int, eh_err_mask)
),
TP_fast_assign(
__entry->ata_port = qc->ap->print_id;
__entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
__entry->tag = qc->tag;
__entry->qc_flags = qc->flags;
__entry->eh_err_mask = qc->err_mask;
),
TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s err_mask=%s",
__entry->ata_port, __entry->ata_dev, __entry->tag,
__parse_qc_flags(__entry->qc_flags),
__parse_eh_err_mask(__entry->eh_err_mask))
);
#endif /* _TRACE_LIBATA_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
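The header only declares the events. Following the standard tracepoint pattern, exactly one compilation unit defines CREATE_TRACE_POINTS before including it so the event bodies are emitted once, and call sites simply fire trace_<event>(). A minimal wiring sketch (the surrounding function is illustrative, not from this commit):

/* In exactly one libata .c file: instantiate the event definitions. */
#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>

/* Any call site that includes the header can then fire an event,
 * e.g. just before a queued command is handed to the low-level driver: */
static void example_trace_issue(struct ata_queued_cmd *qc)
{
	trace_ata_qc_issue(qc);		/* records taskfile, tag, protocol */
}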