Commit 5a5dbd18 authored by Mark Lord, committed by Jeff Garzik

libata: add support for READ/WRITE LONG

The READ/WRITE LONG commands are theoretically obsolete,
but the majority of drives in existence still implement them.

The WRITE_LONG and WRITE_LONG_ONCE commands are of particular
interest for fault injection testing -- e.g. creating "media errors"
at specific locations on a disk.

The fussy bit is that these commands require a non-standard
sector size, usually 520 bytes instead of 512.

This patch adds support to libata for READ/WRITE LONG commands
issued via SG_IO/ATA_16.
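
As an illustration of the SG_IO/ATA_16 path, here is a minimal userspace sketch (not part of the patch) that issues READ LONG for one sector. The device path /dev/sdX, LBA 0, the 520-byte long-sector size, and the CDB flag encoding are assumptions for the example; the true long-sector size is drive-specific. WRITE LONG follows the same pattern with PIO data-out, and deliberately corrupts the ECC of the target sector.

/*
 * Hypothetical example: issue READ LONG (0x22) via SG_IO using an
 * ATA_16 pass-through CDB.  /dev/sdX, LBA 0 and the 520-byte "long"
 * sector size are placeholders; adjust for the actual drive.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>
#include <unistd.h>

#define LONG_SECT_SIZE 520	/* typically 512 data bytes + 8 ECC bytes */

int main(void)
{
	unsigned char buf[LONG_SECT_SIZE];
	unsigned char cdb[16];
	unsigned char sense[32];
	sg_io_hdr_t io;
	unsigned int lba = 0;			/* LBA28 sector to read */
	int fd = open("/dev/sdX", O_RDONLY);	/* placeholder device */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(cdb, 0, sizeof(cdb));
	cdb[0]  = 0x85;			/* ATA PASS-THROUGH (16) */
	cdb[1]  = 4 << 1;		/* protocol: PIO data-in */
	cdb[2]  = 0x2e;			/* ck_cond, t_dir=from-device, byt_blok, t_length=nsect */
	cdb[6]  = 1;			/* nsect = 1, as required by the new libata check */
	cdb[8]  = lba & 0xff;		/* LBA bits 7:0  */
	cdb[10] = (lba >> 8) & 0xff;	/* LBA bits 15:8 */
	cdb[12] = (lba >> 16) & 0xff;	/* LBA bits 23:16 */
	cdb[13] = 0x40 | ((lba >> 24) & 0x0f);	/* LBA mode + bits 27:24 */
	cdb[14] = 0x22;			/* ATA_CMD_READ_LONG */

	memset(&io, 0, sizeof(io));
	io.interface_id    = 'S';
	io.cmd_len         = sizeof(cdb);
	io.cmdp            = cdb;
	io.dxfer_direction = SG_DXFER_FROM_DEV;
	io.dxfer_len       = LONG_SECT_SIZE;	/* becomes qc->sect_size in the kernel */
	io.dxferp          = buf;
	io.mx_sb_len       = sizeof(sense);
	io.sbp             = sense;
	io.timeout         = 10000;		/* milliseconds */

	if (ioctl(fd, SG_IO, &io) < 0)
		perror("SG_IO");

	close(fd);
	return 0;
}

The point of interest is dxfer_len: with this patch applied, ata_scsi_pass_thru() copies that byte count into qc->sect_size, so ata_pio_sector() clocks 520 bytes instead of ATA_SECT_SIZE.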
Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Parent 12340106
@@ -4106,10 +4106,10 @@ void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
 /**
- * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
+ * ata_pio_sector - Transfer a sector of data.
  * @qc: Command on going
  *
- * Transfer ATA_SECT_SIZE of data from/to the ATA device.
+ * Transfer qc->sect_size bytes of data from/to the ATA device.
  *
  * LOCKING:
  * Inherited from caller.
@@ -4124,7 +4124,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	unsigned int offset;
 	unsigned char *buf;
 
-	if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
+	if (qc->curbytes == qc->nbytes - qc->sect_size)
 		ap->hsm_task_state = HSM_ST_LAST;
 
 	page = sg[qc->cursg].page;
@@ -4144,17 +4144,17 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 		buf = kmap_atomic(page, KM_IRQ0);
 
 		/* do the actual data transfer */
-		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
+		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
 
 		kunmap_atomic(buf, KM_IRQ0);
 		local_irq_restore(flags);
 	} else {
 		buf = page_address(page);
-		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
+		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
 	}
 
-	qc->curbytes += ATA_SECT_SIZE;
-	qc->cursg_ofs += ATA_SECT_SIZE;
+	qc->curbytes += qc->sect_size;
+	qc->cursg_ofs += qc->sect_size;
 
 	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
 		qc->cursg++;
@@ -4163,10 +4163,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 }
 
 /**
- * ata_pio_sectors - Transfer one or many 512-byte sectors.
+ * ata_pio_sectors - Transfer one or many sectors.
  * @qc: Command on going
  *
- * Transfer one or many ATA_SECT_SIZE of data from/to the
+ * Transfer one or many sectors of data from/to the
  * ATA device for the DRQ request.
  *
  * LOCKING:
@@ -4181,7 +4181,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
 		WARN_ON(qc->dev->multi_count == 0);
 
-		nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
+		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
 			    qc->dev->multi_count);
 		while (nsect--)
 			ata_pio_sector(qc);

@@ -2678,6 +2678,18 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
 	tf->device = qc->dev->devno ?
 		tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
 
+	/* READ/WRITE LONG use a non-standard sect_size */
+	qc->sect_size = ATA_SECT_SIZE;
+	switch (tf->command) {
+	case ATA_CMD_READ_LONG:
+	case ATA_CMD_READ_LONG_ONCE:
+	case ATA_CMD_WRITE_LONG:
+	case ATA_CMD_WRITE_LONG_ONCE:
+		if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
+			goto invalid_fld;
+		qc->sect_size = scmd->request_bufflen;
+	}
+
 	/*
 	 * Filter SET_FEATURES - XFER MODE command -- otherwise,
 	 * SET_FEATURES - XFER MODE must be preceded/succeeded

@@ -164,6 +164,12 @@ enum {
 	/* READ_LOG_EXT pages */
 	ATA_LOG_SATA_NCQ	= 0x10,
 
+	/* READ/WRITE LONG (obsolete) */
+	ATA_CMD_READ_LONG	= 0x22,
+	ATA_CMD_READ_LONG_ONCE	= 0x23,
+	ATA_CMD_WRITE_LONG	= 0x32,
+	ATA_CMD_WRITE_LONG_ONCE	= 0x33,
+
 	/* SETFEATURES stuff */
 	SETFEATURES_XFER	= 0x03,
 	XFER_UDMA_7		= 0x47,

@@ -427,6 +427,7 @@ struct ata_queued_cmd {
 	int			dma_dir;
 
 	unsigned int		pad_len;
+	unsigned int		sect_size;
 
 	unsigned int		nbytes;
 	unsigned int		curbytes;
@@ -1182,6 +1183,7 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
 	qc->n_elem = 0;
 	qc->err_mask = 0;
 	qc->pad_len = 0;
+	qc->sect_size = ATA_SECT_SIZE;
 
 	ata_tf_init(qc->dev, &qc->tf);