Commit 2f4cf91c, authored by FUJITA Tomonori, committed by James Bottomley

[SCSI] ips: convert to use the data buffer accessors

- remove the unnecessary map_single path.

- convert to use the new accessors for the sg lists and the
parameters.

Jens Axboe <jens.axboe@oracle.com> did the for_each_sg cleanup.

TODO: use scsi_for_each_sg() in the breakup handling.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: "Salyzyn, Mark" <mark_salyzyn@adaptec.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Parent: bb350d1d
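
In short, the patch replaces the driver's private use_sg / request_buffer / request_bufflen handling with the mid-layer data buffer accessors (scsi_sglist(), scsi_sg_count(), scsi_bufflen()) and the scsi_dma_map() / scsi_dma_unmap() helpers. A minimal sketch of the target pattern, assuming the 2.6.23-era accessor API; the function name and return value below are illustrative only, not taken from the patch:

    /* Sketch: map a command's data buffer and walk the resulting sg list.
     * scsi_dma_map() returns the number of DMA-mapped entries: 0 when the
     * command carries no data, negative if the mapping failed. */
    static int example_build_sg_list(struct scsi_cmnd *SC)
    {
            struct scatterlist *sg;
            int i, nseg;

            nseg = scsi_dma_map(SC);
            if (nseg < 0)
                    return -1;              /* mapping failed */

            scsi_for_each_sg(SC, sg, nseg, i) {
                    /* program sg_dma_address(sg) / sg_dma_len(sg) into
                     * the controller's i-th sg descriptor */
            }
            return nseg;
    }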
@@ -1104,7 +1104,7 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
                 /* A Reset IOCTL is only sent by the boot CD in extreme cases. */
                 /* There can never be any system activity ( network or disk ), but check */
                 /* anyway just as a good practice. */
-                pt = (ips_passthru_t *) SC->request_buffer;
+                pt = (ips_passthru_t *) scsi_sglist(SC);
                 if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
                     (pt->CoppCP.cmd.reset.adapter_flag == 1)) {
                         if (ha->scb_activelist.count != 0) {
@@ -1507,30 +1507,22 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
         if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
             (SC->device->channel == 0) &&
             (SC->device->id == IPS_ADAPTER_ID) &&
-            (SC->device->lun == 0) && SC->request_buffer) {
-                if ((!SC->use_sg) && SC->request_bufflen &&
-                    (((char *) SC->request_buffer)[0] == 'C') &&
-                    (((char *) SC->request_buffer)[1] == 'O') &&
-                    (((char *) SC->request_buffer)[2] == 'P') &&
-                    (((char *) SC->request_buffer)[3] == 'P'))
-                        return 1;
-                else if (SC->use_sg) {
-                        struct scatterlist *sg = SC->request_buffer;
-                        char *buffer;
-
-                        /* kmap_atomic() ensures addressability of the user buffer.*/
-                        /* local_irq_save() protects the KM_IRQ0 address slot.     */
-                        local_irq_save(flags);
-                        buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
-                        if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
-                            buffer[2] == 'P' && buffer[3] == 'P') {
-                                kunmap_atomic(buffer - sg->offset, KM_IRQ0);
-                                local_irq_restore(flags);
-                                return 1;
-                        }
-                        kunmap_atomic(buffer - sg->offset, KM_IRQ0);
-                        local_irq_restore(flags);
-                }
+            (SC->device->lun == 0) && scsi_sglist(SC)) {
+                struct scatterlist *sg = scsi_sglist(SC);
+                char *buffer;
+
+                /* kmap_atomic() ensures addressability of the user buffer.*/
+                /* local_irq_save() protects the KM_IRQ0 address slot.     */
+                local_irq_save(flags);
+                buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+                if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
+                    buffer[2] == 'P' && buffer[3] == 'P') {
+                        kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+                        local_irq_restore(flags);
+                        return 1;
+                }
+                kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+                local_irq_restore(flags);
         }
         return 0;
 }
@@ -1581,18 +1573,14 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
 {
         ips_passthru_t *pt;
         int length = 0;
-        int ret;
+        int i, ret;
+        struct scatterlist *sg = scsi_sglist(SC);
 
         METHOD_TRACE("ips_make_passthru", 1);
 
-        if (!SC->use_sg) {
-                length = SC->request_bufflen;
-        } else {
-                struct scatterlist *sg = SC->request_buffer;
-                int i;
-
-                for (i = 0; i < SC->use_sg; i++)
-                        length += sg[i].length;
-        }
+        scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
+                length += sg[i].length;
 
         if (length < sizeof (ips_passthru_t)) {
                 /* wrong size */
                 DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
@@ -2016,7 +2004,7 @@ ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
         METHOD_TRACE("ips_cleanup_passthru", 1);
 
-        if ((!scb) || (!scb->scsi_cmd) || (!scb->scsi_cmd->request_buffer)) {
+        if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
                 DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
                           ips_name, ha->host_num);
@@ -2766,41 +2754,26 @@ ips_next(ips_ha_t * ha, int intr)
                 /* copy in the CDB */
                 memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
 
-                /* Now handle the data buffer */
-                if (SC->use_sg) {
+                scb->sg_count = scsi_dma_map(SC);
+                BUG_ON(scb->sg_count < 0);
+                if (scb->sg_count) {
                         struct scatterlist *sg;
                         int i;
 
-                        sg = SC->request_buffer;
-                        scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg,
-                                                   SC->sc_data_direction);
                         scb->flags |= IPS_SCB_MAP_SG;
-                        for (i = 0; i < scb->sg_count; i++) {
+
+                        scsi_for_each_sg(SC, sg, scb->sg_count, i) {
                                 if (ips_fill_scb_sg_single
-                                    (ha, sg_dma_address(&sg[i]), scb, i,
-                                     sg_dma_len(&sg[i])) < 0)
+                                    (ha, sg_dma_address(sg), scb, i,
+                                     sg_dma_len(sg)) < 0)
                                         break;
                         }
                         scb->dcdb.transfer_length = scb->data_len;
                 } else {
-                        if (SC->request_bufflen) {
-                                scb->data_busaddr =
-                                    pci_map_single(ha->pcidev,
-                                                   SC->request_buffer,
-                                                   SC->request_bufflen,
-                                                   SC->sc_data_direction);
-                                scb->flags |= IPS_SCB_MAP_SINGLE;
-                                ips_fill_scb_sg_single(ha, scb->data_busaddr,
-                                                       scb, 0,
-                                                       SC->request_bufflen);
-                                scb->dcdb.transfer_length = scb->data_len;
-                        } else {
-                                scb->data_busaddr = 0L;
-                                scb->sg_len = 0;
-                                scb->data_len = 0;
-                                scb->dcdb.transfer_length = 0;
-                        }
+                        scb->data_busaddr = 0L;
+                        scb->sg_len = 0;
+                        scb->data_len = 0;
+                        scb->dcdb.transfer_length = 0;
                 }
 
                 scb->dcdb.cmd_attribute =
@@ -3277,52 +3250,32 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
          * the rest of the data and continue.
          */
         if ((scb->breakup) || (scb->sg_break)) {
+                struct scatterlist *sg;
+                int sg_dma_index, ips_sg_index = 0;
+
                 /* we had a data breakup */
                 scb->data_len = 0;
 
-                if (scb->sg_count) {
-                        /* S/G request */
-                        struct scatterlist *sg;
-                        int ips_sg_index = 0;
-                        int sg_dma_index;
-
-                        sg = scb->scsi_cmd->request_buffer;
-
-                        /* Spin forward to last dma chunk */
-                        sg_dma_index = scb->breakup;
-
-                        /* Take care of possible partial on last chunk */
-                        ips_fill_scb_sg_single(ha,
-                                               sg_dma_address(&sg
-                                                              [sg_dma_index]),
-                                               scb, ips_sg_index++,
-                                               sg_dma_len(&sg
-                                                          [sg_dma_index]));
-
-                        for (; sg_dma_index < scb->sg_count;
-                             sg_dma_index++) {
-                                if (ips_fill_scb_sg_single
-                                    (ha,
-                                     sg_dma_address(&sg[sg_dma_index]),
-                                     scb, ips_sg_index++,
-                                     sg_dma_len(&sg[sg_dma_index])) < 0)
-                                        break;
-                        }
-
-                } else {
-                        /* Non S/G Request */
-                        (void) ips_fill_scb_sg_single(ha,
-                                                      scb->data_busaddr +
-                                                      (scb->sg_break *
-                                                       ha->max_xfer),
-                                                      scb, 0,
-                                                      scb->scsi_cmd->
-                                                      request_bufflen -
-                                                      (scb->sg_break *
-                                                       ha->max_xfer));
-                }
+                sg = scsi_sglist(scb->scsi_cmd);
+
+                /* Spin forward to last dma chunk */
+                sg_dma_index = scb->breakup;
+
+                /* Take care of possible partial on last chunk */
+                ips_fill_scb_sg_single(ha,
+                                       sg_dma_address(&sg[sg_dma_index]),
+                                       scb, ips_sg_index++,
+                                       sg_dma_len(&sg[sg_dma_index]));
+
+                for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
+                     sg_dma_index++) {
+                        if (ips_fill_scb_sg_single
+                            (ha,
+                             sg_dma_address(&sg[sg_dma_index]),
+                             scb, ips_sg_index++,
+                             sg_dma_len(&sg[sg_dma_index])) < 0)
+                                break;
+                }
 
                 scb->dcdb.transfer_length = scb->data_len;
                 scb->dcdb.cmd_attribute |=
@@ -3553,32 +3506,27 @@ ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
 static void
 ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
 {
-        if (scmd->use_sg) {
-                int i;
-                unsigned int min_cnt, xfer_cnt;
-                char *cdata = (char *) data;
-                unsigned char *buffer;
-                unsigned long flags;
-                struct scatterlist *sg = scmd->request_buffer;
-
-                for (i = 0, xfer_cnt = 0;
-                     (i < scmd->use_sg) && (xfer_cnt < count); i++) {
-                        min_cnt = min(count - xfer_cnt, sg[i].length);
-
-                        /* kmap_atomic() ensures addressability of the data buffer.*/
-                        /* local_irq_save() protects the KM_IRQ0 address slot.     */
-                        local_irq_save(flags);
-                        buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
-                        memcpy(buffer, &cdata[xfer_cnt], min_cnt);
-                        kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
-                        local_irq_restore(flags);
-
-                        xfer_cnt += min_cnt;
-                }
-        } else {
-                unsigned int min_cnt = min(count, scmd->request_bufflen);
-                memcpy(scmd->request_buffer, data, min_cnt);
-        }
+        int i;
+        unsigned int min_cnt, xfer_cnt;
+        char *cdata = (char *) data;
+        unsigned char *buffer;
+        unsigned long flags;
+        struct scatterlist *sg = scsi_sglist(scmd);
+
+        for (i = 0, xfer_cnt = 0;
+             (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
+                min_cnt = min(count - xfer_cnt, sg[i].length);
+
+                /* kmap_atomic() ensures addressability of the data buffer.*/
+                /* local_irq_save() protects the KM_IRQ0 address slot.     */
+                local_irq_save(flags);
+                buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+                memcpy(buffer, &cdata[xfer_cnt], min_cnt);
+                kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+                local_irq_restore(flags);
+
+                xfer_cnt += min_cnt;
+        }
 }
 
 /****************************************************************************/
@@ -3591,32 +3539,27 @@ ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
 static void
 ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
 {
-        if (scmd->use_sg) {
-                int i;
-                unsigned int min_cnt, xfer_cnt;
-                char *cdata = (char *) data;
-                unsigned char *buffer;
-                unsigned long flags;
-                struct scatterlist *sg = scmd->request_buffer;
-
-                for (i = 0, xfer_cnt = 0;
-                     (i < scmd->use_sg) && (xfer_cnt < count); i++) {
-                        min_cnt = min(count - xfer_cnt, sg[i].length);
-
-                        /* kmap_atomic() ensures addressability of the data buffer.*/
-                        /* local_irq_save() protects the KM_IRQ0 address slot.     */
-                        local_irq_save(flags);
-                        buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
-                        memcpy(&cdata[xfer_cnt], buffer, min_cnt);
-                        kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
-                        local_irq_restore(flags);
-
-                        xfer_cnt += min_cnt;
-                }
-        } else {
-                unsigned int min_cnt = min(count, scmd->request_bufflen);
-                memcpy(data, scmd->request_buffer, min_cnt);
-        }
+        int i;
+        unsigned int min_cnt, xfer_cnt;
+        char *cdata = (char *) data;
+        unsigned char *buffer;
+        unsigned long flags;
+        struct scatterlist *sg = scsi_sglist(scmd);
+
+        for (i = 0, xfer_cnt = 0;
+             (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
+                min_cnt = min(count - xfer_cnt, sg[i].length);
+
+                /* kmap_atomic() ensures addressability of the data buffer.*/
+                /* local_irq_save() protects the KM_IRQ0 address slot.     */
+                local_irq_save(flags);
+                buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+                memcpy(&cdata[xfer_cnt], buffer, min_cnt);
+                kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+                local_irq_restore(flags);
+
+                xfer_cnt += min_cnt;
+        }
 }
 
 /****************************************************************************/
@@ -4250,7 +4193,7 @@ ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
         METHOD_TRACE("ips_rdcap", 1);
 
-        if (scb->scsi_cmd->request_bufflen < 8)
+        if (scsi_bufflen(scb->scsi_cmd) < 8)
                 return (0);
 
         cap.lba =
@@ -4635,8 +4578,7 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
         METHOD_TRACE("ips_freescb", 1);
         if (scb->flags & IPS_SCB_MAP_SG)
-                pci_unmap_sg(ha->pcidev, scb->scsi_cmd->request_buffer,
-                             scb->scsi_cmd->use_sg, IPS_DMA_DIR(scb));
+                scsi_dma_unmap(scb->scsi_cmd);
         else if (scb->flags & IPS_SCB_MAP_SINGLE)
                 pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
                                  IPS_DMA_DIR(scb));
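
On the TODO: the breakup handling in ips_done() (the -3277 hunk above) still indexes the sg array directly. A rough, hypothetical sketch of what walking it with scsi_for_each_sg() could look like, folding the partial-on-last-chunk refill into the loop; untested and not part of this commit:

    /* Hypothetical rework of the ips_done() breakup loop. */
    struct scatterlist *sg;
    int i, ips_sg_index = 0;

    scb->data_len = 0;
    scsi_for_each_sg(scb->scsi_cmd, sg, scsi_sg_count(scb->scsi_cmd), i) {
            if (i < scb->breakup)
                    continue;               /* chunks already transferred */
            if (ips_fill_scb_sg_single(ha, sg_dma_address(sg), scb,
                                       ips_sg_index++, sg_dma_len(sg)) < 0)
                    break;
    }
    scb->dcdb.transfer_length = scb->data_len;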