Commit cecfed31 authored by Christoph Hellwig, committed by Martin K. Petersen

scsi: snic: switch to generic DMA API

Switch from the legacy PCI DMA API to the generic DMA API.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Parent ec44a676
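The conversion is mechanical: each legacy pci_* DMA helper has a one-to-one generic equivalent that takes the underlying struct device (&pdev->dev) instead of the struct pci_dev, and the PCI_DMA_* direction flags map to the DMA_* ones. A minimal sketch of the streaming-mapping correspondence (map_example is a hypothetical helper, not part of this commit; buf and len are placeholders):

    /* Streaming DMA: legacy PCI API calls shown in comments above
     * their generic DMA API replacements.
     */
    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int map_example(struct pci_dev *pdev, void *buf, size_t len)
    {
            dma_addr_t pa;

            /* old: pa = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE); */
            pa = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);

            /* old: if (pci_dma_mapping_error(pdev, pa)) */
            if (dma_mapping_error(&pdev->dev, pa))
                    return -ENOMEM;

            /* old: pci_unmap_single(pdev, pa, len, PCI_DMA_FROMDEVICE); */
            dma_unmap_single(&pdev->dev, pa, len, DMA_FROM_DEVICE);

            return 0;
    }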
@@ -111,8 +111,8 @@ snic_queue_report_tgt_req(struct snic *snic)
 	SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
-	pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(snic->pdev, pa)) {
+	pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&snic->pdev->dev, pa)) {
 		SNIC_HOST_ERR(snic->shost,
 			      "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
 			      buf);
@@ -138,7 +138,8 @@ snic_queue_report_tgt_req(struct snic *snic)
 	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
 	if (ret) {
-		pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&snic->pdev->dev, pa, buf_len,
+				 DMA_FROM_DEVICE);
 		kfree(buf);
 		rqi->sge_va = 0;
 		snic_release_untagged_req(snic, rqi);
......
@@ -102,7 +102,8 @@ snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
 	struct snic_req_info *rqi = NULL;
 	unsigned long flags;
-	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
+	dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_TO_DEVICE);
 	rqi = req_to_rqi(req);
 	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
@@ -172,8 +173,8 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
 	snic_print_desc(__func__, os_buf, len);
 	/* Map request buffer */
-	pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(snic->pdev, pa)) {
+	pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&snic->pdev->dev, pa)) {
 		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
 		return -ENOMEM;
@@ -186,7 +187,7 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
 	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
 	desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
 	if (desc_avail <= 0) {
-		pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
+		dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
 		req->req_pa = 0;
 		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
 		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
@@ -350,29 +351,29 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi)
 	if (rqi->abort_req) {
 		if (rqi->abort_req->req_pa)
-			pci_unmap_single(snic->pdev,
+			dma_unmap_single(&snic->pdev->dev,
 					 rqi->abort_req->req_pa,
 					 sizeof(struct snic_host_req),
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
 	}
 	if (rqi->dr_req) {
 		if (rqi->dr_req->req_pa)
-			pci_unmap_single(snic->pdev,
+			dma_unmap_single(&snic->pdev->dev,
 					 rqi->dr_req->req_pa,
 					 sizeof(struct snic_host_req),
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
 	}
 	if (rqi->req->req_pa)
-		pci_unmap_single(snic->pdev,
+		dma_unmap_single(&snic->pdev->dev,
 				 rqi->req->req_pa,
 				 rqi->req_len,
-				 PCI_DMA_TODEVICE);
+				 DMA_TO_DEVICE);
 	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
 }
@@ -384,10 +385,10 @@ snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
 	sgd = req_to_sgl(rqi_to_req(rqi));
 	SNIC_BUG_ON(sgd[0].addr == 0);
-	pci_unmap_single(snic->pdev,
+	dma_unmap_single(&snic->pdev->dev,
 			 le64_to_cpu(sgd[0].addr),
 			 le32_to_cpu(sgd[0].len),
-			 PCI_DMA_FROMDEVICE);
+			 DMA_FROM_DEVICE);
 }
 /*
......
@@ -435,37 +435,17 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * limitation for the device. Try 43-bit first, and
 	 * fail to 32-bit.
 	 */
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
 	if (ret) {
-		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (ret) {
 			SNIC_HOST_ERR(shost,
 				      "No Usable DMA Configuration, aborting %d\n",
 				      ret);
-			goto err_rel_regions;
-		}
-
-		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (ret) {
-			SNIC_HOST_ERR(shost,
-				      "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
-				      ret);
-			goto err_rel_regions;
-		}
-	} else {
-		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
-		if (ret) {
-			SNIC_HOST_ERR(shost,
-				      "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
-				      ret);
 			goto err_rel_regions;
 		}
 	}
 	/* Map vNIC resources from BAR0 */
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
......
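The probe hunk above also shrinks because dma_set_mask_and_coherent() sets the streaming and coherent masks together, so the separate pci_set_consistent_dma_mask() branches disappear. A minimal sketch of the resulting fallback pattern (set_dma_masks is a hypothetical helper; the 43-bit/32-bit masks follow the comment in the driver):

    /* Try the device's 43-bit addressing limit first, fall back to
     * 32-bit. One call covers both the streaming and the coherent mask.
     */
    static int set_dma_masks(struct pci_dev *pdev)
    {
            int ret;

            ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
            if (ret)
                    ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

            return ret;     /* non-zero: no usable DMA configuration */
    }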
@@ -146,10 +146,10 @@ snic_release_req_buf(struct snic *snic,
 		      CMD_FLAGS(sc));
 	if (req->u.icmnd.sense_addr)
-		pci_unmap_single(snic->pdev,
+		dma_unmap_single(&snic->pdev->dev,
 				 le64_to_cpu(req->u.icmnd.sense_addr),
 				 SCSI_SENSE_BUFFERSIZE,
-				 PCI_DMA_FROMDEVICE);
+				 DMA_FROM_DEVICE);
 	scsi_dma_unmap(sc);
@@ -185,12 +185,11 @@ snic_queue_icmnd_req(struct snic *snic,
 		}
 	}
-	pa = pci_map_single(snic->pdev,
+	pa = dma_map_single(&snic->pdev->dev,
 			    sc->sense_buffer,
 			    SCSI_SENSE_BUFFERSIZE,
-			    PCI_DMA_FROMDEVICE);
-
-	if (pci_dma_mapping_error(snic->pdev, pa)) {
+			    DMA_FROM_DEVICE);
+	if (dma_mapping_error(&snic->pdev->dev, pa)) {
 		SNIC_HOST_ERR(snic->shost,
 			      "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
 			      sc->sense_buffer, snic_cmd_tag(sc));
......
@@ -225,10 +225,9 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 {
 	svnic_dev_desc_ring_size(ring, desc_count, desc_size);
-	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
-		ring->size_unaligned,
-		&ring->base_addr_unaligned);
-
+	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
+		ring->size_unaligned, &ring->base_addr_unaligned,
+		GFP_KERNEL);
 	if (!ring->descs_unaligned) {
 		pr_err("Failed to allocate ring (size=%d), aborting\n",
 			(int)ring->size);
@@ -251,7 +250,7 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
 {
 	if (ring->descs) {
-		pci_free_consistent(vdev->pdev,
+		dma_free_coherent(&vdev->pdev->dev,
 			ring->size_unaligned,
 			ring->descs_unaligned,
 			ring->base_addr_unaligned);
@@ -470,9 +469,9 @@ int svnic_dev_fw_info(struct vnic_dev *vdev,
 	int err = 0;
 	if (!vdev->fw_info) {
-		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
 			sizeof(struct vnic_devcmd_fw_info),
-			&vdev->fw_info_pa);
+			&vdev->fw_info_pa, GFP_KERNEL);
 		if (!vdev->fw_info)
 			return -ENOMEM;
@@ -534,8 +533,8 @@ int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
 	int wait = VNIC_DVCMD_TMO;
 	if (!vdev->stats) {
-		vdev->stats = pci_alloc_consistent(vdev->pdev,
-			sizeof(struct vnic_stats), &vdev->stats_pa);
+		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+			sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
 		if (!vdev->stats)
 			return -ENOMEM;
 	}
@@ -607,9 +606,9 @@ int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
 	int wait = VNIC_DVCMD_TMO;
 	if (!vdev->notify) {
-		vdev->notify = pci_alloc_consistent(vdev->pdev,
+		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
 			sizeof(struct vnic_devcmd_notify),
-			&vdev->notify_pa);
+			&vdev->notify_pa, GFP_KERNEL);
 		if (!vdev->notify)
 			return -ENOMEM;
 	}
@@ -697,21 +696,21 @@ void svnic_dev_unregister(struct vnic_dev *vdev)
 {
 	if (vdev) {
 		if (vdev->notify)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_devcmd_notify),
 				vdev->notify,
 				vdev->notify_pa);
 		if (vdev->linkstatus)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(u32),
 				vdev->linkstatus,
 				vdev->linkstatus_pa);
 		if (vdev->stats)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_stats),
 				vdev->stats, vdev->stats_pa);
 		if (vdev->fw_info)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_devcmd_fw_info),
 				vdev->fw_info, vdev->fw_info_pa);
 		if (vdev->devcmd2)
......
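For the coherent allocations in the vnic code above, the visible change is the explicit GFP flag: pci_alloc_consistent() hard-coded GFP_ATOMIC, while dma_alloc_coherent() takes the flag as an argument, and these setup paths may sleep, so GFP_KERNEL is passed. A minimal sketch of the alloc/free pairing (alloc_coherent_example and free_coherent_example are hypothetical helpers, not part of this commit):

    /* Coherent DMA memory: legacy calls shown in comments above
     * their generic replacements.
     */
    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static void *alloc_coherent_example(struct pci_dev *pdev, size_t size,
                                        dma_addr_t *pa)
    {
            /* old: return pci_alloc_consistent(pdev, size, pa);  (GFP_ATOMIC) */
            return dma_alloc_coherent(&pdev->dev, size, pa, GFP_KERNEL);
    }

    static void free_coherent_example(struct pci_dev *pdev, size_t size,
                                      void *va, dma_addr_t pa)
    {
            /* old: pci_free_consistent(pdev, size, va, pa); */
            dma_free_coherent(&pdev->dev, size, va, pa);
    }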