Commit 91fb2b60 authored by Logan Gunthorpe, committed by Christoph Hellwig

nvme-pci: convert to using dma_map_sgtable()

The dma_map operations now support P2PDMA pages directly. So remove
the calls to pci_p2pdma_[un]map_sg_attrs() and replace them with calls
to dma_map_sgtable().
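
For orientation, the pattern the driver converts to looks roughly like the sketch
below. It is illustrative only: the helper name and the standalone sg_table
argument are made up for the example; the patch itself open-codes this inside
nvme_map_data()/nvme_unmap_data().

#include <linux/blk-mq.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Assumes sgt->sgl is already allocated and sg_init_table()'d with room
 * for blk_rq_nr_phys_segments(req) entries.
 */
static int example_map_rq(struct device *dma_dev, struct request *req,
                          struct sg_table *sgt)
{
        int rc;

        /* Build the scatterlist from the request, as before. */
        sgt->orig_nents = blk_rq_map_sg(req->q, req, sgt->sgl);
        if (!sgt->orig_nents)
                return -EIO;

        /*
         * One call now covers both host memory and P2PDMA pages, so the
         * explicit is_pci_p2pdma_page() branch disappears from the driver.
         */
        rc = dma_map_sgtable(dma_dev, sgt, rq_dma_dir(req), DMA_ATTR_NO_WARN);
        if (rc)
                return rc;      /* e.g. -EREMOTEIO for unsupported P2PDMA */

        /* ... build PRPs or SGLs from sgt->sgl over sgt->nents entries ... */

        /*
         * Unmapped here only to keep the sketch self-contained; the driver
         * does this at request completion time.
         */
        dma_unmap_sgtable(dma_dev, sgt, rq_dma_dir(req), 0);
        return 0;
}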

dma_map_sgtable() returns more complete error codes than dma_map_sg()
and allows differentiating EREMOTEIO errors in case an unsupported
P2PDMA transfer is requested. When this happens, return BLK_STS_TARGET
so the request isn't retried.
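
The translation from mapping errors to block-layer status described above
amounts to the following (a sketch only; nvme_map_rc_to_status() is an invented
helper name, the patch open-codes this check in nvme_map_data()):

#include <linux/blk_types.h>
#include <linux/errno.h>

static blk_status_t nvme_map_rc_to_status(int rc)
{
        if (!rc)
                return BLK_STS_OK;
        if (rc == -EREMOTEIO)           /* unsupported P2PDMA transfer */
                return BLK_STS_TARGET;  /* fail outright, do not retry */
        return BLK_STS_RESOURCE;        /* other failures: retry later */
}
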
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Parent 2f859441
@@ -230,11 +230,10 @@ struct nvme_iod {
         bool use_sgl;
         int aborted;
         int npages;             /* In the PRP list. 0 means small pool in use */
-        int nents;              /* Used in scatterlist */
         dma_addr_t first_dma;
         unsigned int dma_len;   /* length of single DMA segment mapping */
         dma_addr_t meta_dma;
-        struct scatterlist *sg;
+        struct sg_table sgt;
 };
 
 static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
@@ -524,7 +523,7 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
 static void **nvme_pci_iod_list(struct request *req)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-        return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
+        return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
 }
 
 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
@@ -576,17 +575,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
         }
 }
 
-static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
-{
-        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-
-        if (is_pci_p2pdma_page(sg_page(iod->sg)))
-                pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
-                                    rq_dma_dir(req));
-        else
-                dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
-}
-
 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -597,9 +585,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
                 return;
         }
 
-        WARN_ON_ONCE(!iod->nents);
+        WARN_ON_ONCE(!iod->sgt.nents);
+
+        dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
 
-        nvme_unmap_sg(dev, req);
         if (iod->npages == 0)
                 dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
                               iod->first_dma);
@@ -607,7 +596,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
                 nvme_free_sgls(dev, req);
         else
                 nvme_free_prps(dev, req);
-        mempool_free(iod->sg, dev->iod_mempool);
+        mempool_free(iod->sgt.sgl, dev->iod_mempool);
 }
 
 static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -630,7 +619,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
         struct dma_pool *pool;
         int length = blk_rq_payload_bytes(req);
-        struct scatterlist *sg = iod->sg;
+        struct scatterlist *sg = iod->sgt.sgl;
         int dma_len = sg_dma_len(sg);
         u64 dma_addr = sg_dma_address(sg);
         int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
@@ -703,16 +692,16 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
                 dma_len = sg_dma_len(sg);
         }
 done:
-        cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+        cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
         cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
         return BLK_STS_OK;
 free_prps:
         nvme_free_prps(dev, req);
         return BLK_STS_RESOURCE;
 bad_sgl:
-        WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
+        WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
                         "Invalid SGL for payload:%d nents:%d\n",
-                        blk_rq_payload_bytes(req), iod->nents);
+                        blk_rq_payload_bytes(req), iod->sgt.nents);
         return BLK_STS_IOERR;
 }
 
@@ -738,12 +727,13 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
 }
 
 static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
-                struct request *req, struct nvme_rw_command *cmd, int entries)
+                struct request *req, struct nvme_rw_command *cmd)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
         struct dma_pool *pool;
         struct nvme_sgl_desc *sg_list;
-        struct scatterlist *sg = iod->sg;
+        struct scatterlist *sg = iod->sgt.sgl;
+        unsigned int entries = iod->sgt.nents;
         dma_addr_t sgl_dma;
         int i = 0;
 
@@ -841,7 +831,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
         blk_status_t ret = BLK_STS_RESOURCE;
-        int nr_mapped;
+        int rc;
 
         if (blk_rq_nr_phys_segments(req) == 1) {
                 struct bio_vec bv = req_bvec(req);
@@ -859,26 +849,25 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
         }
 
         iod->dma_len = 0;
-        iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
-        if (!iod->sg)
+        iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+        if (!iod->sgt.sgl)
                 return BLK_STS_RESOURCE;
-        sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
-        iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
-        if (!iod->nents)
+        sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
+        iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+        if (!iod->sgt.orig_nents)
                 goto out_free_sg;
 
-        if (is_pci_p2pdma_page(sg_page(iod->sg)))
-                nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
-                                iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
-        else
-                nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
-                                             rq_dma_dir(req), DMA_ATTR_NO_WARN);
-        if (!nr_mapped)
+        rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
+                             DMA_ATTR_NO_WARN);
+        if (rc) {
+                if (rc == -EREMOTEIO)
+                        ret = BLK_STS_TARGET;
                 goto out_free_sg;
+        }
 
         iod->use_sgl = nvme_pci_use_sgls(dev, req);
         if (iod->use_sgl)
-                ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
+                ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
         else
                 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
         if (ret != BLK_STS_OK)
@@ -886,9 +875,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
         return BLK_STS_OK;
 
 out_unmap_sg:
-        nvme_unmap_sg(dev, req);
+        dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
 out_free_sg:
-        mempool_free(iod->sg, dev->iod_mempool);
+        mempool_free(iod->sgt.sgl, dev->iod_mempool);
         return ret;
 }
 
@@ -912,7 +901,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
         iod->aborted = 0;
         iod->npages = -1;
-        iod->nents = 0;
+        iod->sgt.nents = 0;
 
         ret = nvme_setup_cmd(req->q->queuedata, req);
         if (ret)
......