From add0d5e477f6fe01e7ab1556590bb47fa12a13e2 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 15 Jul 2020 09:07:36 +0800 Subject: [PATCH] alinux: nvme-pci: Improve mapping single segment requests using SGLs fix #29327388 Currently, blk-mq does not support multi-page bvecs, which means each bvec can only contain one page. Even though a bio has only one physical segment, it can still contain multiple bvecs that are physically contiguous, so we cannot use a single bvec's length to map the request; instead we should use the full length of the request to map it when the request has one physical segment. In the future, if multi-page bvecs are supported, this patch can be dropped. Signed-off-by: Baolin Wang Reviewed-by: Joseph Qi --- drivers/nvme/host/pci.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 02c85691d25e..1c1cb2c2eb54 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -830,11 +830,13 @@ static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev, struct bio_vec *bv) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + unsigned int length = blk_rq_bytes(req); - iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); + iod->first_dma = dma_map_page(dev->dev, bv->bv_page, bv->bv_offset, + length, rq_dma_dir(req)); if (dma_mapping_error(dev->dev, iod->first_dma)) return BLK_STS_RESOURCE; - iod->dma_len = bv->bv_len; + iod->dma_len = length; cmnd->flags = NVME_CMD_SGL_METABUF; cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); -- GitLab