Commit d082dc15 authored by James Smart, committed by Christoph Hellwig

nvmet-fc: fix target sgl list on large transfers

The existing code that carves up the sg list assumed one sg element per page,
which can be very incorrect when an iommu remaps multiple memory pages to
fewer bus addresses. Hitting the error required a large io payload (greater
than 256k) and a system that maps on a per-page basis. Large ios could still
get by fine if the system happened to condense the sgl list into the first
64 elements.
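As a toy illustration of the failure mode (hypothetical numbers, standalone
userspace C rather than the kernel code, 4k pages assumed): once an iommu
coalesces many pages into one sg element, element-per-page indexing points
far past the end of the mapped list.

#include <stdio.h>

#define TOY_PAGE_SIZE	(4 * 1024)	/* assumed 4k page size */

int main(void)
{
	/*
	 * Hypothetical mapping: an iommu coalesced a 512k payload
	 * (128 pages) into a single bus address, so the DMA-mapped
	 * sg list holds just one element.
	 */
	unsigned int sg_cnt = 1;
	unsigned int offset = 256 * 1024;	/* second 256k sequence */

	/* the old code indexed the sg list one element per page */
	unsigned int idx = offset / TOY_PAGE_SIZE;

	/* prints: data_sg[64] of a 1-element list */
	printf("old indexing would use data_sg[%u] of a %u-element list\n",
	       idx, sg_cnt);
	return 0;
}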

This patch corrects the sg list handling by walking the sg list element by
element and dividing the transfer on per-sg-element boundaries. While doing
so, it still tries to keep each sequence under 256k, but will exceed that
limit when a single sg element is itself larger than 256k.
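The carving rule is compact enough to sketch on its own. The following is a
minimal, self-contained approximation, not the patch itself: a hypothetical
struct seg and a plain array stand in for the kernel's DMA-mapped struct
scatterlist, and MAX_SEQ_LEN / MAX_SG_CNT mirror NVMET_FC_MAX_SEQ_LENGTH and
an assumed 64-entry max_sg_cnt. The actual change, operating on fcpreq and
fod, is in the diff below.

#include <stdio.h>

#define MAX_SEQ_LEN	(256 * 1024)	/* mirrors NVMET_FC_MAX_SEQ_LENGTH */
#define MAX_SG_CNT	64		/* assumed tgtport->max_sg_cnt */

/* hypothetical stand-in for one DMA-mapped sg element */
struct seg {
	unsigned int dma_len;
};

/*
 * Carve one sequence out of the remaining transfer, breaking only on
 * sg element boundaries. Returns the element count for the sequence
 * and stores its byte length in *tlen.
 */
static int carve_sequence(const struct seg *sg, int nsegs,
			  unsigned int remaining, unsigned int *tlen)
{
	int cnt = 0;

	*tlen = 0;
	while (cnt < nsegs && *tlen < remaining && cnt < MAX_SG_CNT &&
	       *tlen + sg[cnt].dma_len < MAX_SEQ_LEN) {
		*tlen += sg[cnt].dma_len;
		cnt++;
	}
	/* a single element over the cap still forms one (longer) sequence */
	if (cnt == 0 && *tlen < remaining) {
		*tlen = sg[0].dma_len < remaining ? sg[0].dma_len : remaining;
		cnt = 1;
	}
	return cnt;
}

int main(void)
{
	/* e.g. an iommu condensed a 1M payload into three elements */
	struct seg sgl[] = { { 512 * 1024 }, { 256 * 1024 }, { 256 * 1024 } };
	int nsegs = sizeof(sgl) / sizeof(sgl[0]);
	unsigned int remaining = 1024 * 1024;
	unsigned int tlen;
	int i = 0;

	while (remaining && i < nsegs) {
		int used = carve_sequence(&sgl[i], nsegs - i, remaining, &tlen);

		/* prints one 512k sequence, then two 256k sequences */
		printf("sequence: %d element(s), %u bytes\n", used, tlen);
		remaining -= tlen;
		i += used;
	}
	return 0;
}

As in the kernel loop, the cap comparison is a strict less-than, so an element
of exactly 256k goes through the single-element path; breaking only on element
boundaries avoids having to build a new sg list for the tgtport api.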

Fixes: 48fa362b ("nvmet-fc: simplify sg list handling")
Cc: <stable@vger.kernel.org> # 4.14
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Parent 8f3ea359
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
 	struct work_struct		work;
 } __aligned(sizeof(unsigned long long));
 
+/* desired maximum for a single sequence - if sg list allows it */
 #define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
 
 enum nvmet_fcp_datadir {
 	NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
 	struct nvme_fc_cmd_iu		cmdiubuf;
 	struct nvme_fc_ersp_iu		rspiubuf;
 	dma_addr_t			rspdma;
+	struct scatterlist		*next_sg;
 	struct scatterlist		*data_sg;
 	int				data_sg_cnt;
 	u32				offset;
@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	INIT_LIST_HEAD(&newrec->assoc_list);
 	kref_init(&newrec->ref);
 	ida_init(&newrec->assoc_cnt);
-	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
-					template->max_sgl_segments);
+	newrec->max_sg_cnt = template->max_sgl_segments;
 
 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
 	if (ret) {
@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 				((fod->io_dir == NVMET_FCP_WRITE) ?
 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
 				/* note: write from initiator perspective */
+	fod->next_sg = fod->data_sg;
 
 	return 0;
@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 				struct nvmet_fc_fcp_iod *fod, u8 op)
 {
 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct scatterlist *sg = fod->next_sg;
 	unsigned long flags;
-	u32 tlen;
+	u32 remaininglen = fod->req.transfer_len - fod->offset;
+	u32 tlen = 0;
 	int ret;
 
 	fcpreq->op = op;
 	fcpreq->offset = fod->offset;
 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
-	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-			(fod->req.transfer_len - fod->offset));
+	/*
+	 * for next sequence:
+	 *  break at a sg element boundary
+	 *  attempt to keep sequence length capped at
+	 *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+	 *    be longer if a single sg element is larger
+	 *    than that amount. This is done to avoid creating
+	 *    a new sg list to use for the tgtport api.
+	 */
+	fcpreq->sg = sg;
+	fcpreq->sg_cnt = 0;
+	while (tlen < remaininglen &&
+	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+		fcpreq->sg_cnt++;
+		tlen += sg_dma_len(sg);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+		fcpreq->sg_cnt++;
+		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen)
+		fod->next_sg = sg;
+	else
+		fod->next_sg = NULL;
+
 	fcpreq->transfer_length = tlen;
 	fcpreq->transferred_length = 0;
 	fcpreq->fcp_error = 0;
 	fcpreq->rsplen = 0;
-
-	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
-	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
 
 	/*
 	 * If the last READDATA request: check if LLDD supports
 	 * combined xfr with response.
......