提交 98d87f70 编写于 作者: H Hans Holmberg 提交者: Jens Axboe

lightnvm: remove nvm_submit_io_sync_fn

Move the redundant sync handling interface and wait for a completion in
the lightnvm core instead.
Reviewed-by: Javier González <javier@javigon.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Hans Holmberg <hans@owltronix.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
上级 556f36e9
...@@ -752,12 +752,36 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd) ...@@ -752,12 +752,36 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
} }
EXPORT_SYMBOL(nvm_submit_io); EXPORT_SYMBOL(nvm_submit_io);
/*
 * Completion callback installed by nvm_submit_io_wait(): wakes the
 * submitter sleeping on the on-stack completion stashed in rqd->private.
 */
static void nvm_sync_end_io(struct nvm_rq *rqd)
{
	complete((struct completion *)rqd->private);
}
static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd)
{
DECLARE_COMPLETION_ONSTACK(wait);
int ret = 0;
rqd->end_io = nvm_sync_end_io;
rqd->private = &wait;
ret = dev->ops->submit_io(dev, rqd);
if (ret)
return ret;
wait_for_completion_io(&wait);
return 0;
}
int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd) int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{ {
struct nvm_dev *dev = tgt_dev->parent; struct nvm_dev *dev = tgt_dev->parent;
int ret; int ret;
if (!dev->ops->submit_io_sync) if (!dev->ops->submit_io)
return -ENODEV; return -ENODEV;
nvm_rq_tgt_to_dev(tgt_dev, rqd); nvm_rq_tgt_to_dev(tgt_dev, rqd);
...@@ -765,9 +789,7 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd) ...@@ -765,9 +789,7 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
rqd->dev = tgt_dev; rqd->dev = tgt_dev;
rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd); rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
/* In case of error, fail with right address format */ ret = nvm_submit_io_wait(dev, rqd);
ret = dev->ops->submit_io_sync(dev, rqd);
nvm_rq_dev_to_tgt(tgt_dev, rqd);
return ret; return ret;
} }
...@@ -788,12 +810,13 @@ EXPORT_SYMBOL(nvm_end_io); ...@@ -788,12 +810,13 @@ EXPORT_SYMBOL(nvm_end_io);
static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd) static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{ {
if (!dev->ops->submit_io_sync) if (!dev->ops->submit_io)
return -ENODEV; return -ENODEV;
rqd->dev = NULL;
rqd->flags = nvm_set_flags(&dev->geo, rqd); rqd->flags = nvm_set_flags(&dev->geo, rqd);
return dev->ops->submit_io_sync(dev, rqd); return nvm_submit_io_wait(dev, rqd);
} }
static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa) static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
......
...@@ -690,34 +690,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) ...@@ -690,34 +690,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
return 0; return 0;
} }
/*
 * Synchronously submit a lightnvm request to the NVMe layer: build an
 * NVMe command from @rqd, execute it on the device queue, and copy the
 * completion status back into @rqd.
 *
 * Returns -EINTR if the request was cancelled, PTR_ERR() from request
 * allocation on failure, 0 otherwise. Device-level I/O errors are NOT
 * reflected in the return value; callers must check rqd->error.
 */
static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct nvme_nvm_command cmd;
	int ret = 0;

	memset(&cmd, 0, sizeof(struct nvme_nvm_command));

	/* Translate rqd into an NVMe command and a block-layer request. */
	rq = nvme_nvm_alloc_request(q, rqd, &cmd);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* I/Os can fail and the error is signaled through rqd. Callers must
	 * handle the error accordingly.
	 */
	blk_execute_rq(q, NULL, rq, 0);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;

	/* Propagate per-PPA status and NVMe completion status to the caller. */
	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;
	blk_mq_free_request(rq);

	return ret;
}
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name, static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
int size) int size)
{ {
...@@ -754,7 +726,6 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { ...@@ -754,7 +726,6 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
.get_chk_meta = nvme_nvm_get_chk_meta, .get_chk_meta = nvme_nvm_get_chk_meta,
.submit_io = nvme_nvm_submit_io, .submit_io = nvme_nvm_submit_io,
.submit_io_sync = nvme_nvm_submit_io_sync,
.create_dma_pool = nvme_nvm_create_dma_pool, .create_dma_pool = nvme_nvm_create_dma_pool,
.destroy_dma_pool = nvme_nvm_destroy_dma_pool, .destroy_dma_pool = nvme_nvm_destroy_dma_pool,
......
...@@ -89,7 +89,6 @@ typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); ...@@ -89,7 +89,6 @@ typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int, typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
struct nvm_chk_meta *); struct nvm_chk_meta *);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int); typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
typedef void (nvm_destroy_dma_pool_fn)(void *); typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
...@@ -104,7 +103,6 @@ struct nvm_dev_ops { ...@@ -104,7 +103,6 @@ struct nvm_dev_ops {
nvm_get_chk_meta_fn *get_chk_meta; nvm_get_chk_meta_fn *get_chk_meta;
nvm_submit_io_fn *submit_io; nvm_submit_io_fn *submit_io;
nvm_submit_io_sync_fn *submit_io_sync;
nvm_create_dma_pool_fn *create_dma_pool; nvm_create_dma_pool_fn *create_dma_pool;
nvm_destroy_dma_pool_fn *destroy_dma_pool; nvm_destroy_dma_pool_fn *destroy_dma_pool;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册