Commit 16f26c3a authored by Matias Bjørling, committed by Jens Axboe

lightnvm: replace req queue with nvmdev for lld

In the case where a request queue is passed to the low level lightnvm
device driver integration, the device driver might pass its admin
commands through another queue. Instead pass nvm_dev, and let the
low level driver pick the appropriate queue.
Reported-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent 57b4bd06
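
The shape of the change, from a callback's point of view: the core no
longer picks a queue for the driver; it hands over struct nvm_dev and the
driver derives whatever queue suits the command. A minimal sketch of the
new convention (submit_on_admin_queue() is a hypothetical helper standing
in for a driver's real submission path, not an API from this patch):

	/* LLD callback under the new convention: the core passes struct
	 * nvm_dev, and the driver decides which queue the command uses. */
	static int example_lnvm_identity(struct nvm_dev *nvmdev, struct nvm_id *id)
	{
		struct nvme_ns *ns = nvmdev->q->queuedata;	/* I/O queue still reachable */
		struct nvme_dev *dev = ns->dev;

		/* Admin commands can now target the admin queue instead of
		 * the I/O queue the core used to pass in directly. */
		return submit_on_admin_queue(dev->admin_q, id);
	}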
@@ -444,8 +444,9 @@ static void null_lnvm_end_io(struct request *rq, int error)
 	blk_put_request(rq);
 }
 
-static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
+static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
+	struct request_queue *q = dev->q;
 	struct request *rq;
 	struct bio *bio = rqd->bio;
@@ -470,7 +471,7 @@ static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
 	return 0;
 }
 
-static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
+static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
 {
 	sector_t size = gb * 1024 * 1024 * 1024ULL;
 	sector_t blksize;
@@ -523,7 +524,7 @@ static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
 	return 0;
 }
 
-static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
+static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
 {
 	mempool_t *virtmem_pool;
@@ -541,7 +542,7 @@ static void null_lnvm_destroy_dma_pool(void *pool)
 	mempool_destroy(pool);
 }
 
-static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
+static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
 				gfp_t mem_flags, dma_addr_t *dma_handler)
 {
 	return mempool_alloc(pool, mem_flags);
......
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(nvm_unregister_target);
 void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
 							dma_addr_t *dma_handler)
 {
-	return dev->ops->dev_dma_alloc(dev->q, dev->ppalist_pool, mem_flags,
+	return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
 								dma_handler);
 }
 EXPORT_SYMBOL(nvm_dev_dma_alloc);
@@ -246,7 +246,7 @@ static int nvm_init(struct nvm_dev *dev)
 	if (!dev->q || !dev->ops)
 		return ret;
 
-	if (dev->ops->identity(dev->q, &dev->identity)) {
+	if (dev->ops->identity(dev, &dev->identity)) {
 		pr_err("nvm: device could not be identified\n");
 		goto err;
 	}
@@ -326,8 +326,7 @@ int nvm_register(struct request_queue *q, char *disk_name,
 	}
 
 	if (dev->ops->max_phys_sect > 1) {
-		dev->ppalist_pool = dev->ops->create_dma_pool(dev->q,
-								"ppalist");
+		dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
 		if (!dev->ppalist_pool) {
 			pr_err("nvm: could not create ppa pool\n");
 			ret = -ENOMEM;
......
@@ -195,7 +195,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
 	}
 
 	if (dev->ops->get_l2p_tbl) {
-		ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
+		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
 					gennvm_block_map, dev);
 		if (ret) {
 			pr_err("gennvm: could not read L2P table.\n");
@@ -346,7 +346,7 @@ static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	gennvm_generic_to_addr_mode(dev, rqd);
 
 	rqd->dev = dev;
-	return dev->ops->submit_io(dev->q, rqd);
+	return dev->ops->submit_io(dev, rqd);
 }
 
 static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
@@ -382,7 +382,7 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 	if (!dev->ops->set_bb_tbl)
 		return;
 
-	if (dev->ops->set_bb_tbl(dev->q, rqd, 1))
+	if (dev->ops->set_bb_tbl(dev, rqd, 1))
 		return;
 
 	gennvm_addr_to_generic_mode(dev, rqd);
@@ -450,7 +450,7 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
 	gennvm_generic_to_addr_mode(dev, &rqd);
 
-	ret = dev->ops->erase_block(dev->q, &rqd);
+	ret = dev->ops->erase_block(dev, &rqd);
 
 	if (plane_cnt)
 		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
......
@@ -1016,7 +1016,7 @@ static int rrpc_map_init(struct rrpc *rrpc)
 		return 0;
 
 	/* Bring up the mapping table from device */
-	ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
+	ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
 					rrpc_l2p_update, rrpc);
 	if (ret) {
 		pr_err("nvm: rrpc: could not read L2P table.\n");
......
@@ -271,9 +271,9 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
 	return 0;
 }
 
-static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
+static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
 {
-	struct nvme_ns *ns = q->queuedata;
+	struct nvme_ns *ns = nvmdev->q->queuedata;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_id *nvme_nvm_id;
 	struct nvme_nvm_command c = {};
@@ -308,10 +308,10 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
 	return ret;
 }
 
-static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
+static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
 				nvm_l2p_update_fn *update_l2p, void *priv)
 {
-	struct nvme_ns *ns = q->queuedata;
+	struct nvme_ns *ns = nvmdev->q->queuedata;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_command c = {};
 	u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
@@ -415,10 +415,10 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 	return ret;
 }
 
-static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd,
+static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
 								int type)
 {
-	struct nvme_ns *ns = q->queuedata;
+	struct nvme_ns *ns = nvmdev->q->queuedata;
 	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_command c = {};
 	int ret = 0;
@@ -463,8 +463,9 @@ static void nvme_nvm_end_io(struct request *rq, int error)
 	blk_mq_free_request(rq);
 }
 
-static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
+static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
+	struct request_queue *q = dev->q;
 	struct nvme_ns *ns = q->queuedata;
 	struct request *rq;
 	struct bio *bio = rqd->bio;
@@ -502,8 +503,9 @@ static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
 	return 0;
 }
 
-static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
+static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
+	struct request_queue *q = dev->q;
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_nvm_command c = {};
@@ -515,9 +517,9 @@ static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
 	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
 }
 
-static void *nvme_nvm_create_dma_pool(struct request_queue *q, char *name)
+static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
 {
-	struct nvme_ns *ns = q->queuedata;
+	struct nvme_ns *ns = nvmdev->q->queuedata;
 	struct nvme_dev *dev = ns->dev;
 
 	return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
@@ -530,7 +532,7 @@ static void nvme_nvm_destroy_dma_pool(void *pool)
 	dma_pool_destroy(dma_pool);
 }
 
-static void *nvme_nvm_dev_dma_alloc(struct request_queue *q, void *pool,
+static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
 				gfp_t mem_flags, dma_addr_t *dma_handler)
 {
 	return dma_pool_alloc(pool, mem_flags, dma_handler);
......
@@ -183,17 +183,17 @@ struct nvm_block;
 typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
 typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
 
-typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
+typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
+typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
 				nvm_l2p_update_fn *, void *);
 typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
 				nvm_bb_update_fn *, void *);
-typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
-typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
-typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *);
-typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *);
+typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
+typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
 typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t,
+typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
 				dma_addr_t *);
 typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
......
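
Because only the callback signatures move from struct request_queue to
struct nvm_dev, a driver's nvm_dev_ops table keeps its shape; the patch
touches definitions and call sites, not registration. A sketch of how such
a table plausibly looks for the null_blk functions changed above (the
max_phys_sect value is illustrative, not taken from this patch):

	static struct nvm_dev_ops null_lnvm_dev_ops = {
		.identity		= null_lnvm_id,
		.submit_io		= null_lnvm_submit_io,
		.create_dma_pool	= null_lnvm_create_dma_pool,
		.destroy_dma_pool	= null_lnvm_destroy_dma_pool,
		.dev_dma_alloc		= null_lnvm_dev_dma_alloc,
		.max_phys_sect		= 1,	/* illustrative value */
	};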