Commit 0e5e4f0e authored by Keith Busch, committed by Matthew Wilcox

NVMe: Add discard support for capable devices

This adds discard support to block queues if the NVMe device is capable of
deallocating blocks, as indicated by the controller's optional command support
(ONCS). A discard-flagged bio request will submit an NVMe deallocate Dataset
Management command for the requested blocks.
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Parent a12183c6
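
The deallocate path below encodes each discard as one nvme_dsm_range: nlb is the bio's byte length converted to logical blocks, and slba converts the bio's 512-byte sector offset into namespace LBAs via lba_shift (log2 of the LBA size). A minimal standalone sketch of that arithmetic, using hypothetical sample values that are not taken from the patch:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors the conversion in nvme_submit_discard(): the block layer
 * expresses bio offsets in 512-byte sectors, while the DSM range
 * wants namespace LBAs; lba_shift = log2(logical block size).
 */
int main(void)
{
	unsigned lba_shift = 12;	/* hypothetical 4096-byte LBAs */
	uint64_t bi_sector = 2048;	/* bio start, 512-byte units */
	uint32_t bi_size = 65536;	/* bio length in bytes */

	uint32_t nlb = bi_size >> lba_shift;		/* 16 blocks */
	uint64_t slba = bi_sector >> (lba_shift - 9);	/* LBA 256 */

	printf("slba=%llu nlb=%u\n", (unsigned long long)slba, nlb);
	return 0;
}
```

So with 4096-byte LBAs, a 64 KiB discard starting at 512-byte sector 2048 becomes a single range of slba 256, nlb 16.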
@@ -80,6 +80,7 @@ struct nvme_dev {
 	char model[40];
 	char firmware_rev[8];
 	u32 max_hw_sectors;
+	u16 oncs;
 };
 
 /*
@@ -510,6 +511,44 @@ static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
 	return length;
 }
 
+/*
+ * We reuse the small pool to allocate the 16-byte range here as it is not
+ * worth having a special pool for these or additional cases to handle freeing
+ * the iod.
+ */
+static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+		struct bio *bio, struct nvme_iod *iod, int cmdid)
+{
+	struct nvme_dsm_range *range;
+	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
+							&iod->first_dma);
+	if (!range)
+		return -ENOMEM;
+
+	iod_list(iod)[0] = (__le64 *)range;
+	iod->npages = 0;
+
+	range->cattr = cpu_to_le32(0);
+	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
+	range->slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->dsm.opcode = nvme_cmd_dsm;
+	cmnd->dsm.command_id = cmdid;
+	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
+	cmnd->dsm.nr = 0;
+	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+	if (++nvmeq->sq_tail == nvmeq->q_depth)
+		nvmeq->sq_tail = 0;
+	writel(nvmeq->sq_tail, nvmeq->q_db);
+
+	return 0;
+}
+
 static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 								int cmdid)
 {
@@ -567,6 +606,12 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	if (unlikely(cmdid < 0))
 		goto free_iod;
 
+	if (bio->bi_rw & REQ_DISCARD) {
+		result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+		if (result)
+			goto free_cmdid;
+		return result;
+	}
+
 	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
 		return nvme_submit_flush(nvmeq, ns, cmdid);
@@ -1347,6 +1392,16 @@ static void nvme_put_ns_idx(int index)
 	spin_unlock(&dev_list_lock);
 }
 
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+	u32 logical_block_size = queue_logical_block_size(ns->queue);
+	ns->queue->limits.discard_zeroes_data = 0;
+	ns->queue->limits.discard_alignment = logical_block_size;
+	ns->queue->limits.discard_granularity = logical_block_size;
+	ns->queue->limits.max_discard_sectors = 0xffffffff;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
 static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
 {
@@ -1366,7 +1421,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
-/*	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
 	blk_queue_make_request(ns->queue, nvme_make_request);
 	ns->dev = dev;
 	ns->queue->queuedata = ns;
@@ -1392,6 +1446,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
 	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
 
+	if (dev->oncs & NVME_CTRL_ONCS_DSM)
+		nvme_config_discard(ns);
+
 	return ns;
 
  out_free_queue:
@@ -1520,6 +1577,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 
 	ctrl = mem;
 	nn = le32_to_cpup(&ctrl->nn);
+	dev->oncs = le16_to_cpup(&ctrl->oncs);
 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
......
@@ -107,6 +107,12 @@ struct nvme_id_ctrl {
 	__u8			vs[1024];
 };
 
+enum {
+	NVME_CTRL_ONCS_COMPARE			= 1 << 0,
+	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
+	NVME_CTRL_ONCS_DSM			= 1 << 2,
+};
+
 struct nvme_lbaf {
 	__le16			ms;
 	__u8			ds;
@@ -246,6 +252,31 @@ enum {
 	NVME_RW_DSM_COMPRESSED		= 1 << 7,
 };
 
+struct nvme_dsm_cmd {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2[2];
+	__le64			prp1;
+	__le64			prp2;
+	__le32			nr;
+	__le32			attributes;
+	__u32			rsvd12[4];
+};
+
+enum {
+	NVME_DSMGMT_IDR		= 1 << 0,
+	NVME_DSMGMT_IDW		= 1 << 1,
+	NVME_DSMGMT_AD		= 1 << 2,
+};
+
+struct nvme_dsm_range {
+	__le32			cattr;
+	__le32			nlb;
+	__le64			slba;
+};
+
 /* Admin commands */
 
 enum nvme_admin_opcode {
@@ -372,6 +403,7 @@ struct nvme_command {
 		struct nvme_create_sq create_sq;
 		struct nvme_delete_queue delete_queue;
 		struct nvme_download_firmware dlfw;
+		struct nvme_dsm_cmd dsm;
 	};
 };
......
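
Once the controller sets the DSM bit in ONCS, the namespace's queue advertises discard and the new path can be driven from user space, e.g. via the BLKDISCARD ioctl. A minimal sketch, assuming a hypothetical /dev/nvme0n1 node; note that it irreversibly discards the given byte range:

```c
#include <fcntl.h>
#include <linux/fs.h>		/* BLKDISCARD */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical namespace node; needs write permission. */
	int fd = open("/dev/nvme0n1", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* BLKDISCARD takes an {offset, length} pair in bytes. */
	uint64_t range[2] = { 0, 1 << 20 };	/* first 1 MiB */
	if (ioctl(fd, BLKDISCARD, range) < 0)
		perror("BLKDISCARD");

	close(fd);
	return 0;
}
```

blkdiscard(8) wraps the same ioctl, so `blkdiscard --offset 0 --length 1M /dev/nvme0n1` exercises the identical driver path.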