提交 8e234317 编写于 作者: C Chaitanya Kulkarni 提交者: Yang Yingliang

nvme-pci: use unsigned for io queue depth

mainline inclusion
from mainline-5.9-rc1
commit 61f3b896
category: bugfix
bugzilla: 175286
CVE: NA

---------------------------

The NVMe PCIe declares module parameter io_queue_depth as int. Change
this to u16 as queue depth can never be negative. Now to reflect this
update module parameter getter function from param_get_int() ->
param_get_uint() and respective setter function with type of n changed
from int to u16 with param_set_int() to param_set_ushort(). Finally
update struct nvme_dev q_depth member to u16 and use u16 in min_t()
when calculating dev->q_depth in the nvme_pci_enable() (since q_depth is
now u16) and use unsigned int instead of int when calculating
dev->tagset.queue_depth as target variable tagset->queue_depth is of type
unsigned int in nvme_dev_add().
Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>

Conflicts:
	drivers/nvme/host/pci.c
	[ Feature patch 3b6592f7("nvme: utilize two queue maps, one
	  for reads and one for writes") is not applied.
	  ed92ad37("nvme-pci: only set nr_maps to 2 if poll queues
	  are supported") is not applied.
	  aa22c8e6("nvme-pci: set ctrl sqsize to the device q_depth")
	  is not applied. ]
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 309f6c1f
...@@ -66,10 +66,10 @@ MODULE_PARM_DESC(sgl_threshold, ...@@ -66,10 +66,10 @@ MODULE_PARM_DESC(sgl_threshold,
static int io_queue_depth_set(const char *val, const struct kernel_param *kp); static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = { static const struct kernel_param_ops io_queue_depth_ops = {
.set = io_queue_depth_set, .set = io_queue_depth_set,
.get = param_get_int, .get = param_get_uint,
}; };
static int io_queue_depth = 1024; static unsigned int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2"); MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
...@@ -93,7 +93,7 @@ struct nvme_dev { ...@@ -93,7 +93,7 @@ struct nvme_dev {
unsigned online_queues; unsigned online_queues;
unsigned max_qid; unsigned max_qid;
unsigned int num_vecs; unsigned int num_vecs;
int q_depth; u16 q_depth;
u32 db_stride; u32 db_stride;
void __iomem *bar; void __iomem *bar;
unsigned long bar_mapped_size; unsigned long bar_mapped_size;
...@@ -126,13 +126,14 @@ struct nvme_dev { ...@@ -126,13 +126,14 @@ struct nvme_dev {
static int io_queue_depth_set(const char *val, const struct kernel_param *kp) static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{ {
int n = 0, ret; int ret;
u16 n;
ret = kstrtoint(val, 10, &n); ret = kstrtou16(val, 10, &n);
if (ret != 0 || n < 2) if (ret != 0 || n < 2)
return -EINVAL; return -EINVAL;
return param_set_int(val, kp); return param_set_ushort(val, kp);
} }
static inline unsigned int sq_idx(unsigned int qid, u32 stride) static inline unsigned int sq_idx(unsigned int qid, u32 stride)
...@@ -2062,8 +2063,8 @@ static int nvme_dev_add(struct nvme_dev *dev) ...@@ -2062,8 +2063,8 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.nr_hw_queues = dev->online_queues - 1; dev->tagset.nr_hw_queues = dev->online_queues - 1;
dev->tagset.timeout = NVME_IO_TIMEOUT; dev->tagset.timeout = NVME_IO_TIMEOUT;
dev->tagset.numa_node = dev_to_node(dev->dev); dev->tagset.numa_node = dev_to_node(dev->dev);
dev->tagset.queue_depth = dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; BLK_MQ_MAX_DEPTH) - 1;
dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false); dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false);
if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) { if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) {
dev->tagset.cmd_size = max(dev->tagset.cmd_size, dev->tagset.cmd_size = max(dev->tagset.cmd_size,
...@@ -2121,7 +2122,7 @@ static int nvme_pci_enable(struct nvme_dev *dev) ...@@ -2121,7 +2122,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1, dev->q_depth = min_t(u16, NVME_CAP_MQES(dev->ctrl.cap) + 1,
io_queue_depth); io_queue_depth);
dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
dev->dbs = dev->bar + 4096; dev->dbs = dev->bar + 4096;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册