提交 04ceadab 编写于 作者: J jiangtao 提交者: Yang Yingliang

nvme-fabrics: fix kabi broken by "reject I/O to offline device"

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
Link: https://gitee.com/openeuler/kernel/issues/I4JFPM?from=project-issue

-------------------------------------------------

Kabi is broken by adding new member variables to "nvme_ctrl" and
"nvmf_ctrl_options" structure in "reject I/O to offline device" patch.
So kabi was repaired by constructing a new structure.
Signed-off-by: jiangtao <jiangtao62@huawei.com>
Reviewed-by: chengjike <chengjike.cheng@huawei.com>
Reviewed-by: Ao Sun <sunao.sun@huawei.com>
Reviewed-by: Zhenwei Yang <yangzhenwei@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 941b7dce
......@@ -132,24 +132,31 @@ static void nvme_queue_scan(struct nvme_ctrl *ctrl)
/*
 * Delayed-work handler that fires when fast_io_fail_tmo expires while the
 * controller is still reconnecting.
 *
 * NOTE: the scraped diff kept both the pre-patch and post-patch lines
 * (two initializations of 'ctrl', two set_bit() calls), which cannot
 * compile; this is the post-patch version only.  After the KABI fix the
 * work item and the flags word live in the struct nvme_ctrl_plus wrapper,
 * so recover the wrapper first and take the embedded nvme_ctrl from it.
 */
static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl_plus *ctrl_plus = container_of(to_delayed_work(work),
			struct nvme_ctrl_plus, failfast_work);
	struct nvme_ctrl *ctrl = &ctrl_plus->ctrl;

	/* Only meaningful while the controller is trying to reconnect. */
	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	/* Flag moved into the wrapper to keep struct nvme_ctrl KABI-stable. */
	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl_plus->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}
static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
struct nvmf_ctrl_options_plus *ops_plus = NULL;
if (!ctrl->opts)
return;
ops_plus = nvmf_opt_to_plus(ctrl->opts);
if (ops_plus->fast_io_fail_tmo == -1)
return;
schedule_delayed_work(&ctrl->failfast_work,
ctrl->opts->fast_io_fail_tmo * HZ);
schedule_delayed_work(&nvme_ctrl_to_plus(ctrl)->failfast_work,
ops_plus->fast_io_fail_tmo * HZ);
}
static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
......@@ -157,8 +164,8 @@ static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
if (!ctrl->opts)
return;
cancel_delayed_work_sync(&ctrl->failfast_work);
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
cancel_delayed_work_sync(&nvme_ctrl_to_plus(ctrl)->failfast_work);
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &nvme_ctrl_to_plus(ctrl)->flags);
}
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
......@@ -3105,10 +3112,11 @@ static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
int value = nvmf_opt_to_plus(ctrl->opts)->fast_io_fail_tmo;
if (ctrl->opts->fast_io_fail_tmo == -1)
if (value == -1)
return sysfs_emit(buf, "off\n");
return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
return sysfs_emit(buf, "%d\n", value);
}
static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
......@@ -3123,9 +3131,9 @@ static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
return -EINVAL;
if (fast_io_fail_tmo < 0)
opts->fast_io_fail_tmo = -1;
nvmf_opt_to_plus(opts)->fast_io_fail_tmo = -1;
else
opts->fast_io_fail_tmo = fast_io_fail_tmo;
nvmf_opt_to_plus(opts)->fast_io_fail_tmo = fast_io_fail_tmo;
return count;
}
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
......@@ -3854,7 +3862,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
int ret;
ctrl->state = NVME_CTRL_NEW;
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &nvme_ctrl_to_plus(ctrl)->flags);
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
......@@ -3870,7 +3878,8 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
INIT_DELAYED_WORK(&nvme_ctrl_to_plus(ctrl)->failfast_work,
nvme_failfast_work);
BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
PAGE_SIZE);
......
......@@ -550,7 +550,8 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
{
if (ctrl->state != NVME_CTRL_DELETING &&
ctrl->state != NVME_CTRL_DEAD &&
!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!test_bit(NVME_CTRL_FAILFAST_EXPIRED,
&nvme_ctrl_to_plus(ctrl)->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
......@@ -628,7 +629,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
opts->kato = NVME_DEFAULT_KATO;
opts->duplicate_connect = false;
opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
nvmf_opt_to_plus(opts)->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
options = o = kstrdup(buf, GFP_KERNEL);
if (!options)
......@@ -762,7 +763,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
if (token >= 0)
pr_warn("I/O will fail on after %d sec reconnect\n",
token);
opts->fast_io_fail_tmo = token;
nvmf_opt_to_plus(opts)->fast_io_fail_tmo = token;
break;
case NVMF_OPT_HOSTNQN:
if (opts->host) {
......@@ -850,9 +851,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
} else {
opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
opts->reconnect_delay);
if (ctrl_loss_tmo < opts->fast_io_fail_tmo)
if (ctrl_loss_tmo < nvmf_opt_to_plus(opts)->fast_io_fail_tmo)
pr_warn("failfast tmo (%d) > ctrl_loss_tmo (%d)\n",
opts->fast_io_fail_tmo,
nvmf_opt_to_plus(opts)->fast_io_fail_tmo,
ctrl_loss_tmo);
}
......@@ -916,7 +917,7 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts)
kfree(opts->trsvcid);
kfree(opts->subsysnqn);
kfree(opts->host_traddr);
kfree(opts);
kfree(nvmf_opt_to_plus(opts));
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
......@@ -925,18 +926,21 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
NVMF_OPT_FAIL_FAST_TMO)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
{
struct nvmf_ctrl_options_plus *opts_plus;
struct nvmf_ctrl_options *opts;
struct nvmf_transport_ops *ops;
struct nvme_ctrl *ctrl;
int ret;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
opts_plus = kzalloc(sizeof(*opts_plus), GFP_KERNEL);
if (!opts_plus)
return ERR_PTR(-ENOMEM);
opts = &opts_plus->ops;
ret = nvmf_parse_options(opts, buf);
if (ret)
goto out_free_opts;
......
......@@ -106,9 +106,16 @@ struct nvmf_ctrl_options {
unsigned int kato;
struct nvmf_host *host;
int max_reconnects;
int fast_io_fail_tmo;
};
/*
 * KABI wrapper around struct nvmf_ctrl_options: the fast_io_fail_tmo
 * member added by "reject I/O to offline device" is moved here so the
 * layout of the exported nvmf_ctrl_options struct stays unchanged.
 * The base struct MUST stay the first member so container_of() below
 * (and pointer casts between the two types) remain valid.
 */
struct nvmf_ctrl_options_plus {
struct nvmf_ctrl_options ops;
int fast_io_fail_tmo;
};
/* Map a base-options pointer back to its enclosing wrapper. */
#define nvmf_opt_to_plus(ps) \
container_of(ps, struct nvmf_ctrl_options_plus, ops)
/*
* struct nvmf_transport_ops - used to register a specific
* fabric implementation of NVMe fabrics.
......
......@@ -162,7 +162,10 @@ struct nvme_fc_ctrl {
struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS];
struct nvme_ctrl ctrl;
union {
struct nvme_ctrl ctrl;
struct nvme_ctrl_plus ctrl_plus;
};
};
static inline struct nvme_fc_ctrl *
......
......@@ -198,7 +198,8 @@ static bool nvme_available_path(struct nvme_ns_head *head)
struct nvme_ns *ns;
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
if (test_bit(NVME_CTRL_FAILFAST_EXPIRED,
&nvme_ctrl_to_plus(ns->ctrl)->flags))
continue;
switch (ns->ctrl->state) {
case NVME_CTRL_LIVE:
......
......@@ -211,7 +211,6 @@ struct nvme_ctrl {
struct work_struct scan_work;
struct work_struct async_event_work;
struct delayed_work ka_work;
struct delayed_work failfast_work;
struct nvme_command ka_cmd;
struct work_struct fw_act_work;
unsigned long events;
......@@ -246,14 +245,23 @@ struct nvme_ctrl {
u16 icdoff;
u16 maxcmd;
int nr_reconnects;
unsigned long flags;
#define NVME_CTRL_FAILFAST_EXPIRED 0
struct nvmf_ctrl_options *opts;
struct page *discard_page;
unsigned long discard_page_busy;
};
/* Bit index in nvme_ctrl_plus.flags: failfast timer has expired. */
#define NVME_CTRL_FAILFAST_EXPIRED 0
/*
 * KABI wrapper around struct nvme_ctrl: the flags word and failfast
 * delayed work added by "reject I/O to offline device" live here so the
 * exported nvme_ctrl layout is unchanged.  The base struct MUST stay the
 * first member for container_of() below (and the union-embedding in the
 * transport drivers) to work.
 */
struct nvme_ctrl_plus {
struct nvme_ctrl ctrl;
unsigned long flags;
struct delayed_work failfast_work;
};
/* Map a base-controller pointer back to its enclosing wrapper. */
#define nvme_ctrl_to_plus(t) \
container_of(t, struct nvme_ctrl_plus, ctrl)
struct nvme_subsystem {
int instance;
struct device dev;
......
......@@ -105,7 +105,10 @@ struct nvme_dev {
u64 cmb_size;
u32 cmbsz;
u32 cmbloc;
struct nvme_ctrl ctrl;
union {
struct nvme_ctrl ctrl;
struct nvme_ctrl_plus ctrl_plus;
};
struct completion ioq_wait;
mempool_t *iod_mempool;
......
......@@ -118,7 +118,10 @@ struct nvme_rdma_ctrl {
struct sockaddr_storage addr;
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
union {
struct nvme_ctrl ctrl;
struct nvme_ctrl_plus ctrl_plus;
};
bool use_inline_data;
};
......
......@@ -42,7 +42,10 @@ struct nvme_loop_ctrl {
struct list_head list;
struct blk_mq_tag_set tag_set;
struct nvme_loop_iod async_event_iod;
struct nvme_ctrl ctrl;
union {
struct nvme_ctrl ctrl;
struct nvme_ctrl_plus ctrl_plus;
};
struct nvmet_ctrl *target_ctrl;
struct nvmet_port *port;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册