Commit d087f6ad authored by Roland Dreier


Merge branches 'core', 'cxgb4', 'ipoib', 'iser', 'iwcm', 'mad', 'misc', 'mlx4', 'mlx5', 'ocrdma' and 'srp' into for-next
Parents: 9376932d 678ea9b5 e42fa209 8d4aca7f 2f0304d2 1471cb6c db1044d4 a57f23f6 a8f731eb 96c51abe da05be29
......@@ -26,6 +26,11 @@ Creating MAD agents
ioctl. Also, all agents registered through a file descriptor will
be unregistered when the descriptor is closed.
2014 -- a new registration ioctl is now provided which allows additional
fields to be provided during registration.
Users of this registration call are implicitly setting the use of
pkey_index (see below).
Receiving MADs
MADs are received using read(). The receive side now supports
......@@ -104,10 +109,10 @@ P_Key Index Handling
The old ib_umad interface did not allow setting the P_Key index for
MADs that are sent and did not provide a way for obtaining the P_Key
index of received MADs. A new layout for struct ib_user_mad_hdr
with a pkey_index member has been defined; however, to preserve
binary compatibility with older applications, this new layout will
not be used unless the IB_USER_MAD_ENABLE_PKEY ioctl is called
before a file descriptor is used for anything else.
with a pkey_index member has been defined; however, to preserve binary
compatibility with older applications, this new layout will not be used
unless one of IB_USER_MAD_ENABLE_PKEY or IB_USER_MAD_REGISTER_AGENT2 ioctl's
are called before a file descriptor is used for anything else.
In September 2008, the IB_USER_MAD_ABI_VERSION will be incremented
to 6, the new layout of struct ib_user_mad_hdr will be used by
......
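For reference, a minimal userspace sketch of registering an agent through the new ioctl described above. This is not part of the patch; it assumes the struct ib_user_mad_reg_req2 and IB_USER_MAD_REGISTER_AGENT2 definitions exported through <rdma/ib_user_mad.h>, and the umad device path is illustrative only.

/* Userspace sketch (assumed uapi definitions from <rdma/ib_user_mad.h>):
 * register a send-only agent on QP1 with the new REGISTER_AGENT2 ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rdma/ib_user_mad.h>

int umad_register_agent2(const char *umad_path)
{
	struct ib_user_mad_reg_req2 req;
	int fd = open(umad_path, O_RDWR);	/* e.g. /dev/infiniband/umad0 */

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.qpn   = 1;	/* GSI; 0 would select the SMI QP */
	req.flags = 0;	/* no optional capability flags requested */
	/* mgmt_class left 0: no unsolicited receives, send/response only */

	if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT2, &req) < 0) {
		perror("IB_USER_MAD_REGISTER_AGENT2");
		close(fd);
		return -1;
	}

	/* The kernel fills in req.id; pkey_index reporting is now implied. */
	printf("registered agent id %u\n", req.id);
	return fd;
}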
......@@ -94,14 +94,14 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
port_priv = ib_get_agent_port(device, port_num);
if (!port_priv) {
printk(KERN_ERR SPFX "Unable to find port agent\n");
dev_err(&device->dev, "Unable to find port agent\n");
return;
}
agent = port_priv->agent[qpn];
ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
if (IS_ERR(ah)) {
printk(KERN_ERR SPFX "ib_create_ah_from_wc error %ld\n",
dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n",
PTR_ERR(ah));
return;
}
......@@ -110,7 +110,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_KERNEL);
if (IS_ERR(send_buf)) {
printk(KERN_ERR SPFX "ib_create_send_mad error\n");
dev_err(&device->dev, "ib_create_send_mad error\n");
goto err1;
}
......@@ -125,7 +125,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
}
if (ib_post_send_mad(send_buf, NULL)) {
printk(KERN_ERR SPFX "ib_post_send_mad error\n");
dev_err(&device->dev, "ib_post_send_mad error\n");
goto err2;
}
return;
......@@ -151,7 +151,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
if (!port_priv) {
printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
dev_err(&device->dev, "No memory for ib_agent_port_private\n");
ret = -ENOMEM;
goto error1;
}
......@@ -161,7 +161,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
port_priv->agent[0] = ib_register_mad_agent(device, port_num,
IB_QPT_SMI, NULL, 0,
&agent_send_handler,
NULL, NULL);
NULL, NULL, 0);
if (IS_ERR(port_priv->agent[0])) {
ret = PTR_ERR(port_priv->agent[0]);
goto error2;
......@@ -172,7 +172,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
port_priv->agent[1] = ib_register_mad_agent(device, port_num,
IB_QPT_GSI, NULL, 0,
&agent_send_handler,
NULL, NULL);
NULL, NULL, 0);
if (IS_ERR(port_priv->agent[1])) {
ret = PTR_ERR(port_priv->agent[1]);
goto error3;
......@@ -202,7 +202,7 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
port_priv = __ib_get_agent_port(device, port_num);
if (port_priv == NULL) {
spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
printk(KERN_ERR SPFX "Port %d not found\n", port_num);
dev_err(&device->dev, "Port %d not found\n", port_num);
return -ENODEV;
}
list_del(&port_priv->port_list);
......
......@@ -3753,7 +3753,7 @@ static void cm_add_one(struct ib_device *ib_device)
struct cm_port *port;
struct ib_mad_reg_req reg_req = {
.mgmt_class = IB_MGMT_CLASS_CM,
.mgmt_class_version = IB_CM_CLASS_VERSION
.mgmt_class_version = IB_CM_CLASS_VERSION,
};
struct ib_port_modify port_modify = {
.set_port_cap_mask = IB_PORT_CM_SUP
......@@ -3801,7 +3801,8 @@ static void cm_add_one(struct ib_device *ib_device)
0,
cm_send_handler,
cm_recv_handler,
port);
port,
0);
if (IS_ERR(port->mad_agent))
goto error2;
......
......@@ -46,6 +46,7 @@
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
......@@ -65,6 +66,20 @@ struct iwcm_work {
struct list_head free_list;
};
static unsigned int default_backlog = 256;
static struct ctl_table_header *iwcm_ctl_table_hdr;
static struct ctl_table iwcm_ctl_table[] = {
{
.procname = "default_backlog",
.data = &default_backlog,
.maxlen = sizeof(default_backlog),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
/*
* The following services provide a mechanism for pre-allocating iwcm_work
* elements. The design pre-allocates them based on the cm_id type:
......@@ -425,6 +440,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
if (!backlog)
backlog = default_backlog;
ret = alloc_work_entries(cm_id_priv, backlog);
if (ret)
return ret;
......@@ -1030,11 +1048,20 @@ static int __init iw_cm_init(void)
if (!iwcm_wq)
return -ENOMEM;
iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
iwcm_ctl_table);
if (!iwcm_ctl_table_hdr) {
pr_err("iw_cm: couldn't register sysctl paths\n");
destroy_workqueue(iwcm_wq);
return -ENOMEM;
}
return 0;
}
static void __exit iw_cm_cleanup(void)
{
unregister_net_sysctl_table(iwcm_ctl_table_hdr);
destroy_workqueue(iwcm_wq);
}
......
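As a usage note, the table registered above puts the new tunable under net/iw_cm; a small sketch of adjusting it from userspace follows, assuming it is exposed as /proc/sys/net/iw_cm/default_backlog.

/* Sketch: override the default iw_cm listen backlog at runtime. */
#include <stdio.h>

static int set_iwcm_default_backlog(int backlog)
{
	FILE *f = fopen("/proc/sys/net/iw_cm/default_backlog", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", backlog);
	return fclose(f);
}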
(This diff has been collapsed.)
......@@ -42,9 +42,6 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#define PFX "ib_mad: "
#define IB_MAD_QPS_CORE 2 /* Always QP0 and QP1 as a minimum */
/* QP and CQ parameters */
......
......@@ -1184,7 +1184,7 @@ static void ib_sa_add_one(struct ib_device *device)
sa_dev->port[i].agent =
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
NULL, 0, send_handler,
recv_handler, sa_dev);
recv_handler, sa_dev, 0);
if (IS_ERR(sa_dev->port[i].agent))
goto err;
......
......@@ -33,6 +33,8 @@
* SOFTWARE.
*/
#define pr_fmt(fmt) "user_mad: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
......@@ -504,13 +506,15 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
copy_offset = IB_MGMT_MAD_HDR;
rmpp_active = 0;
} else {
if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
&& ib_mad_kernel_rmpp_agent(agent)) {
copy_offset = IB_MGMT_RMPP_HDR;
rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
IB_MGMT_RMPP_FLAG_ACTIVE;
IB_MGMT_RMPP_FLAG_ACTIVE;
} else {
copy_offset = IB_MGMT_MAD_HDR;
rmpp_active = 0;
}
data_len = count - hdr_size(file) - hdr_len;
......@@ -556,14 +560,22 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
rmpp_mad->mad_hdr.tid = *tid;
}
spin_lock_irq(&file->send_lock);
ret = is_duplicate(file, packet);
if (!ret)
if (!ib_mad_kernel_rmpp_agent(agent)
&& ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
&& (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
spin_lock_irq(&file->send_lock);
list_add_tail(&packet->list, &file->send_list);
spin_unlock_irq(&file->send_lock);
if (ret) {
ret = -EINVAL;
goto err_msg;
spin_unlock_irq(&file->send_lock);
} else {
spin_lock_irq(&file->send_lock);
ret = is_duplicate(file, packet);
if (!ret)
list_add_tail(&packet->list, &file->send_list);
spin_unlock_irq(&file->send_lock);
if (ret) {
ret = -EINVAL;
goto err_msg;
}
}
ret = ib_post_send_mad(packet->msg, NULL);
......@@ -614,6 +626,8 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
mutex_lock(&file->mutex);
if (!file->port->ib_dev) {
dev_notice(file->port->dev,
"ib_umad_reg_agent: invalid device\n");
ret = -EPIPE;
goto out;
}
......@@ -624,6 +638,9 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
}
if (ureq.qpn != 0 && ureq.qpn != 1) {
dev_notice(file->port->dev,
"ib_umad_reg_agent: invalid QPN %d specified\n",
ureq.qpn);
ret = -EINVAL;
goto out;
}
......@@ -632,11 +649,15 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
if (!__get_agent(file, agent_id))
goto found;
dev_notice(file->port->dev,
"ib_umad_reg_agent: Max Agents (%u) reached\n",
IB_UMAD_MAX_AGENTS);
ret = -ENOMEM;
goto out;
found:
if (ureq.mgmt_class) {
memset(&req, 0, sizeof(req));
req.mgmt_class = ureq.mgmt_class;
req.mgmt_class_version = ureq.mgmt_class_version;
memcpy(req.oui, ureq.oui, sizeof req.oui);
......@@ -657,7 +678,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
ureq.mgmt_class ? &req : NULL,
ureq.rmpp_version,
send_handler, recv_handler, file);
send_handler, recv_handler, file, 0);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
agent = NULL;
......@@ -673,10 +694,11 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
if (!file->already_used) {
file->already_used = 1;
if (!file->use_pkey_index) {
printk(KERN_WARNING "user_mad: process %s did not enable "
"P_Key index support.\n", current->comm);
printk(KERN_WARNING "user_mad: Documentation/infiniband/user_mad.txt "
"has info on the new ABI.\n");
dev_warn(file->port->dev,
"process %s did not enable P_Key index support.\n",
current->comm);
dev_warn(file->port->dev,
" Documentation/infiniband/user_mad.txt has info on the new ABI.\n");
}
}
......@@ -694,6 +716,119 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
return ret;
}
static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
{
struct ib_user_mad_reg_req2 ureq;
struct ib_mad_reg_req req;
struct ib_mad_agent *agent = NULL;
int agent_id;
int ret;
mutex_lock(&file->port->file_mutex);
mutex_lock(&file->mutex);
if (!file->port->ib_dev) {
dev_notice(file->port->dev,
"ib_umad_reg_agent2: invalid device\n");
ret = -EPIPE;
goto out;
}
if (copy_from_user(&ureq, arg, sizeof(ureq))) {
ret = -EFAULT;
goto out;
}
if (ureq.qpn != 0 && ureq.qpn != 1) {
dev_notice(file->port->dev,
"ib_umad_reg_agent2: invalid QPN %d specified\n",
ureq.qpn);
ret = -EINVAL;
goto out;
}
if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) {
dev_notice(file->port->dev,
"ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n",
ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
ret = -EINVAL;
if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP,
(u32 __user *) (arg + offsetof(struct
ib_user_mad_reg_req2, flags))))
ret = -EFAULT;
goto out;
}
for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
if (!__get_agent(file, agent_id))
goto found;
dev_notice(file->port->dev,
"ib_umad_reg_agent2: Max Agents (%u) reached\n",
IB_UMAD_MAX_AGENTS);
ret = -ENOMEM;
goto out;
found:
if (ureq.mgmt_class) {
memset(&req, 0, sizeof(req));
req.mgmt_class = ureq.mgmt_class;
req.mgmt_class_version = ureq.mgmt_class_version;
if (ureq.oui & 0xff000000) {
dev_notice(file->port->dev,
"ib_umad_reg_agent2 failed: oui invalid 0x%08x\n",
ureq.oui);
ret = -EINVAL;
goto out;
}
req.oui[2] = ureq.oui & 0x0000ff;
req.oui[1] = (ureq.oui & 0x00ff00) >> 8;
req.oui[0] = (ureq.oui & 0xff0000) >> 16;
memcpy(req.method_mask, ureq.method_mask,
sizeof(req.method_mask));
}
agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
ureq.mgmt_class ? &req : NULL,
ureq.rmpp_version,
send_handler, recv_handler, file,
ureq.flags);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
agent = NULL;
goto out;
}
if (put_user(agent_id,
(u32 __user *)(arg +
offsetof(struct ib_user_mad_reg_req2, id)))) {
ret = -EFAULT;
goto out;
}
if (!file->already_used) {
file->already_used = 1;
file->use_pkey_index = 1;
}
file->agent[agent_id] = agent;
ret = 0;
out:
mutex_unlock(&file->mutex);
if (ret && agent)
ib_unregister_mad_agent(agent);
mutex_unlock(&file->port->file_mutex);
return ret;
}
static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{
struct ib_mad_agent *agent = NULL;
......@@ -749,6 +884,8 @@ static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
case IB_USER_MAD_ENABLE_PKEY:
return ib_umad_enable_pkey(filp->private_data);
case IB_USER_MAD_REGISTER_AGENT2:
return ib_umad_reg_agent2(filp->private_data, (void __user *) arg);
default:
return -ENOIOCTLCMD;
}
......@@ -765,6 +902,8 @@ static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
case IB_USER_MAD_ENABLE_PKEY:
return ib_umad_enable_pkey(filp->private_data);
case IB_USER_MAD_REGISTER_AGENT2:
return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg));
default:
return -ENOIOCTLCMD;
}
......@@ -983,7 +1122,7 @@ static CLASS_ATTR_STRING(abi_version, S_IRUGO,
static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
static int find_overflow_devnum(void)
static int find_overflow_devnum(struct ib_device *device)
{
int ret;
......@@ -991,7 +1130,8 @@ static int find_overflow_devnum(void)
ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
"infiniband_mad");
if (ret) {
printk(KERN_ERR "user_mad: couldn't register dynamic device number\n");
dev_err(&device->dev,
"couldn't register dynamic device number\n");
return ret;
}
}
......@@ -1014,7 +1154,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
if (devnum >= IB_UMAD_MAX_PORTS) {
spin_unlock(&port_lock);
devnum = find_overflow_devnum();
devnum = find_overflow_devnum(device);
if (devnum < 0)
return -1;
......@@ -1200,14 +1340,14 @@ static int __init ib_umad_init(void)
ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
"infiniband_mad");
if (ret) {
printk(KERN_ERR "user_mad: couldn't register device number\n");
pr_err("couldn't register device number\n");
goto out;
}
umad_class = class_create(THIS_MODULE, "infiniband_mad");
if (IS_ERR(umad_class)) {
ret = PTR_ERR(umad_class);
printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
pr_err("couldn't create class infiniband_mad\n");
goto out_chrdev;
}
......@@ -1215,13 +1355,13 @@ static int __init ib_umad_init(void)
ret = class_create_file(umad_class, &class_attr_abi_version.attr);
if (ret) {
printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
pr_err("couldn't create abi_version attribute\n");
goto out_class;
}
ret = ib_register_client(&umad_client);
if (ret) {
printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
pr_err("couldn't register ib_umad client\n");
goto out_class;
}
......
......@@ -260,11 +260,14 @@ static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
int msg_size)
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
size_t q_size, size_t msg_size)
{
u8 *pool_start;
if (q_size > SIZE_MAX / msg_size)
return -EINVAL;
pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
&mq->host_dma, GFP_KERNEL);
if (!pool_start)
......
......@@ -182,6 +182,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
chp = get_chp(dev, qid);
if (chp) {
t4_clear_cq_armed(&chp->cq);
spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
......
......@@ -1066,7 +1066,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
struct c4iw_cq *schp)
{
int count;
int flushed;
int rq_flushed, sq_flushed;
unsigned long flag;
PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
......@@ -1084,27 +1084,40 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
c4iw_flush_hw_cq(rchp);
c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&rchp->lock, flag);
if (flushed) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
/* locking hierarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&schp->lock, flag);
spin_lock(&qhp->lock);
if (schp != rchp)
c4iw_flush_hw_cq(schp);
flushed = c4iw_flush_sq(qhp);
sq_flushed = c4iw_flush_sq(qhp);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&schp->lock, flag);
if (flushed) {
spin_lock_irqsave(&schp->comp_handler_lock, flag);
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
if (schp == rchp) {
if (t4_clear_cq_armed(&rchp->cq) &&
(rq_flushed || sq_flushed)) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
} else {
if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
spin_lock_irqsave(&schp->comp_handler_lock, flag);
(*schp->ibcq.comp_handler)(&schp->ibcq,
schp->ibcq.cq_context);
spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
}
}
}
......
......@@ -531,6 +531,10 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq)
return !wq->rq.queue[wq->rq.size].status.db_off;
}
enum t4_cq_flags {
CQ_ARMED = 1,
};
struct t4_cq {
struct t4_cqe *queue;
dma_addr_t dma_addr;
......@@ -551,12 +555,19 @@ struct t4_cq {
u16 cidx_inc;
u8 gen;
u8 error;
unsigned long flags;
};
static inline int t4_clear_cq_armed(struct t4_cq *cq)
{
return test_and_clear_bit(CQ_ARMED, &cq->flags);
}
static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
u32 val;
set_bit(CQ_ARMED, &cq->flags);
while (cq->cidx_inc > CIDXINC_MASK) {
val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
INGRESSQID(cq->cqid);
......
......@@ -726,7 +726,7 @@ static int add_pkey(struct ipath_devdata *dd, u16 key)
* @dd: the infinipath device
* @pkeys: the PKEY table
*/
static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port)
{
struct ipath_portdata *pd;
int i;
......@@ -759,6 +759,7 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
}
if (changed) {
u64 pkey;
struct ib_event event;
pkey = (u64) dd->ipath_pkeys[0] |
((u64) dd->ipath_pkeys[1] << 16) |
......@@ -768,12 +769,17 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
(unsigned long long) pkey);
ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
pkey);
event.event = IB_EVENT_PKEY_CHANGE;
event.device = &dd->verbs_dev->ibdev;
event.element.port_num = port;
ib_dispatch_event(&event);
}
return 0;
}
static int recv_subn_set_pkeytable(struct ib_smp *smp,
struct ib_device *ibdev)
struct ib_device *ibdev, u8 port)
{
u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
__be16 *p = (__be16 *) smp->data;
......@@ -784,7 +790,7 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
for (i = 0; i < n; i++)
q[i] = be16_to_cpu(p[i]);
if (startpx != 0 || set_pkeys(dev->dd, q) != 0)
if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0)
smp->status |= IB_SMP_INVALID_FIELD;
return recv_subn_get_pkeytable(smp, ibdev);
......@@ -1342,7 +1348,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
ret = recv_subn_set_portinfo(smp, ibdev, port_num);
goto bail;
case IB_SMP_ATTR_PKEY_TABLE:
ret = recv_subn_set_pkeytable(smp, ibdev);
ret = recv_subn_set_pkeytable(smp, ibdev, port_num);
goto bail;
case IB_SMP_ATTR_SM_INFO:
if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
......
......@@ -891,7 +891,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
q ? IB_QPT_GSI : IB_QPT_SMI,
NULL, 0, send_handler,
NULL, NULL);
NULL, NULL, 0);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
goto err;
......
......@@ -910,8 +910,7 @@ static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
const struct default_rules *pdefault_rules = default_table;
u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++,
pdefault_rules++) {
for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
memset(&field_types, 0, sizeof(field_types));
......@@ -965,8 +964,7 @@ static int __mlx4_ib_create_default_rules(
int size = 0;
int i;
for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/
sizeof(pdefault_rules->rules_create_list[0]); i++) {
for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
int ret;
union ib_flow_spec ib_spec;
switch (pdefault_rules->rules_create_list[i]) {
......
......@@ -2501,7 +2501,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags);
for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) {
if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
mlx5_ib_warn(dev, "\n");
err = -EINVAL;
*bad_wr = wr;
......
......@@ -294,7 +294,7 @@ int mthca_create_agents(struct mthca_dev *dev)
agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
q ? IB_QPT_GSI : IB_QPT_SMI,
NULL, 0, send_handler,
NULL, NULL);
NULL, NULL, 0);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
goto err;
......
......@@ -40,7 +40,7 @@
#include <be_roce.h>
#include "ocrdma_sli.h"
#define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u"
#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u"
#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
......@@ -137,6 +137,7 @@ struct mqe_ctx {
u16 cqe_status;
u16 ext_status;
bool cmd_done;
bool fw_error_state;
};
struct ocrdma_hw_mr {
......@@ -235,7 +236,10 @@ struct ocrdma_dev {
struct list_head entry;
struct rcu_head rcu;
int id;
u64 stag_arr[OCRDMA_MAX_STAG];
u64 *stag_arr;
u8 sl; /* service level */
bool pfc_state;
atomic_t update_sl;
u16 pvid;
u32 asic_id;
......@@ -518,4 +522,22 @@ static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
}
static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio)
{
return *(pfc + prio);
}
static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio)
{
return *(app_prio + prio);
}
static inline u8 ocrdma_is_enabled_and_synced(u32 state)
{ /* May also be used to interpret TC-state, QCN-state
* Appl-state and Logical-link-state in future.
*/
return (state & OCRDMA_STATE_FLAG_ENABLED) &&
(state & OCRDMA_STATE_FLAG_SYNC);
}
#endif
......@@ -35,6 +35,8 @@
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
#define OCRDMA_VID_PCP_SHIFT 0xD
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
struct ib_ah_attr *attr, int pdid)
{
......@@ -55,7 +57,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
if (vlan_tag && (vlan_tag < 0x1000)) {
eth.eth_type = cpu_to_be16(0x8100);
eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
vlan_tag |= (attr->sl & 7) << 13;
vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
eth.vlan_tag = cpu_to_be16(vlan_tag);
eth_sz = sizeof(struct ocrdma_eth_vlan);
vlan_enabled = true;
......@@ -100,6 +102,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
if (!(attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
if (atomic_cmpxchg(&dev->update_sl, 1, 0))
ocrdma_init_service_level(dev);
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
......
......@@ -525,7 +525,7 @@ static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
cmd->eqn = eq->id;
cmd->cqe_count = cq->size / sizeof(struct ocrdma_mcqe);
cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);
ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
cq->dma, PAGE_SIZE_4K);
......@@ -661,7 +661,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
{
struct ocrdma_qp *qp = NULL;
struct ocrdma_cq *cq = NULL;
struct ib_event ib_evt = { 0 };
struct ib_event ib_evt;
int cq_event = 0;
int qp_event = 1;
int srq_event = 0;
......@@ -674,6 +674,8 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];
memset(&ib_evt, 0, sizeof(ib_evt));
ib_evt.device = &dev->ibdev;
switch (type) {
......@@ -771,6 +773,10 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
break;
case OCRDMA_ASYNC_EVENT_COS_VALUE:
atomic_set(&dev->update_sl, 1);
break;
default:
/* Not interested evts. */
break;
......@@ -962,8 +968,12 @@ static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
msecs_to_jiffies(30000));
if (status)
return 0;
else
else {
dev->mqe_ctx.fw_error_state = true;
pr_err("%s(%d) mailbox timeout: fw not responding\n",
__func__, dev->id);
return -1;
}
}
/* issue a mailbox command on the MQ */
......@@ -975,6 +985,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
struct ocrdma_mbx_rsp *rsp = NULL;
mutex_lock(&dev->mqe_ctx.lock);
if (dev->mqe_ctx.fw_error_state)
goto mbx_err;
ocrdma_post_mqe(dev, mqe);
status = ocrdma_wait_mqe_cmpl(dev);
if (status)
......@@ -1078,7 +1090,8 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
attr->max_mw = rsp->max_mw;
attr->max_mr = rsp->max_mr;
attr->max_mr_size = ~0ull;
attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
rsp->max_mr_size_lo;
attr->max_fmr = 0;
attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
......@@ -1252,7 +1265,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;
dev->hba_port_num = hba_attribs->phy_port;
dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
OCRDMA_HBA_ATTRB_PTNUM_MASK)
>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
strncpy(dev->model_number,
hba_attribs->controller_model_number, 31);
}
......@@ -1302,7 +1317,8 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
goto mbx_err;
rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
*lnk_speed = rsp->phys_port_speed;
*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
>> OCRDMA_PHY_PS_SHIFT;
mbx_err:
kfree(cmd);
......@@ -1328,11 +1344,16 @@ static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
goto mbx_err;
rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
dev->phy.phy_type = le16_to_cpu(rsp->phy_type);
dev->phy.phy_type =
(rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK);
dev->phy.interface_type =
(rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK)
>> OCRDMA_IF_TYPE_SHIFT;
dev->phy.auto_speeds_supported =
le16_to_cpu(rsp->auto_speeds_supported);
(rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK);
dev->phy.fixed_speeds_supported =
le16_to_cpu(rsp->fixed_speeds_supported);
(rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK)
>> OCRDMA_FSPEED_SUPP_SHIFT;
mbx_err:
kfree(cmd);
return status;
......@@ -1457,8 +1478,8 @@ static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
pbes[i].pa_lo = (u32) (pa & 0xffffffff);
pbes[i].pa_hi = (u32) upper_32_bits(pa);
pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff);
pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa));
pa += PAGE_SIZE;
}
cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
......@@ -1501,6 +1522,7 @@ static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
dev->av_tbl.pa);
dev->av_tbl.va = NULL;
dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
dev->av_tbl.pbl.pa);
kfree(cmd);
......@@ -1624,14 +1646,16 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
OCRDMA_CREATE_CQ_TYPE_SHIFT;
cq->phase_change = false;
cmd->cmd.cqe_count = (cq->len / cqe_size);
cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
} else {
cmd->cmd.cqe_count = (cq->len / cqe_size) - 1;
cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
cq->phase_change = true;
}
cmd->cmd.pd_id = pd_id; /* valid only for v3 */
/* pd_id valid only for v3 */
cmd->cmd.pdid_cqecnt |= (pd_id <<
OCRDMA_CREATE_CQ_CMD_PDID_SHIFT);
ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
......@@ -2206,7 +2230,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
qp->rq_cq = cq;
if (pd->dpp_enabled && pd->num_dpp_qp) {
if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
(attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
dpp_cq_id);
}
......@@ -2264,6 +2289,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
return -EINVAL;
if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
ocrdma_init_service_level(qp->dev);
cmd->params.tclass_sq_psn |=
(ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
cmd->params.rnt_rc_sl_fl |=
......@@ -2297,6 +2324,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
cmd->params.vlan_dmac_b4_to_b5 |=
vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
cmd->params.rnt_rc_sl_fl |=
(qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
}
return 0;
}
......@@ -2604,6 +2633,168 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
return status;
}
static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
struct ocrdma_dcbx_cfg *dcbxcfg)
{
int status = 0;
dma_addr_t pa;
struct ocrdma_mqe cmd;
struct ocrdma_get_dcbx_cfg_req *req = NULL;
struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL;
struct pci_dev *pdev = dev->nic_info.pdev;
struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge;
memset(&cmd, 0, sizeof(struct ocrdma_mqe));
cmd.hdr.pyld_len = max_t (u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp),
sizeof(struct ocrdma_get_dcbx_cfg_req));
req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
if (!req) {
status = -ENOMEM;
goto mem_err;
}
cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
OCRDMA_MQE_HDR_SGE_CNT_MASK;
mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL);
mqe_sge->pa_hi = (u32) upper_32_bits(pa);
mqe_sge->len = cmd.hdr.pyld_len;
memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req));
ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
req->param_type = ptype;
status = ocrdma_mbx_cmd(dev, &cmd);
if (status)
goto mbx_err;
rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp));
memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg));
mbx_err:
dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
mem_err:
return status;
}
#define OCRDMA_MAX_SERVICE_LEVEL_INDEX 0x08
#define OCRDMA_DEFAULT_SERVICE_LEVEL 0x05
static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
struct ocrdma_dcbx_cfg *dcbxcfg,
u8 *srvc_lvl)
{
int status = -EINVAL, indx, slindx;
int ventry_cnt;
struct ocrdma_app_parameter *app_param;
u8 valid, proto_sel;
u8 app_prio, pfc_prio;
u16 proto;
if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) {
pr_info("%s ocrdma%d DCBX is disabled\n",
dev_name(&dev->nic_info.pdev->dev), dev->id);
goto out;
}
if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) {
pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n",
dev_name(&dev->nic_info.pdev->dev), dev->id,
(ptype > 0 ? "operational" : "admin"),
(dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ?
"enabled" : "disabled",
(dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ?
"" : ", not sync'ed");
goto out;
} else {
pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n",
dev_name(&dev->nic_info.pdev->dev), dev->id);
}
ventry_cnt = (dcbxcfg->tcv_aev_opv_st >>
OCRDMA_DCBX_APP_ENTRY_SHIFT)
& OCRDMA_DCBX_STATE_MASK;
for (indx = 0; indx < ventry_cnt; indx++) {
app_param = &dcbxcfg->app_param[indx];
valid = (app_param->valid_proto_app >>
OCRDMA_APP_PARAM_VALID_SHIFT)
& OCRDMA_APP_PARAM_VALID_MASK;
proto_sel = (app_param->valid_proto_app
>> OCRDMA_APP_PARAM_PROTO_SEL_SHIFT)
& OCRDMA_APP_PARAM_PROTO_SEL_MASK;
proto = app_param->valid_proto_app &
OCRDMA_APP_PARAM_APP_PROTO_MASK;
if (
valid && proto == OCRDMA_APP_PROTO_ROCE &&
proto_sel == OCRDMA_PROTO_SELECT_L2) {
for (slindx = 0; slindx <
OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
app_prio = ocrdma_get_app_prio(
(u8 *)app_param->app_prio,
slindx);
pfc_prio = ocrdma_get_pfc_prio(
(u8 *)dcbxcfg->pfc_prio,
slindx);
if (app_prio && pfc_prio) {
*srvc_lvl = slindx;
status = 0;
goto out;
}
}
if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) {
pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n",
dev_name(&dev->nic_info.pdev->dev),
dev->id, proto);
}
}
}
out:
return status;
}
void ocrdma_init_service_level(struct ocrdma_dev *dev)
{
int status = 0, indx;
struct ocrdma_dcbx_cfg dcbxcfg;
u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL;
int ptype = OCRDMA_PARAMETER_TYPE_OPER;
for (indx = 0; indx < 2; indx++) {
status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg);
if (status) {
pr_err("%s(): status=%d\n", __func__, status);
ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
continue;
}
status = ocrdma_parse_dcbxcfg_rsp(dev, ptype,
&dcbxcfg, &srvc_lvl);
if (status) {
ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
continue;
}
break;
}
if (status)
pr_info("%s ocrdma%d service level default\n",
dev_name(&dev->nic_info.pdev->dev), dev->id);
else
pr_info("%s ocrdma%d service level %d\n",
dev_name(&dev->nic_info.pdev->dev), dev->id,
srvc_lvl);
dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state);
dev->sl = srvc_lvl;
}
int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
int i;
......@@ -2709,13 +2900,15 @@ int ocrdma_init_hw(struct ocrdma_dev *dev)
goto conf_err;
status = ocrdma_mbx_get_phy_info(dev);
if (status)
goto conf_err;
goto info_attrb_err;
status = ocrdma_mbx_get_ctrl_attribs(dev);
if (status)
goto conf_err;
goto info_attrb_err;
return 0;
info_attrb_err:
ocrdma_mbx_delete_ah_tbl(dev);
conf_err:
ocrdma_destroy_mq(dev);
mq_err:
......
......@@ -135,4 +135,6 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
char *port_speed_string(struct ocrdma_dev *dev);
void ocrdma_init_service_level(struct ocrdma_dev *);
#endif /* __OCRDMA_HW_H__ */
......@@ -324,6 +324,11 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
if (!dev->qp_tbl)
goto alloc_err;
}
dev->stag_arr = kzalloc(sizeof(u64) * OCRDMA_MAX_STAG, GFP_KERNEL);
if (dev->stag_arr == NULL)
goto alloc_err;
spin_lock_init(&dev->av_tbl.lock);
spin_lock_init(&dev->flush_q_lock);
return 0;
......@@ -334,6 +339,7 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
static void ocrdma_free_resources(struct ocrdma_dev *dev)
{
kfree(dev->stag_arr);
kfree(dev->qp_tbl);
kfree(dev->cq_tbl);
kfree(dev->sgid_tbl);
......@@ -353,15 +359,25 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
{
struct ocrdma_dev *dev = dev_get_drvdata(device);
return scnprintf(buf, PAGE_SIZE, "%s", &dev->attr.fw_ver[0]);
return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->attr.fw_ver[0]);
}
static ssize_t show_hca_type(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ocrdma_dev *dev = dev_get_drvdata(device);
return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
static struct device_attribute *ocrdma_attributes[] = {
&dev_attr_hw_rev,
&dev_attr_fw_ver
&dev_attr_fw_ver,
&dev_attr_hca_type
};
static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
......@@ -372,6 +388,58 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
}
static void ocrdma_init_ipv4_gids(struct ocrdma_dev *dev,
struct net_device *net)
{
struct in_device *in_dev;
union ib_gid gid;
in_dev = in_dev_get(net);
if (in_dev) {
for_ifa(in_dev) {
ipv6_addr_set_v4mapped(ifa->ifa_address,
(struct in6_addr *)&gid);
ocrdma_add_sgid(dev, &gid);
}
endfor_ifa(in_dev);
in_dev_put(in_dev);
}
}
static void ocrdma_init_ipv6_gids(struct ocrdma_dev *dev,
struct net_device *net)
{
#if IS_ENABLED(CONFIG_IPV6)
struct inet6_dev *in6_dev;
union ib_gid *pgid;
struct inet6_ifaddr *ifp;
in6_dev = in6_dev_get(net);
if (in6_dev) {
read_lock_bh(&in6_dev->lock);
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
pgid = (union ib_gid *)&ifp->addr;
ocrdma_add_sgid(dev, pgid);
}
read_unlock_bh(&in6_dev->lock);
in6_dev_put(in6_dev);
}
#endif
}
static void ocrdma_init_gid_table(struct ocrdma_dev *dev)
{
struct net_device *net_dev;
for_each_netdev(&init_net, net_dev) {
struct net_device *real_dev = rdma_vlan_dev_real_dev(net_dev) ?
rdma_vlan_dev_real_dev(net_dev) : net_dev;
if (real_dev == dev->nic_info.netdev) {
ocrdma_init_ipv4_gids(dev, net_dev);
ocrdma_init_ipv6_gids(dev, net_dev);
}
}
}
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
int status = 0, i;
......@@ -399,6 +467,8 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (status)
goto alloc_err;
ocrdma_init_service_level(dev);
ocrdma_init_gid_table(dev);
status = ocrdma_register_device(dev);
if (status)
goto alloc_err;
......@@ -508,6 +578,12 @@ static int ocrdma_close(struct ocrdma_dev *dev)
return 0;
}
static void ocrdma_shutdown(struct ocrdma_dev *dev)
{
ocrdma_close(dev);
ocrdma_remove(dev);
}
/* event handling via NIC driver ensures that all the NIC specific
* initialization done before RoCE driver notifies
* event to stack.
......@@ -521,6 +597,9 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
case BE_DEV_DOWN:
ocrdma_close(dev);
break;
case BE_DEV_SHUTDOWN:
ocrdma_shutdown(dev);
break;
}
}
......
......@@ -44,35 +44,39 @@ enum {
#define OCRDMA_SUBSYS_ROCE 10
enum {
OCRDMA_CMD_QUERY_CONFIG = 1,
OCRDMA_CMD_ALLOC_PD,
OCRDMA_CMD_DEALLOC_PD,
OCRDMA_CMD_CREATE_AH_TBL,
OCRDMA_CMD_DELETE_AH_TBL,
OCRDMA_CMD_CREATE_QP,
OCRDMA_CMD_QUERY_QP,
OCRDMA_CMD_MODIFY_QP,
OCRDMA_CMD_DELETE_QP,
OCRDMA_CMD_RSVD1,
OCRDMA_CMD_ALLOC_LKEY,
OCRDMA_CMD_DEALLOC_LKEY,
OCRDMA_CMD_REGISTER_NSMR,
OCRDMA_CMD_REREGISTER_NSMR,
OCRDMA_CMD_REGISTER_NSMR_CONT,
OCRDMA_CMD_QUERY_NSMR,
OCRDMA_CMD_ALLOC_MW,
OCRDMA_CMD_QUERY_MW,
OCRDMA_CMD_CREATE_SRQ,
OCRDMA_CMD_QUERY_SRQ,
OCRDMA_CMD_MODIFY_SRQ,
OCRDMA_CMD_DELETE_SRQ,
OCRDMA_CMD_ATTACH_MCAST,
OCRDMA_CMD_DETACH_MCAST,
OCRDMA_CMD_GET_RDMA_STATS,
OCRDMA_CMD_ALLOC_PD = 2,
OCRDMA_CMD_DEALLOC_PD = 3,
OCRDMA_CMD_CREATE_AH_TBL = 4,
OCRDMA_CMD_DELETE_AH_TBL = 5,
OCRDMA_CMD_CREATE_QP = 6,
OCRDMA_CMD_QUERY_QP = 7,
OCRDMA_CMD_MODIFY_QP = 8 ,
OCRDMA_CMD_DELETE_QP = 9,
OCRDMA_CMD_RSVD1 = 10,
OCRDMA_CMD_ALLOC_LKEY = 11,
OCRDMA_CMD_DEALLOC_LKEY = 12,
OCRDMA_CMD_REGISTER_NSMR = 13,
OCRDMA_CMD_REREGISTER_NSMR = 14,
OCRDMA_CMD_REGISTER_NSMR_CONT = 15,
OCRDMA_CMD_QUERY_NSMR = 16,
OCRDMA_CMD_ALLOC_MW = 17,
OCRDMA_CMD_QUERY_MW = 18,
OCRDMA_CMD_CREATE_SRQ = 19,
OCRDMA_CMD_QUERY_SRQ = 20,
OCRDMA_CMD_MODIFY_SRQ = 21,
OCRDMA_CMD_DELETE_SRQ = 22,
OCRDMA_CMD_ATTACH_MCAST = 23,
OCRDMA_CMD_DETACH_MCAST = 24,
OCRDMA_CMD_CREATE_RBQ = 25,
OCRDMA_CMD_DESTROY_RBQ = 26,
OCRDMA_CMD_GET_RDMA_STATS = 27,
OCRDMA_CMD_MAX
};
......@@ -103,7 +107,7 @@ enum {
#define OCRDMA_MAX_QP 2048
#define OCRDMA_MAX_CQ 2048
#define OCRDMA_MAX_STAG 8192
#define OCRDMA_MAX_STAG 16384
enum {
OCRDMA_DB_RQ_OFFSET = 0xE0,
......@@ -422,7 +426,12 @@ struct ocrdma_ae_qp_mcqe {
#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
#define OCRDMA_ASYNC_EVENT_PVID_STATE 0x3
enum ocrdma_async_grp5_events {
OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01,
OCRDMA_ASYNC_EVENT_COS_VALUE = 0x02,
OCRDMA_ASYNC_EVENT_PVID_STATE = 0x03
};
enum OCRDMA_ASYNC_EVENT_TYPE {
OCRDMA_CQ_ERROR = 0x00,
......@@ -525,8 +534,8 @@ struct ocrdma_mbx_query_config {
u32 max_ird_ord_per_qp;
u32 max_shared_ird_ord;
u32 max_mr;
u32 max_mr_size_lo;
u32 max_mr_size_hi;
u32 max_mr_size_lo;
u32 max_num_mr_pbl;
u32 max_mw;
u32 max_fmr;
......@@ -580,17 +589,26 @@ enum {
OCRDMA_FN_MODE_RDMA = 0x4
};
enum {
OCRDMA_IF_TYPE_MASK = 0xFFFF0000,
OCRDMA_IF_TYPE_SHIFT = 0x10,
OCRDMA_PHY_TYPE_MASK = 0x0000FFFF,
OCRDMA_FUTURE_DETAILS_MASK = 0xFFFF0000,
OCRDMA_FUTURE_DETAILS_SHIFT = 0x10,
OCRDMA_EX_PHY_DETAILS_MASK = 0x0000FFFF,
OCRDMA_FSPEED_SUPP_MASK = 0xFFFF0000,
OCRDMA_FSPEED_SUPP_SHIFT = 0x10,
OCRDMA_ASPEED_SUPP_MASK = 0x0000FFFF
};
struct ocrdma_get_phy_info_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u16 phy_type;
u16 interface_type;
u32 ityp_ptyp;
u32 misc_params;
u16 ext_phy_details;
u16 rsvd;
u16 auto_speeds_supported;
u16 fixed_speeds_supported;
u32 ftrdtl_exphydtl;
u32 fspeed_aspeed;
u32 future_use[2];
};
......@@ -603,19 +621,34 @@ enum {
OCRDMA_PHY_SPEED_40GBPS = 0x20
};
enum {
OCRDMA_PORT_NUM_MASK = 0x3F,
OCRDMA_PT_MASK = 0xC0,
OCRDMA_PT_SHIFT = 0x6,
OCRDMA_LINK_DUP_MASK = 0x0000FF00,
OCRDMA_LINK_DUP_SHIFT = 0x8,
OCRDMA_PHY_PS_MASK = 0x00FF0000,
OCRDMA_PHY_PS_SHIFT = 0x10,
OCRDMA_PHY_PFLT_MASK = 0xFF000000,
OCRDMA_PHY_PFLT_SHIFT = 0x18,
OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000,
OCRDMA_QOS_LNKSP_SHIFT = 0x10,
OCRDMA_LLST_MASK = 0xFF,
OCRDMA_PLFC_MASK = 0x00000400,
OCRDMA_PLFC_SHIFT = 0x8,
OCRDMA_PLRFC_MASK = 0x00000200,
OCRDMA_PLRFC_SHIFT = 0x8,
OCRDMA_PLTFC_MASK = 0x00000100,
OCRDMA_PLTFC_SHIFT = 0x8
};
struct ocrdma_get_link_speed_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u8 pt_port_num;
u8 link_duplex;
u8 phys_port_speed;
u8 phys_port_fault;
u16 rsvd1;
u16 qos_lnk_speed;
u8 logical_lnk_status;
u8 rsvd2[3];
u32 pflt_pps_ld_pnum;
u32 qos_lsp;
u32 res_lls;
};
enum {
......@@ -666,8 +699,7 @@ struct ocrdma_create_cq_cmd {
u32 pgsz_pgcnt;
u32 ev_cnt_flags;
u32 eqn;
u16 cqe_count;
u16 pd_id;
u32 pdid_cqecnt;
u32 rsvd6;
struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES];
};
......@@ -677,6 +709,10 @@ struct ocrdma_create_cq {
struct ocrdma_create_cq_cmd cmd;
};
enum {
OCRDMA_CREATE_CQ_CMD_PDID_SHIFT = 0x10
};
enum {
OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF
};
......@@ -1231,7 +1267,6 @@ struct ocrdma_destroy_srq {
enum {
OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16),
OCRDMA_PD_MAX_DPP_ENABLED_QP = 8,
OCRDMA_DPP_PAGE_SIZE = 4096
};
......@@ -1896,12 +1931,62 @@ struct ocrdma_rdma_stats_resp {
struct ocrdma_rx_dbg_stats rx_dbg_stats;
} __packed;
enum {
OCRDMA_HBA_ATTRB_EPROM_VER_LO_MASK = 0xFF,
OCRDMA_HBA_ATTRB_EPROM_VER_HI_MASK = 0xFF00,
OCRDMA_HBA_ATTRB_EPROM_VER_HI_SHIFT = 0x08,
OCRDMA_HBA_ATTRB_CDBLEN_MASK = 0xFFFF,
OCRDMA_HBA_ATTRB_ASIC_REV_MASK = 0xFF0000,
OCRDMA_HBA_ATTRB_ASIC_REV_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_GUID0_MASK = 0xFF000000,
OCRDMA_HBA_ATTRB_GUID0_SHIFT = 0x18,
OCRDMA_HBA_ATTRB_GUID13_MASK = 0xFF,
OCRDMA_HBA_ATTRB_GUID14_MASK = 0xFF00,
OCRDMA_HBA_ATTRB_GUID14_SHIFT = 0x08,
OCRDMA_HBA_ATTRB_GUID15_MASK = 0xFF0000,
OCRDMA_HBA_ATTRB_GUID15_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_PCNT_MASK = 0xFF000000,
OCRDMA_HBA_ATTRB_PCNT_SHIFT = 0x18,
OCRDMA_HBA_ATTRB_LDTOUT_MASK = 0xFFFF,
OCRDMA_HBA_ATTRB_ISCSI_VER_MASK = 0xFF0000,
OCRDMA_HBA_ATTRB_ISCSI_VER_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_MFUNC_DEV_MASK = 0xFF000000,
OCRDMA_HBA_ATTRB_MFUNC_DEV_SHIFT = 0x18,
OCRDMA_HBA_ATTRB_CV_MASK = 0xFF,
OCRDMA_HBA_ATTRB_HBA_ST_MASK = 0xFF00,
OCRDMA_HBA_ATTRB_HBA_ST_SHIFT = 0x08,
OCRDMA_HBA_ATTRB_MAX_DOMS_MASK = 0xFF0000,
OCRDMA_HBA_ATTRB_MAX_DOMS_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_PTNUM_MASK = 0x3F000000,
OCRDMA_HBA_ATTRB_PTNUM_SHIFT = 0x18,
OCRDMA_HBA_ATTRB_PT_MASK = 0xC0000000,
OCRDMA_HBA_ATTRB_PT_SHIFT = 0x1E,
OCRDMA_HBA_ATTRB_ISCSI_FET_MASK = 0xFF,
OCRDMA_HBA_ATTRB_ASIC_GEN_MASK = 0xFF00,
OCRDMA_HBA_ATTRB_ASIC_GEN_SHIFT = 0x08,
OCRDMA_HBA_ATTRB_PCI_VID_MASK = 0xFFFF,
OCRDMA_HBA_ATTRB_PCI_DID_MASK = 0xFFFF0000,
OCRDMA_HBA_ATTRB_PCI_DID_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_PCI_SVID_MASK = 0xFFFF,
OCRDMA_HBA_ATTRB_PCI_SSID_MASK = 0xFFFF0000,
OCRDMA_HBA_ATTRB_PCI_SSID_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_PCI_BUSNUM_MASK = 0xFF,
OCRDMA_HBA_ATTRB_PCI_DEVNUM_MASK = 0xFF00,
OCRDMA_HBA_ATTRB_PCI_DEVNUM_SHIFT = 0x08,
OCRDMA_HBA_ATTRB_PCI_FUNCNUM_MASK = 0xFF0000,
OCRDMA_HBA_ATTRB_PCI_FUNCNUM_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_IF_TYPE_MASK = 0xFF000000,
OCRDMA_HBA_ATTRB_IF_TYPE_SHIFT = 0x18,
OCRDMA_HBA_ATTRB_NETFIL_MASK =0xFF
};
struct mgmt_hba_attribs {
u8 flashrom_version_string[32];
u8 manufacturer_name[32];
u32 supported_modes;
u32 rsvd0[3];
u32 rsvd_eprom_verhi_verlo;
u32 mbx_ds_ver;
u32 epfw_ds_ver;
u8 ncsi_ver_string[12];
u32 default_extended_timeout;
u8 controller_model_number[32];
......@@ -1914,34 +1999,26 @@ struct mgmt_hba_attribs {
u8 driver_version_string[32];
u8 fw_on_flash_version_string[32];
u32 functionalities_supported;
u16 max_cdblength;
u8 asic_revision;
u8 generational_guid[16];
u8 hba_port_count;
u16 default_link_down_timeout;
u8 iscsi_ver_min_max;
u8 multifunction_device;
u8 cache_valid;
u8 hba_status;
u8 max_domains_supported;
u8 phy_port;
u32 guid0_asicrev_cdblen;
u8 generational_guid[12];
u32 portcnt_guid15;
u32 mfuncdev_iscsi_ldtout;
u32 ptpnum_maxdoms_hbast_cv;
u32 firmware_post_status;
u32 hba_mtu[8];
u32 rsvd1[4];
u32 res_asicgen_iscsi_feaures;
u32 rsvd1[3];
};
struct mgmt_controller_attrib {
struct mgmt_hba_attribs hba_attribs;
u16 pci_vendor_id;
u16 pci_device_id;
u16 pci_sub_vendor_id;
u16 pci_sub_system_id;
u8 pci_bus_number;
u8 pci_device_number;
u8 pci_function_number;
u8 interface_type;
u64 unique_identifier;
u32 rsvd0[5];
u32 pci_did_vid;
u32 pci_ssid_svid;
u32 ityp_fnum_devnum_bnum;
u32 uid_hi;
u32 uid_lo;
u32 res_nnetfil;
u32 rsvd0[4];
};
struct ocrdma_get_ctrl_attribs_rsp {
......@@ -1949,5 +2026,79 @@ struct ocrdma_get_ctrl_attribs_rsp {
struct mgmt_controller_attrib ctrl_attribs;
};
#define OCRDMA_SUBSYS_DCBX 0x10
enum OCRDMA_DCBX_OPCODE {
OCRDMA_CMD_GET_DCBX_CONFIG = 0x01
};
enum OCRDMA_DCBX_PARAM_TYPE {
OCRDMA_PARAMETER_TYPE_ADMIN = 0x00,
OCRDMA_PARAMETER_TYPE_OPER = 0x01,
OCRDMA_PARAMETER_TYPE_PEER = 0x02
};
enum OCRDMA_DCBX_APP_PROTO {
OCRDMA_APP_PROTO_ROCE = 0x8915
};
enum OCRDMA_DCBX_PROTO {
OCRDMA_PROTO_SELECT_L2 = 0x00,
OCRDMA_PROTO_SELECT_L4 = 0x01
};
enum OCRDMA_DCBX_APP_PARAM {
OCRDMA_APP_PARAM_APP_PROTO_MASK = 0xFFFF,
OCRDMA_APP_PARAM_PROTO_SEL_MASK = 0xFF,
OCRDMA_APP_PARAM_PROTO_SEL_SHIFT = 0x10,
OCRDMA_APP_PARAM_VALID_MASK = 0xFF,
OCRDMA_APP_PARAM_VALID_SHIFT = 0x18
};
enum OCRDMA_DCBX_STATE_FLAGS {
OCRDMA_STATE_FLAG_ENABLED = 0x01,
OCRDMA_STATE_FLAG_ADDVERTISED = 0x02,
OCRDMA_STATE_FLAG_WILLING = 0x04,
OCRDMA_STATE_FLAG_SYNC = 0x08,
OCRDMA_STATE_FLAG_UNSUPPORTED = 0x40000000,
OCRDMA_STATE_FLAG_NEG_FAILD = 0x80000000
};
enum OCRDMA_TCV_AEV_OPV_ST {
OCRDMA_DCBX_TC_SUPPORT_MASK = 0xFF,
OCRDMA_DCBX_TC_SUPPORT_SHIFT = 0x18,
OCRDMA_DCBX_APP_ENTRY_SHIFT = 0x10,
OCRDMA_DCBX_OP_PARAM_SHIFT = 0x08,
OCRDMA_DCBX_STATE_MASK = 0xFF
};
struct ocrdma_app_parameter {
u32 valid_proto_app;
u32 oui;
u32 app_prio[2];
};
struct ocrdma_dcbx_cfg {
u32 tcv_aev_opv_st;
u32 tc_state;
u32 pfc_state;
u32 qcn_state;
u32 appl_state;
u32 ll_state;
u32 tc_bw[2];
u32 tc_prio[8];
u32 pfc_prio[2];
struct ocrdma_app_parameter app_param[15];
};
struct ocrdma_get_dcbx_cfg_req {
struct ocrdma_mbx_hdr hdr;
u32 param_type;
} __packed;
struct ocrdma_get_dcbx_cfg_rsp {
struct ocrdma_mbx_rsp hdr;
struct ocrdma_dcbx_cfg cfg;
} __packed;
#endif /* __OCRDMA_SLI_H__ */
......@@ -69,11 +69,11 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
attr->max_mr_size = ~0ull;
attr->max_mr_size = dev->attr.max_mr_size;
attr->page_size_cap = 0xffff000;
attr->vendor_id = dev->nic_info.pdev->vendor;
attr->vendor_part_id = dev->nic_info.pdev->device;
attr->hw_ver = 0;
attr->hw_ver = dev->asic_id;
attr->max_qp = dev->attr.max_qp;
attr->max_ah = OCRDMA_MAX_AH;
attr->max_qp_wr = dev->attr.max_wqe;
......@@ -268,7 +268,8 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
pd->dpp_enabled =
ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
pd->num_dpp_qp =
pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
pd->dpp_enabled ? (dev->nic_info.db_page_size /
dev->attr.wqe_size) : 0;
}
retry:
......@@ -328,7 +329,10 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
struct ocrdma_pd *pd = uctx->cntxt_pd;
struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
BUG_ON(uctx->pd_in_use);
if (uctx->pd_in_use) {
pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
__func__, dev->id, pd->id);
}
uctx->cntxt_pd = NULL;
status = _ocrdma_dealloc_pd(dev, pd);
return status;
......@@ -843,6 +847,13 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
if (mr->umem)
ib_umem_release(mr->umem);
kfree(mr);
/* Don't stop cleanup, in case FW is unresponsive */
if (dev->mqe_ctx.fw_error_state) {
status = 0;
pr_err("%s(%d) fw not responding.\n",
__func__, dev->id);
}
return status;
}
......@@ -2054,6 +2065,13 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
while (wr) {
if (qp->qp_type == IB_QPT_UD &&
(wr->opcode != IB_WR_SEND &&
wr->opcode != IB_WR_SEND_WITH_IMM)) {
*bad_wr = wr;
status = -EINVAL;
break;
}
if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
wr->num_sge > qp->sq.max_sges) {
*bad_wr = wr;
......@@ -2488,6 +2506,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
*stop = true;
expand = false;
}
} else if (is_hw_sq_empty(qp)) {
/* Do nothing */
expand = false;
*polled = false;
*stop = false;
} else {
*polled = true;
expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
......@@ -2593,6 +2616,11 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
*stop = true;
expand = false;
}
} else if (is_hw_rq_empty(qp)) {
/* Do nothing */
expand = false;
*polled = false;
*stop = false;
} else {
*polled = true;
expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
......
......@@ -2476,7 +2476,7 @@ int qib_create_agents(struct qib_ibdev *dev)
ibp = &dd->pport[p].ibport_data;
agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
NULL, 0, send_handler,
NULL, NULL);
NULL, NULL, 0);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
goto err;
......
......@@ -86,7 +86,6 @@ enum {
IPOIB_FLAG_INITIALIZED = 1,
IPOIB_FLAG_ADMIN_UP = 2,
IPOIB_PKEY_ASSIGNED = 3,
IPOIB_PKEY_STOP = 4,
IPOIB_FLAG_SUBINTERFACE = 5,
IPOIB_MCAST_RUN = 6,
IPOIB_STOP_REAPER = 7,
......@@ -312,7 +311,6 @@ struct ipoib_dev_priv {
struct list_head multicast_list;
struct rb_root multicast_tree;
struct delayed_work pkey_poll_task;
struct delayed_work mcast_task;
struct work_struct carrier_on_task;
struct work_struct flush_light;
......@@ -473,10 +471,11 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
int ipoib_ib_dev_open(struct net_device *dev);
int ipoib_ib_dev_open(struct net_device *dev, int flush);
int ipoib_ib_dev_up(struct net_device *dev);
int ipoib_ib_dev_down(struct net_device *dev, int flush);
int ipoib_ib_dev_stop(struct net_device *dev, int flush);
void ipoib_pkey_dev_check_presence(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
void ipoib_dev_cleanup(struct net_device *dev);
......@@ -532,8 +531,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf);
void ipoib_setup(struct net_device *dev);
void ipoib_pkey_poll(struct work_struct *work);
int ipoib_pkey_dev_delay_open(struct net_device *dev);
void ipoib_pkey_open(struct ipoib_dev_priv *priv);
void ipoib_drain_cq(struct net_device *dev);
void ipoib_set_ethtool_ops(struct net_device *dev);
......
......@@ -281,10 +281,8 @@ void ipoib_delete_debug_files(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
if (priv->mcg_dentry)
debugfs_remove(priv->mcg_dentry);
if (priv->path_dentry)
debugfs_remove(priv->path_dentry);
debugfs_remove(priv->mcg_dentry);
debugfs_remove(priv->path_dentry);
}
int ipoib_register_debugfs(void)
......
......@@ -664,17 +664,18 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
drain_tx_cq((struct net_device *)ctx);
}
int ipoib_ib_dev_open(struct net_device *dev)
int ipoib_ib_dev_open(struct net_device *dev, int flush)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
ipoib_pkey_dev_check_presence(dev);
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
(!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
return -1;
}
set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
ret = ipoib_init_qp(dev);
if (ret) {
......@@ -705,16 +706,17 @@ int ipoib_ib_dev_open(struct net_device *dev)
dev_stop:
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
napi_enable(&priv->napi);
ipoib_ib_dev_stop(dev, 1);
ipoib_ib_dev_stop(dev, flush);
return -1;
}
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
u16 pkey_index = 0;
if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
if (!(priv->pkey & 0x7fff) ||
ib_find_pkey(priv->ca, priv->port, priv->pkey,
&priv->pkey_index))
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
else
set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
......@@ -745,14 +747,6 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
netif_carrier_off(dev);
/* Shutdown the P_Key thread if still active */
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
mutex_lock(&pkey_mutex);
set_bit(IPOIB_PKEY_STOP, &priv->flags);
cancel_delayed_work_sync(&priv->pkey_poll_task);
mutex_unlock(&pkey_mutex);
}
ipoib_mcast_stop_thread(dev, flush);
ipoib_mcast_dev_flush(dev);
......@@ -924,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
(unsigned long) dev);
if (dev->flags & IFF_UP) {
if (ipoib_ib_dev_open(dev)) {
if (ipoib_ib_dev_open(dev, 1)) {
ipoib_transport_dev_cleanup(dev);
return -ENODEV;
}
......@@ -966,13 +960,27 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
return 1;
}
/*
* returns 0 if pkey value was found in a different slot.
*/
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
u16 old_index = priv->pkey_index;
priv->pkey_index = 0;
ipoib_pkey_dev_check_presence(priv->dev);
if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
(old_index == priv->pkey_index))
return 1;
return 0;
}
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
enum ipoib_flush_level level)
{
struct ipoib_dev_priv *cpriv;
struct net_device *dev = priv->dev;
u16 new_index;
int result;
down_read(&priv->vlan_rwsem);
......@@ -986,16 +994,20 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
up_read(&priv->vlan_rwsem);
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
/* for non-child devices must check/update the pkey value here */
if (level == IPOIB_FLUSH_HEAVY &&
!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
update_parent_pkey(priv);
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
level != IPOIB_FLUSH_HEAVY) {
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
return;
}
if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
/* interface is down. update pkey and leave. */
if (level == IPOIB_FLUSH_HEAVY) {
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
update_parent_pkey(priv);
else
update_child_pkey(priv);
}
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
return;
}
......@@ -1005,20 +1017,13 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
* (parent) devices should always take whatever is present in pkey index 0
*/
if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
ipoib_ib_dev_down(dev, 0);
ipoib_ib_dev_stop(dev, 0);
if (ipoib_pkey_dev_delay_open(dev))
return;
}
/* restart QP only if P_Key index is changed */
if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
new_index == priv->pkey_index) {
result = update_child_pkey(priv);
if (result) {
/* restart QP only if P_Key index is changed */
ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
return;
}
priv->pkey_index = new_index;
} else {
result = update_parent_pkey(priv);
/* restart QP only if P_Key value changed */
......@@ -1038,8 +1043,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
ipoib_ib_dev_down(dev, 0);
if (level == IPOIB_FLUSH_HEAVY) {
ipoib_ib_dev_stop(dev, 0);
ipoib_ib_dev_open(dev);
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_ib_dev_stop(dev, 0);
if (ipoib_ib_dev_open(dev, 0) != 0)
return;
if (netif_queue_stopped(dev))
netif_start_queue(dev);
}
/*
......@@ -1094,54 +1103,4 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
ipoib_transport_dev_cleanup(dev);
}
/*
* Delayed P_Key Assignment Interim Support
*
* The following is an initial implementation of the delayed P_Key assignment
* mechanism. It uses the same approach implemented for the multicast
* group join. The single goal of this implementation is to quickly address
* Bug #2507. This implementation will probably be removed when the P_Key
* change async notification is available.
*/
void ipoib_pkey_poll(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
struct net_device *dev = priv->dev;
ipoib_pkey_dev_check_presence(dev);
if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
ipoib_open(dev);
else {
mutex_lock(&pkey_mutex);
if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
queue_delayed_work(ipoib_workqueue,
&priv->pkey_poll_task,
HZ);
mutex_unlock(&pkey_mutex);
}
}
int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
/* Look for the interface pkey value in the IB Port P_Key table and */
/* set the interface pkey assignment flag */
ipoib_pkey_dev_check_presence(dev);
/* P_Key value not assigned yet - start polling */
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
mutex_lock(&pkey_mutex);
clear_bit(IPOIB_PKEY_STOP, &priv->flags);
queue_delayed_work(ipoib_workqueue,
&priv->pkey_poll_task,
HZ);
mutex_unlock(&pkey_mutex);
return 1;
}
return 0;
}
......@@ -108,11 +108,11 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
if (ipoib_pkey_dev_delay_open(dev))
return 0;
if (ipoib_ib_dev_open(dev))
if (ipoib_ib_dev_open(dev, 1)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
goto err_disable;
}
if (ipoib_ib_dev_up(dev))
goto err_stop;
......@@ -1379,7 +1379,6 @@ void ipoib_setup(struct net_device *dev)
INIT_LIST_HEAD(&priv->dead_ahs);
INIT_LIST_HEAD(&priv->multicast_list);
INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
......
......@@ -596,20 +596,28 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
struct iser_conn *ib_conn;
struct iscsi_endpoint *ep;
ep = iscsi_create_endpoint(sizeof(*ib_conn));
ep = iscsi_create_endpoint(0);
if (!ep)
return ERR_PTR(-ENOMEM);
ib_conn = ep->dd_data;
ib_conn = kzalloc(sizeof(*ib_conn), GFP_KERNEL);
if (!ib_conn) {
err = -ENOMEM;
goto failure;
}
ep->dd_data = ib_conn;
ib_conn->ep = ep;
iser_conn_init(ib_conn);
err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
non_blocking);
err = iser_connect(ib_conn, NULL, dst_addr, non_blocking);
if (err)
return ERR_PTR(err);
goto failure;
return ep;
failure:
iscsi_destroy_endpoint(ep);
return ERR_PTR(err);
}
static int
......@@ -619,15 +627,16 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
int rc;
ib_conn = ep->dd_data;
rc = wait_event_interruptible_timeout(ib_conn->wait,
ib_conn->state == ISER_CONN_UP,
msecs_to_jiffies(timeout_ms));
rc = wait_for_completion_interruptible_timeout(&ib_conn->up_completion,
msecs_to_jiffies(timeout_ms));
/* if conn establishment failed, return error code to iscsi */
if (!rc &&
(ib_conn->state == ISER_CONN_TERMINATING ||
ib_conn->state == ISER_CONN_DOWN))
rc = -1;
if (rc == 0) {
mutex_lock(&ib_conn->state_mutex);
if (ib_conn->state == ISER_CONN_TERMINATING ||
ib_conn->state == ISER_CONN_DOWN)
rc = -1;
mutex_unlock(&ib_conn->state_mutex);
}
iser_info("ib conn %p rc = %d\n", ib_conn, rc);
......@@ -646,19 +655,25 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
ib_conn = ep->dd_data;
iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state);
mutex_lock(&ib_conn->state_mutex);
iser_conn_terminate(ib_conn);
/*
* if iser_conn and iscsi_conn are bound, we must wait iscsi_conn_stop
* call and ISER_CONN_DOWN state before freeing the iser resources.
* otherwise we are safe to free resources immediately.
* if iser_conn and iscsi_conn are bound, we must wait for
* iscsi_conn_stop and flush errors completion before freeing
* the iser resources. Otherwise we are safe to free resources
* immediately.
*/
if (ib_conn->iscsi_conn) {
INIT_WORK(&ib_conn->release_work, iser_release_work);
queue_work(release_wq, &ib_conn->release_work);
mutex_unlock(&ib_conn->state_mutex);
} else {
ib_conn->state = ISER_CONN_DOWN;
mutex_unlock(&ib_conn->state_mutex);
iser_conn_release(ib_conn);
}
iscsi_destroy_endpoint(ep);
}
static umode_t iser_attr_is_visible(int param_type, int param)
......
......@@ -326,7 +326,6 @@ struct iser_conn {
struct iser_device *device; /* device context */
struct rdma_cm_id *cma_id; /* CMA ID */
struct ib_qp *qp; /* QP */
wait_queue_head_t wait; /* waitq for conn/disconn */
unsigned qp_max_recv_dtos; /* num of rx buffers */
unsigned qp_max_recv_dtos_mask; /* above minus 1 */
unsigned min_posted_rx; /* qp_max_recv_dtos >> 2 */
......@@ -335,6 +334,9 @@ struct iser_conn {
char name[ISER_OBJECT_NAME_SIZE];
struct work_struct release_work;
struct completion stop_completion;
struct mutex state_mutex;
struct completion flush_completion;
struct completion up_completion;
struct list_head conn_list; /* entry in ig conn list */
char *login_buf;
......@@ -448,8 +450,8 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
int iser_connect(struct iser_conn *ib_conn,
struct sockaddr_in *src_addr,
struct sockaddr_in *dst_addr,
struct sockaddr *src_addr,
struct sockaddr *dst_addr,
int non_blocking);
int iser_reg_page_vec(struct iser_conn *ib_conn,
......
......@@ -491,10 +491,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
}
/**
* releases the QP objects, returns 0 on success,
* -1 on failure
* releases the QP object
*/
static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
static void iser_free_ib_conn_res(struct iser_conn *ib_conn)
{
int cq_index;
BUG_ON(ib_conn == NULL);
......@@ -513,8 +512,6 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
}
ib_conn->qp = NULL;
return 0;
}
/**
......@@ -568,31 +565,40 @@ static void iser_device_try_release(struct iser_device *device)
mutex_unlock(&ig.device_list_mutex);
}
/**
* Called with state mutex held
**/
static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
enum iser_ib_conn_state comp,
enum iser_ib_conn_state exch)
{
int ret;
spin_lock_bh(&ib_conn->lock);
if ((ret = (ib_conn->state == comp)))
ib_conn->state = exch;
spin_unlock_bh(&ib_conn->lock);
return ret;
}
void iser_release_work(struct work_struct *work)
{
struct iser_conn *ib_conn;
int rc;
ib_conn = container_of(work, struct iser_conn, release_work);
/* wait for .conn_stop callback */
wait_for_completion(&ib_conn->stop_completion);
rc = wait_for_completion_timeout(&ib_conn->stop_completion, 30 * HZ);
WARN_ON(rc == 0);
/* wait for the QP's post send and post receive buffers to empty */
wait_event_interruptible(ib_conn->wait,
ib_conn->state == ISER_CONN_DOWN);
rc = wait_for_completion_timeout(&ib_conn->flush_completion, 30 * HZ);
WARN_ON(rc == 0);
ib_conn->state = ISER_CONN_DOWN;
mutex_lock(&ib_conn->state_mutex);
ib_conn->state = ISER_CONN_DOWN;
mutex_unlock(&ib_conn->state_mutex);
iser_conn_release(ib_conn);
}
......@@ -604,23 +610,27 @@ void iser_conn_release(struct iser_conn *ib_conn)
{
struct iser_device *device = ib_conn->device;
BUG_ON(ib_conn->state == ISER_CONN_UP);
mutex_lock(&ig.connlist_mutex);
list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
mutex_lock(&ib_conn->state_mutex);
BUG_ON(ib_conn->state != ISER_CONN_DOWN);
iser_free_rx_descriptors(ib_conn);
iser_free_ib_conn_res(ib_conn);
ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
if (device != NULL)
iser_device_try_release(device);
mutex_unlock(&ib_conn->state_mutex);
/* if cma handler context, the caller actually destroy the id */
if (ib_conn->cma_id != NULL) {
rdma_destroy_id(ib_conn->cma_id);
ib_conn->cma_id = NULL;
}
iscsi_destroy_endpoint(ib_conn->ep);
kfree(ib_conn);
}
/**
......@@ -642,22 +652,31 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
ib_conn,err);
}
/**
* Called with state mutex held
**/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *ib_conn;
ib_conn = (struct iser_conn *)cma_id->context;
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
}
/**
* Called with state mutex held
**/
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
struct iser_conn *ib_conn;
int ret;
ib_conn = (struct iser_conn *)cma_id->context;
if (ib_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
device = iser_device_find_by_ib_device(cma_id);
if (!device) {
iser_err("device lookup/creation failed\n");
......@@ -665,7 +684,6 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
return;
}
ib_conn = (struct iser_conn *)cma_id->context;
ib_conn->device = device;
/* connection T10-PI support */
......@@ -689,18 +707,27 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
}
}
/**
* Called with state mutex held
**/
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
int ret;
struct iser_cm_hdr req_hdr;
struct iser_conn *ib_conn = (struct iser_conn *)cma_id->context;
struct iser_device *device = ib_conn->device;
if (ib_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
if (ret)
goto failure;
memset(&conn_param, 0, sizeof conn_param);
conn_param.responder_resources = 4;
conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
conn_param.initiator_depth = 1;
conn_param.retry_count = 7;
conn_param.rnr_retry_count = 6;
......@@ -728,12 +755,16 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
struct ib_qp_attr attr;
struct ib_qp_init_attr init_attr;
ib_conn = (struct iser_conn *)cma_id->context;
if (ib_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
ib_conn = (struct iser_conn *)cma_id->context;
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_PENDING, ISER_CONN_UP))
wake_up_interruptible(&ib_conn->wait);
ib_conn->state = ISER_CONN_UP;
complete(&ib_conn->up_completion);
}
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
......@@ -752,19 +783,25 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
iser_err("iscsi_iser connection isn't bound\n");
}
/* Complete the termination process if no posts are pending */
/* Complete the termination process if no posts are pending. This code
* block also exists in iser_handle_comp_error(), but it is needed here
* for cases of no flushes at all, e.g. discovery over rdma.
*/
if (ib_conn->post_recv_buf_count == 0 &&
(atomic_read(&ib_conn->post_send_buf_count) == 0)) {
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
complete(&ib_conn->flush_completion);
}
}
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
struct iser_conn *ib_conn;
ib_conn = (struct iser_conn *)cma_id->context;
iser_info("event %d status %d conn %p id %p\n",
event->event, event->status, cma_id->context, cma_id);
mutex_lock(&ib_conn->state_mutex);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
iser_addr_handler(cma_id);
......@@ -785,24 +822,28 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
iser_disconnected_handler(cma_id);
break;
default:
iser_err("Unexpected RDMA CM event (%d)\n", event->event);
break;
}
mutex_unlock(&ib_conn->state_mutex);
return 0;
}
void iser_conn_init(struct iser_conn *ib_conn)
{
ib_conn->state = ISER_CONN_INIT;
init_waitqueue_head(&ib_conn->wait);
ib_conn->post_recv_buf_count = 0;
atomic_set(&ib_conn->post_send_buf_count, 0);
init_completion(&ib_conn->stop_completion);
init_completion(&ib_conn->flush_completion);
init_completion(&ib_conn->up_completion);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
mutex_init(&ib_conn->state_mutex);
}
/**
......@@ -810,22 +851,21 @@ void iser_conn_init(struct iser_conn *ib_conn)
* sleeps until the connection is established or rejected
*/
int iser_connect(struct iser_conn *ib_conn,
struct sockaddr_in *src_addr,
struct sockaddr_in *dst_addr,
struct sockaddr *src_addr,
struct sockaddr *dst_addr,
int non_blocking)
{
struct sockaddr *src, *dst;
int err = 0;
sprintf(ib_conn->name, "%pI4:%d",
&dst_addr->sin_addr.s_addr, dst_addr->sin_port);
mutex_lock(&ib_conn->state_mutex);
sprintf(ib_conn->name, "%pISp", dst_addr);
iser_info("connecting to: %s\n", ib_conn->name);
/* the device is known only --after-- address resolution */
ib_conn->device = NULL;
iser_info("connecting to: %pI4, port 0x%x\n",
&dst_addr->sin_addr, dst_addr->sin_port);
ib_conn->state = ISER_CONN_PENDING;
ib_conn->cma_id = rdma_create_id(iser_cma_handler,
......@@ -837,23 +877,21 @@ int iser_connect(struct iser_conn *ib_conn,
goto id_failure;
}
src = (struct sockaddr *)src_addr;
dst = (struct sockaddr *)dst_addr;
err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000);
err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
if (err) {
iser_err("rdma_resolve_addr failed: %d\n", err);
goto addr_failure;
}
if (!non_blocking) {
wait_event_interruptible(ib_conn->wait,
(ib_conn->state != ISER_CONN_PENDING));
wait_for_completion_interruptible(&ib_conn->up_completion);
if (ib_conn->state != ISER_CONN_UP) {
err = -EIO;
goto connect_failure;
}
}
mutex_unlock(&ib_conn->state_mutex);
mutex_lock(&ig.connlist_mutex);
list_add(&ib_conn->conn_list, &ig.connlist);
......@@ -865,6 +903,7 @@ int iser_connect(struct iser_conn *ib_conn,
addr_failure:
ib_conn->state = ISER_CONN_DOWN;
connect_failure:
mutex_unlock(&ib_conn->state_mutex);
iser_conn_release(ib_conn);
return err;
}
......@@ -1049,18 +1088,19 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
if (ib_conn->post_recv_buf_count == 0 &&
atomic_read(&ib_conn->post_send_buf_count) == 0) {
/* getting here when the state is UP means that the conn is *
* being terminated asynchronously from the iSCSI layer's *
* perspective. */
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
ISER_CONN_TERMINATING))
/**
* getting here when the state is UP means that the conn is
* being terminated asynchronously from the iSCSI layer's
* perspective. It is safe to peek at the connection state
* since iscsi_conn_failure is allowed to be called twice.
**/
if (ib_conn->state == ISER_CONN_UP)
iscsi_conn_failure(ib_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
/* no more non completed posts to the QP, complete the
* termination process w.o worrying on disconnect event */
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
complete(&ib_conn->flush_completion);
}
}
......
......@@ -130,6 +130,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;
static struct ib_client srp_client = {
.name = "srp",
......@@ -731,7 +732,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
spin_unlock_irq(&target->lock);
if (changed)
queue_work(system_long_wq, &target->remove_work);
queue_work(srp_remove_wq, &target->remove_work);
return changed;
}
......@@ -1643,10 +1644,14 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
SCSI_SENSE_BUFFERSIZE));
}
if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
srp_free_req(target, req, scmnd,
be32_to_cpu(rsp->req_lim_delta));
......@@ -3261,9 +3266,10 @@ static void srp_remove_one(struct ib_device *device)
spin_unlock(&host->target_lock);
/*
* Wait for target port removal tasks.
* Wait for tl_err and target port removal tasks.
*/
flush_workqueue(system_long_wq);
flush_workqueue(srp_remove_wq);
kfree(host);
}
......@@ -3313,16 +3319,22 @@ static int __init srp_init_module(void)
indirect_sg_entries = cmd_sg_entries;
}
srp_remove_wq = create_workqueue("srp_remove");
if (!srp_remove_wq) {
ret = -ENOMEM;
goto out;
}
ret = -ENOMEM;
ib_srp_transport_template =
srp_attach_transport(&ib_srp_transport_functions);
if (!ib_srp_transport_template)
return -ENOMEM;
goto destroy_wq;
ret = class_register(&srp_class);
if (ret) {
pr_err("couldn't register class infiniband_srp\n");
srp_release_transport(ib_srp_transport_template);
return ret;
goto release_tr;
}
ib_sa_register_client(&srp_sa_client);
......@@ -3330,13 +3342,22 @@ static int __init srp_init_module(void)
ret = ib_register_client(&srp_client);
if (ret) {
pr_err("couldn't register IB client\n");
srp_release_transport(ib_srp_transport_template);
ib_sa_unregister_client(&srp_sa_client);
class_unregister(&srp_class);
return ret;
goto unreg_sa;
}
return 0;
out:
return ret;
unreg_sa:
ib_sa_unregister_client(&srp_sa_client);
class_unregister(&srp_class);
release_tr:
srp_release_transport(ib_srp_transport_template);
destroy_wq:
destroy_workqueue(srp_remove_wq);
goto out;
}
static void __exit srp_cleanup_module(void)
......@@ -3345,6 +3366,7 @@ static void __exit srp_cleanup_module(void)
ib_sa_unregister_client(&srp_sa_client);
class_unregister(&srp_class);
srp_release_transport(ib_srp_transport_template);
destroy_workqueue(srp_remove_wq);
}
module_init(srp_init_module);
......
......@@ -198,6 +198,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
case IB_EVENT_PKEY_CHANGE:
case IB_EVENT_SM_CHANGE:
case IB_EVENT_CLIENT_REREGISTER:
case IB_EVENT_GID_CHANGE:
/* Refresh port data asynchronously. */
if (event->element.port_num <= sdev->device->phys_port_cnt) {
sport = &sdev->port[event->element.port_num - 1];
......@@ -563,7 +564,7 @@ static int srpt_refresh_port(struct srpt_port *sport)
&reg_req, 0,
srpt_mad_send_handler,
srpt_mad_recv_handler,
sport);
sport, 0);
if (IS_ERR(sport->mad_agent)) {
ret = PTR_ERR(sport->mad_agent);
sport->mad_agent = NULL;
......
......@@ -890,5 +890,6 @@ void be_roce_dev_remove(struct be_adapter *);
*/
void be_roce_dev_open(struct be_adapter *);
void be_roce_dev_close(struct be_adapter *);
void be_roce_dev_shutdown(struct be_adapter *);
#endif /* BE_H */
......@@ -4958,6 +4958,7 @@ static void be_shutdown(struct pci_dev *pdev)
if (!adapter)
return;
be_roce_dev_shutdown(adapter);
cancel_delayed_work_sync(&adapter->work);
cancel_delayed_work_sync(&adapter->func_recovery_work);
......
......@@ -120,7 +120,8 @@ static void _be_roce_dev_open(struct be_adapter *adapter)
{
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 0);
ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
BE_DEV_UP);
}
void be_roce_dev_open(struct be_adapter *adapter)
......@@ -136,7 +137,8 @@ static void _be_roce_dev_close(struct be_adapter *adapter)
{
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 1);
ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
BE_DEV_DOWN);
}
void be_roce_dev_close(struct be_adapter *adapter)
......@@ -148,6 +150,18 @@ void be_roce_dev_close(struct be_adapter *adapter)
}
}
void be_roce_dev_shutdown(struct be_adapter *adapter)
{
if (be_roce_supported(adapter)) {
mutex_lock(&be_adapter_list_lock);
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
BE_DEV_SHUTDOWN);
mutex_unlock(&be_adapter_list_lock);
}
}
int be_roce_register_driver(struct ocrdma_driver *drv)
{
struct be_adapter *dev;
......
......@@ -62,7 +62,8 @@ struct ocrdma_driver {
enum {
BE_DEV_UP = 0,
BE_DEV_DOWN = 1
BE_DEV_DOWN = 1,
BE_DEV_SHUTDOWN = 2
};
/* APIs for RoCE driver to register callback handlers,
......
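Aside (illustrative, not from this commit): a RoCE driver registered through be_roce_register_driver() now also sees the new BE_DEV_SHUTDOWN event in its state_change_handler. A hedged sketch of such a handler (hypothetical function name, relying on the be_roce.h definitions shown above):
#include "be_roce.h"	/* BE_DEV_UP / BE_DEV_DOWN / BE_DEV_SHUTDOWN */
static void example_state_change_handler(struct ocrdma_dev *dev, u32 event)
{
	switch (event) {
	case BE_DEV_UP:
		/* NIC is up: bring the RoCE port online */
		break;
	case BE_DEV_DOWN:
		/* NIC went down: quiesce traffic but keep resources */
		break;
	case BE_DEV_SHUTDOWN:
		/* PCI shutdown: release everything, the adapter is going away */
		break;
	}
}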
......@@ -1310,6 +1310,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = mlx4_MAD_IFC_wrapper
},
{
.opcode = MLX4_CMD_MAD_DEMUX,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_CMD_EPERM_wrapper
},
{
.opcode = MLX4_CMD_QUERY_IF_STAT,
.has_inbox = false,
......
......@@ -136,7 +136,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[7] = "FSM (MAC anti-spoofing) support",
[8] = "Dynamic QP updates support",
[9] = "Device managed flow steering IPoIB support",
[10] = "TCP/IP offloads/flow-steering for VXLAN support"
[10] = "TCP/IP offloads/flow-steering for VXLAN support",
[11] = "MAD DEMUX (Secure-Host) support"
};
int i;
......@@ -571,6 +572,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
#define QUERY_DEV_CAP_VXLAN 0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
......@@ -748,6 +750,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(dev_cap->max_counters, outbox,
QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
MLX4_GET(field32, outbox,
QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
if (field32 & (1 << 0))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;
MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
if (field32 & (1 << 16))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
......@@ -2016,3 +2023,85 @@ void mlx4_opreq_action(struct work_struct *work)
out:
mlx4_free_cmd_mailbox(dev, mailbox);
}
static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *mailbox)
{
#define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10
#define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20
#define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40
#define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70
u32 set_attr_mask, getresp_attr_mask;
u32 trap_attr_mask, traprepress_attr_mask;
MLX4_GET(set_attr_mask, mailbox->buf,
MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
set_attr_mask);
MLX4_GET(getresp_attr_mask, mailbox->buf,
MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
getresp_attr_mask);
MLX4_GET(trap_attr_mask, mailbox->buf,
MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
trap_attr_mask);
MLX4_GET(traprepress_attr_mask, mailbox->buf,
MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
traprepress_attr_mask);
if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
traprepress_attr_mask)
return 1;
return 0;
}
int mlx4_config_mad_demux(struct mlx4_dev *dev)
{
struct mlx4_cmd_mailbox *mailbox;
int secure_host_active;
int err;
/* Check if mad_demux is supported */
if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX");
return -ENOMEM;
}
/* Query mad_demux to find out which MADs are handled by internal sma */
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err) {
mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
err);
goto out;
}
secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox);
/* Config mad_demux to handle all MADs returned by the query above */
err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err) {
mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
goto out;
}
if (secure_host_active)
mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
......@@ -1831,6 +1831,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
goto err_mr_table_free;
}
err = mlx4_config_mad_demux(dev);
if (err) {
mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
goto err_mcg_table_free;
}
}
err = mlx4_init_eq_table(dev);
......
......@@ -1313,5 +1313,6 @@ void mlx4_init_quotas(struct mlx4_dev *dev);
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
/* Returns the VF index of slave */
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
int mlx4_config_mad_demux(struct mlx4_dev *dev);
#endif /* MLX4_H */
......@@ -473,7 +473,8 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport)
if (delay > 0)
queue_delayed_work(system_long_wq, &rport->reconnect_work,
1UL * delay * HZ);
if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) &&
srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
rport->state);
scsi_target_block(&shost->shost_gendev);
......
......@@ -116,6 +116,7 @@ enum {
/* special QP and management commands */
MLX4_CMD_CONF_SPECIAL_QP = 0x23,
MLX4_CMD_MAD_IFC = 0x24,
MLX4_CMD_MAD_DEMUX = 0x203,
/* multicast commands */
MLX4_CMD_READ_MCG = 0x25,
......@@ -185,6 +186,12 @@ enum {
MLX4_SET_PORT_VXLAN = 0xB
};
enum {
MLX4_CMD_MAD_DEMUX_CONFIG = 0,
MLX4_CMD_MAD_DEMUX_QUERY_STATE = 1,
MLX4_CMD_MAD_DEMUX_QUERY_RESTR = 2, /* Query mad demux restrictions */
};
enum {
MLX4_CMD_WRAPPED,
MLX4_CMD_NATIVE
......
......@@ -172,6 +172,7 @@ enum {
MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8,
MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9,
MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10,
MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11,
};
enum {
......
......@@ -40,6 +40,7 @@
#include <linux/list.h>
#include <rdma/ib_verbs.h>
#include <uapi/rdma/ib_user_mad.h>
/* Management base version */
#define IB_MGMT_BASE_VERSION 1
......@@ -355,9 +356,13 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
* @hi_tid: Access layer assigned transaction ID for this client.
* Unsolicited MADs sent by this client will have the upper 32-bits
* of their TID set to this value.
* @flags: registration flags
* @port_num: Port number on which QP is registered
* @rmpp_version: If set, indicates the RMPP version used by this agent.
*/
enum {
IB_MAD_USER_RMPP = IB_USER_MAD_USER_RMPP,
};
struct ib_mad_agent {
struct ib_device *device;
struct ib_qp *qp;
......@@ -367,6 +372,7 @@ struct ib_mad_agent {
ib_mad_snoop_handler snoop_handler;
void *context;
u32 hi_tid;
u32 flags;
u8 port_num;
u8 rmpp_version;
};
......@@ -426,6 +432,7 @@ struct ib_mad_recv_wc {
* in the range from 0x30 to 0x4f. Otherwise not used.
* @method_mask: The caller will receive unsolicited MADs for any method
* where @method_mask = 1.
*
*/
struct ib_mad_reg_req {
u8 mgmt_class;
......@@ -451,6 +458,7 @@ struct ib_mad_reg_req {
* @recv_handler: The completion callback routine invoked for a received
* MAD.
* @context: User specified context associated with the registration.
* @registration_flags: Registration flags to set for this agent
*/
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
u8 port_num,
......@@ -459,7 +467,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
u8 rmpp_version,
ib_mad_send_handler send_handler,
ib_mad_recv_handler recv_handler,
void *context);
void *context,
u32 registration_flags);
enum ib_mad_snoop_flags {
/*IB_MAD_SNOOP_POSTED_SENDS = 1,*/
......@@ -661,4 +670,11 @@ void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num);
*/
void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
/**
* ib_mad_kernel_rmpp_agent - Returns if the agent is performing RMPP.
* @agent: the agent in question
* @return: true if agent is performing rmpp, false otherwise.
*/
int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent);
#endif /* IB_MAD_H */
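Aside (a hedged kernel-side sketch, not part of the diff; module and function names are made up): in-tree consumers of ib_register_mad_agent() now pass an extra registration_flags argument, and callers with no special needs simply pass 0, as the srpt hunk above does. A minimal hypothetical caller might look like:
#include <rdma/ib_mad.h>

static void example_send_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_wc *send_wc)
{
	ib_free_send_mad(send_wc->send_buf);	/* release the send buffer */
}

static void example_recv_handler(struct ib_mad_agent *agent,
				 struct ib_mad_recv_wc *recv_wc)
{
	ib_free_recv_mad(recv_wc);		/* just sink whatever arrives */
}

static struct ib_mad_agent *example_register(struct ib_device *dev, u8 port)
{
	return ib_register_mad_agent(dev, port, IB_QPT_GSI,
				     NULL, 0,		/* solicited MADs only */
				     example_send_handler,
				     example_recv_handler,
				     NULL,		/* context */
				     0);		/* registration_flags */
}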
......@@ -191,6 +191,45 @@ struct ib_user_mad_reg_req {
__u8 rmpp_version;
};
/**
* ib_user_mad_reg_req2 - MAD registration request
*
* @id - Set by the _kernel_; used by userspace to identify the
* registered agent in future requests.
* @qpn - Queue pair number; must be 0 or 1.
* @mgmt_class - Indicates which management class of MADs should be
* received by the caller. This field is only required if
* the user wishes to receive unsolicited MADs, otherwise
* it should be 0.
* @mgmt_class_version - Indicates which version of MADs for the given
* management class to receive.
* @res - Ignored.
* @flags - additional registration flags; Must be in the set of
* flags defined in IB_USER_MAD_REG_FLAGS_CAP
* @method_mask - The caller wishes to receive unsolicited MADs for the
* methods whose bit(s) is(are) set.
* @oui - Indicates IEEE OUI to use when mgmt_class is a vendor
* class in the range from 0x30 to 0x4f. Otherwise not
* used.
* @rmpp_version - If set, indicates the RMPP version to use.
*/
enum {
IB_USER_MAD_USER_RMPP = (1 << 0),
};
#define IB_USER_MAD_REG_FLAGS_CAP (IB_USER_MAD_USER_RMPP)
struct ib_user_mad_reg_req2 {
__u32 id;
__u32 qpn;
__u8 mgmt_class;
__u8 mgmt_class_version;
__u16 res;
__u32 flags;
__u64 method_mask[2];
__u32 oui;
__u8 rmpp_version;
__u8 reserved[3];
};
#define IB_IOCTL_MAGIC 0x1b
#define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \
......@@ -200,4 +239,7 @@ struct ib_user_mad_reg_req {
#define IB_USER_MAD_ENABLE_PKEY _IO(IB_IOCTL_MAGIC, 3)
#define IB_USER_MAD_REGISTER_AGENT2 _IOWR(IB_IOCTL_MAGIC, 4, \
struct ib_user_mad_reg_req2)
#endif /* IB_USER_MAD_H */
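Aside (a minimal userspace sketch, not part of the diff; the umad device node path, the chosen management class and the method bit are illustrative assumptions): registering an agent through the new REGISTER_AGENT2 ioctl might look like:
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <rdma/ib_user_mad.h>

int main(void)
{
	struct ib_user_mad_reg_req2 req;
	int fd = open("/dev/infiniband/umad0", O_RDWR);	/* assumed node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.qpn = 1;				/* GSI QP */
	req.mgmt_class = 0x03;			/* illustrative class */
	req.mgmt_class_version = 2;
	req.flags = IB_USER_MAD_USER_RMPP;	/* userspace will handle RMPP */
	req.method_mask[0] = 1ULL << 0x12;	/* illustrative method bit */

	if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT2, &req) < 0) {
		perror("IB_USER_MAD_REGISTER_AGENT2");
		return 1;
	}
	printf("registered agent id %u\n", req.id);
	return 0;
}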
......@@ -34,6 +34,7 @@
#define RDMA_USER_CM_H
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in6.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_user_sa.h>
......