Commit d087f6ad authored by Roland Dreier


Merge branches 'core', 'cxgb4', 'ipoib', 'iser', 'iwcm', 'mad', 'misc', 'mlx4', 'mlx5', 'ocrdma' and 'srp' into for-next
Parents: 9376932d 678ea9b5 e42fa209 8d4aca7f 2f0304d2 1471cb6c db1044d4 a57f23f6 a8f731eb 96c51abe da05be29
@@ -26,6 +26,11 @@ Creating MAD agents
   ioctl.  Also, all agents registered through a file descriptor will
   be unregistered when the descriptor is closed.
 
+  2014 -- a new registration ioctl is now provided which allows additional
+       fields to be provided during registration.
+       Users of this registration call are implicitly setting the use of
+       pkey_index (see below).
+
 Receiving MADs
 
   MADs are received using read().  The receive side now supports
@@ -104,10 +109,10 @@ P_Key Index Handling
   The old ib_umad interface did not allow setting the P_Key index for
   MADs that are sent and did not provide a way for obtaining the P_Key
   index of received MADs.  A new layout for struct ib_user_mad_hdr
-  with a pkey_index member has been defined; however, to preserve
-  binary compatibility with older applications, this new layout will
-  not be used unless the IB_USER_MAD_ENABLE_PKEY ioctl is called
-  before a file descriptor is used for anything else.
+  with a pkey_index member has been defined; however, to preserve binary
+  compatibility with older applications, this new layout will not be used
+  unless one of IB_USER_MAD_ENABLE_PKEY or IB_USER_MAD_REGISTER_AGENT2 ioctl's
+  are called before a file descriptor is used for anything else.
 
   In September 2008, the IB_USER_MAD_ABI_VERSION will be incremented
   to 6, the new layout of struct ib_user_mad_hdr will be used by
......
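For context, a minimal userspace sketch of the registration flow described above, assuming the uapi definitions this series adds to <rdma/ib_user_mad.h> (struct ib_user_mad_reg_req2, IB_USER_MAD_REGISTER_AGENT2, IB_USER_MAD_USER_RMPP); the device path, management class, OUI and method mask below are placeholders, not values taken from the patch.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rdma/ib_user_mad.h>

int main(void)
{
    struct ib_user_mad_reg_req2 req;
    /* Placeholder device node; pick the umad device for your HCA port. */
    int fd = open("/dev/infiniband/umad0", O_RDWR);

    if (fd < 0) {
        perror("open");
        return 1;
    }

    memset(&req, 0, sizeof(req));
    req.qpn                = 1;              /* GSI */
    req.mgmt_class         = 0x30;           /* example: vendor class in the RMPP-capable range */
    req.mgmt_class_version = 1;
    req.oui                = 0x123456;       /* example OUI; must be non-zero for vendor classes */
    req.rmpp_version       = 1;
    req.flags              = IB_USER_MAD_USER_RMPP; /* reassemble RMPP in userspace */
    req.method_mask[0]     = 1ULL << 0x01;   /* example: receive unsolicited Get methods */

    /* On success the kernel writes the agent id back into req.id, and
     * pkey_index reporting is implicitly enabled for this fd. */
    if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT2, &req)) {
        perror("IB_USER_MAD_REGISTER_AGENT2");
        close(fd);
        return 1;
    }
    printf("registered agent id %u\n", req.id);

    close(fd);
    return 0;
}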
@@ -94,14 +94,14 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
     port_priv = ib_get_agent_port(device, port_num);
     if (!port_priv) {
-        printk(KERN_ERR SPFX "Unable to find port agent\n");
+        dev_err(&device->dev, "Unable to find port agent\n");
         return;
     }
 
     agent = port_priv->agent[qpn];
     ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
     if (IS_ERR(ah)) {
-        printk(KERN_ERR SPFX "ib_create_ah_from_wc error %ld\n",
+        dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n",
             PTR_ERR(ah));
         return;
     }
@@ -110,7 +110,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
                    IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                    GFP_KERNEL);
     if (IS_ERR(send_buf)) {
-        printk(KERN_ERR SPFX "ib_create_send_mad error\n");
+        dev_err(&device->dev, "ib_create_send_mad error\n");
         goto err1;
     }
@@ -125,7 +125,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
     }
 
     if (ib_post_send_mad(send_buf, NULL)) {
-        printk(KERN_ERR SPFX "ib_post_send_mad error\n");
+        dev_err(&device->dev, "ib_post_send_mad error\n");
         goto err2;
     }
     return;
@@ -151,7 +151,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
     /* Create new device info */
     port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
     if (!port_priv) {
-        printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
+        dev_err(&device->dev, "No memory for ib_agent_port_private\n");
         ret = -ENOMEM;
         goto error1;
     }
@@ -161,7 +161,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
     port_priv->agent[0] = ib_register_mad_agent(device, port_num,
                              IB_QPT_SMI, NULL, 0,
                              &agent_send_handler,
-                             NULL, NULL);
+                             NULL, NULL, 0);
     if (IS_ERR(port_priv->agent[0])) {
         ret = PTR_ERR(port_priv->agent[0]);
         goto error2;
@@ -172,7 +172,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
     port_priv->agent[1] = ib_register_mad_agent(device, port_num,
                              IB_QPT_GSI, NULL, 0,
                              &agent_send_handler,
-                             NULL, NULL);
+                             NULL, NULL, 0);
     if (IS_ERR(port_priv->agent[1])) {
         ret = PTR_ERR(port_priv->agent[1]);
         goto error3;
@@ -202,7 +202,7 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
     port_priv = __ib_get_agent_port(device, port_num);
     if (port_priv == NULL) {
         spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
-        printk(KERN_ERR SPFX "Port %d not found\n", port_num);
+        dev_err(&device->dev, "Port %d not found\n", port_num);
         return -ENODEV;
     }
     list_del(&port_priv->port_list);
......
...@@ -3753,7 +3753,7 @@ static void cm_add_one(struct ib_device *ib_device) ...@@ -3753,7 +3753,7 @@ static void cm_add_one(struct ib_device *ib_device)
struct cm_port *port; struct cm_port *port;
struct ib_mad_reg_req reg_req = { struct ib_mad_reg_req reg_req = {
.mgmt_class = IB_MGMT_CLASS_CM, .mgmt_class = IB_MGMT_CLASS_CM,
.mgmt_class_version = IB_CM_CLASS_VERSION .mgmt_class_version = IB_CM_CLASS_VERSION,
}; };
struct ib_port_modify port_modify = { struct ib_port_modify port_modify = {
.set_port_cap_mask = IB_PORT_CM_SUP .set_port_cap_mask = IB_PORT_CM_SUP
...@@ -3801,7 +3801,8 @@ static void cm_add_one(struct ib_device *ib_device) ...@@ -3801,7 +3801,8 @@ static void cm_add_one(struct ib_device *ib_device)
0, 0,
cm_send_handler, cm_send_handler,
cm_recv_handler, cm_recv_handler,
port); port,
0);
if (IS_ERR(port->mad_agent)) if (IS_ERR(port->mad_agent))
goto error2; goto error2;
......
@@ -46,6 +46,7 @@
 #include <linux/completion.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 
 #include <rdma/iw_cm.h>
 #include <rdma/ib_addr.h>
@@ -65,6 +66,20 @@ struct iwcm_work {
     struct list_head free_list;
 };
 
+static unsigned int default_backlog = 256;
+
+static struct ctl_table_header *iwcm_ctl_table_hdr;
+static struct ctl_table iwcm_ctl_table[] = {
+    {
+        .procname     = "default_backlog",
+        .data         = &default_backlog,
+        .maxlen       = sizeof(default_backlog),
+        .mode         = 0644,
+        .proc_handler = proc_dointvec,
+    },
+    { }
+};
+
 /*
  * The following services provide a mechanism for pre-allocating iwcm_work
  * elements. The design pre-allocates them based on the cm_id type:
@@ -425,6 +440,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
 
     cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
+    if (!backlog)
+        backlog = default_backlog;
+
     ret = alloc_work_entries(cm_id_priv, backlog);
     if (ret)
         return ret;
@@ -1030,11 +1048,20 @@ static int __init iw_cm_init(void)
     if (!iwcm_wq)
         return -ENOMEM;
 
+    iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
+                         iwcm_ctl_table);
+    if (!iwcm_ctl_table_hdr) {
+        pr_err("iw_cm: couldn't register sysctl paths\n");
+        destroy_workqueue(iwcm_wq);
+        return -ENOMEM;
+    }
+
     return 0;
 }
 
 static void __exit iw_cm_cleanup(void)
 {
+    unregister_net_sysctl_table(iwcm_ctl_table_hdr);
     destroy_workqueue(iwcm_wq);
 }
......
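For context, the sysctl registered above should surface as net.iw_cm.default_backlog (conventionally /proc/sys/net/iw_cm/default_backlog) and is only consulted when a listener passes backlog == 0. A small userspace sketch, assuming that proc path:

#include <stdio.h>

/* Hypothetical helper: inspect and raise net.iw_cm.default_backlog via procfs. */
int main(void)
{
    const char *path = "/proc/sys/net/iw_cm/default_backlog";
    unsigned int backlog = 0;
    FILE *f = fopen(path, "r");

    if (!f) {
        perror(path);   /* kernel without this patch, or iw_cm not loaded */
        return 1;
    }
    if (fscanf(f, "%u", &backlog) == 1)
        printf("current default_backlog = %u\n", backlog);
    fclose(f);

    /* Raise the default used when a listener passes backlog == 0. */
    f = fopen(path, "w");
    if (f) {
        fprintf(f, "%u\n", 1024);
        fclose(f);
    }
    return 0;
}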
@@ -33,6 +33,9 @@
  * SOFTWARE.
  *
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -195,7 +198,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                        u8 rmpp_version,
                        ib_mad_send_handler send_handler,
                        ib_mad_recv_handler recv_handler,
-                       void *context)
+                       void *context,
+                       u32 registration_flags)
 {
     struct ib_mad_port_private *port_priv;
     struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
@@ -211,68 +215,109 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
     /* Validate parameters */
     qpn = get_spl_qp_index(qp_type);
-    if (qpn == -1)
+    if (qpn == -1) {
+        dev_notice(&device->dev,
+               "ib_register_mad_agent: invalid QP Type %d\n",
+               qp_type);
         goto error1;
+    }
 
-    if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
+    if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
+        dev_notice(&device->dev,
+               "ib_register_mad_agent: invalid RMPP Version %u\n",
+               rmpp_version);
         goto error1;
+    }
 
     /* Validate MAD registration request if supplied */
     if (mad_reg_req) {
-        if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
+        if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
+            dev_notice(&device->dev,
+                   "ib_register_mad_agent: invalid Class Version %u\n",
+                   mad_reg_req->mgmt_class_version);
             goto error1;
-        if (!recv_handler)
+        }
+        if (!recv_handler) {
+            dev_notice(&device->dev,
+                   "ib_register_mad_agent: no recv_handler\n");
             goto error1;
+        }
         if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
             /*
              * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
              * one in this range currently allowed
              */
             if (mad_reg_req->mgmt_class !=
-                IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+                IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+                dev_notice(&device->dev,
+                       "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
+                       mad_reg_req->mgmt_class);
                 goto error1;
+            }
         } else if (mad_reg_req->mgmt_class == 0) {
             /*
              * Class 0 is reserved in IBA and is used for
              * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
             */
+            dev_notice(&device->dev,
+                   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
             goto error1;
         } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
             /*
              * If class is in "new" vendor range,
              * ensure supplied OUI is not zero
             */
-            if (!is_vendor_oui(mad_reg_req->oui))
+            if (!is_vendor_oui(mad_reg_req->oui)) {
+                dev_notice(&device->dev,
+                       "ib_register_mad_agent: No OUI specified for class 0x%x\n",
+                       mad_reg_req->mgmt_class);
                 goto error1;
+            }
         }
         /* Make sure class supplied is consistent with RMPP */
         if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
-            if (rmpp_version)
+            if (rmpp_version) {
+                dev_notice(&device->dev,
+                       "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
+                       mad_reg_req->mgmt_class);
                 goto error1;
+            }
         }
         /* Make sure class supplied is consistent with QP type */
         if (qp_type == IB_QPT_SMI) {
             if ((mad_reg_req->mgmt_class !=
                     IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
                 (mad_reg_req->mgmt_class !=
-                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
+                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
+                dev_notice(&device->dev,
+                       "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
+                       mad_reg_req->mgmt_class);
                 goto error1;
+            }
         } else {
             if ((mad_reg_req->mgmt_class ==
                     IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
                 (mad_reg_req->mgmt_class ==
-                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
+                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
+                dev_notice(&device->dev,
+                       "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
+                       mad_reg_req->mgmt_class);
                 goto error1;
+            }
         }
     } else {
         /* No registration request supplied */
         if (!send_handler)
             goto error1;
+        if (registration_flags & IB_MAD_USER_RMPP)
+            goto error1;
     }
 
     /* Validate device and port */
     port_priv = ib_get_mad_port(device, port_num);
     if (!port_priv) {
+        dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
         ret = ERR_PTR(-ENODEV);
         goto error1;
     }
@@ -280,6 +325,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
     /* Verify the QP requested is supported. For example, Ethernet devices
      * will not have QP0 */
     if (!port_priv->qp_info[qpn].qp) {
+        dev_notice(&device->dev,
+               "ib_register_mad_agent: QP %d not supported\n", qpn);
         ret = ERR_PTR(-EPROTONOSUPPORT);
         goto error1;
     }
@@ -316,6 +363,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
     mad_agent_priv->agent.context = context;
     mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
     mad_agent_priv->agent.port_num = port_num;
+    mad_agent_priv->agent.flags = registration_flags;
     spin_lock_init(&mad_agent_priv->lock);
     INIT_LIST_HEAD(&mad_agent_priv->send_list);
     INIT_LIST_HEAD(&mad_agent_priv->wait_list);
...@@ -706,7 +754,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, ...@@ -706,7 +754,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
smi_handle_dr_smp_send(smp, device->node_type, port_num) == smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
IB_SMI_DISCARD) { IB_SMI_DISCARD) {
ret = -EINVAL; ret = -EINVAL;
printk(KERN_ERR PFX "Invalid directed route\n"); dev_err(&device->dev, "Invalid directed route\n");
goto out; goto out;
} }
...@@ -718,7 +766,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, ...@@ -718,7 +766,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
local = kmalloc(sizeof *local, GFP_ATOMIC); local = kmalloc(sizeof *local, GFP_ATOMIC);
if (!local) { if (!local) {
ret = -ENOMEM; ret = -ENOMEM;
printk(KERN_ERR PFX "No memory for ib_mad_local_private\n"); dev_err(&device->dev, "No memory for ib_mad_local_private\n");
goto out; goto out;
} }
local->mad_priv = NULL; local->mad_priv = NULL;
...@@ -726,7 +774,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, ...@@ -726,7 +774,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC); mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
if (!mad_priv) { if (!mad_priv) {
ret = -ENOMEM; ret = -ENOMEM;
printk(KERN_ERR PFX "No memory for local response MAD\n"); dev_err(&device->dev, "No memory for local response MAD\n");
kfree(local); kfree(local);
goto out; goto out;
} }
...@@ -837,9 +885,9 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, ...@@ -837,9 +885,9 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
for (left = send_buf->data_len + pad; left > 0; left -= seg_size) { for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask); seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
if (!seg) { if (!seg) {
printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem " dev_err(&send_buf->mad_agent->device->dev,
"alloc failed for len %zd, gfp %#x\n", "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
sizeof (*seg) + seg_size, gfp_mask); sizeof (*seg) + seg_size, gfp_mask);
free_send_rmpp_list(send_wr); free_send_rmpp_list(send_wr);
return -ENOMEM; return -ENOMEM;
} }
@@ -862,6 +910,12 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
     return 0;
 }
 
+int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent)
+{
+    return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
+}
+EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
+
 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
                         u32 remote_qpn, u16 pkey_index,
                         int rmpp_active,
@@ -878,10 +932,12 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
     pad = get_pad_size(hdr_len, data_len);
     message_size = hdr_len + data_len + pad;
 
-    if ((!mad_agent->rmpp_version &&
-         (rmpp_active || message_size > sizeof(struct ib_mad))) ||
-        (!rmpp_active && message_size > sizeof(struct ib_mad)))
-        return ERR_PTR(-EINVAL);
+    if (ib_mad_kernel_rmpp_agent(mad_agent)) {
+        if (!rmpp_active && message_size > sizeof(struct ib_mad))
+            return ERR_PTR(-EINVAL);
+    } else
+        if (rmpp_active || message_size > sizeof(struct ib_mad))
+            return ERR_PTR(-EINVAL);
 
     size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
     buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
@@ -1135,7 +1191,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
                       &mad_agent_priv->send_list);
         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
-        if (mad_agent_priv->agent.rmpp_version) {
+        if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
             ret = ib_send_rmpp_mad(mad_send_wr);
             if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
                 ret = ib_send_mad(mad_send_wr);
...@@ -1199,7 +1255,8 @@ EXPORT_SYMBOL(ib_redirect_mad_qp); ...@@ -1199,7 +1255,8 @@ EXPORT_SYMBOL(ib_redirect_mad_qp);
int ib_process_mad_wc(struct ib_mad_agent *mad_agent, int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
struct ib_wc *wc) struct ib_wc *wc)
{ {
printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n"); dev_err(&mad_agent->device->dev,
"ib_process_mad_wc() not implemented yet\n");
return 0; return 0;
} }
EXPORT_SYMBOL(ib_process_mad_wc); EXPORT_SYMBOL(ib_process_mad_wc);
...@@ -1211,7 +1268,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method, ...@@ -1211,7 +1268,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
if ((*method)->agent[i]) { if ((*method)->agent[i]) {
printk(KERN_ERR PFX "Method %d already in use\n", i); pr_err("Method %d already in use\n", i);
return -EINVAL; return -EINVAL;
} }
} }
...@@ -1223,8 +1280,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method) ...@@ -1223,8 +1280,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
/* Allocate management method table */ /* Allocate management method table */
*method = kzalloc(sizeof **method, GFP_ATOMIC); *method = kzalloc(sizeof **method, GFP_ATOMIC);
if (!*method) { if (!*method) {
printk(KERN_ERR PFX "No memory for " pr_err("No memory for ib_mad_mgmt_method_table\n");
"ib_mad_mgmt_method_table\n");
return -ENOMEM; return -ENOMEM;
} }
...@@ -1319,8 +1375,8 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, ...@@ -1319,8 +1375,8 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
/* Allocate management class table for "new" class version */ /* Allocate management class table for "new" class version */
*class = kzalloc(sizeof **class, GFP_ATOMIC); *class = kzalloc(sizeof **class, GFP_ATOMIC);
if (!*class) { if (!*class) {
printk(KERN_ERR PFX "No memory for " dev_err(&agent_priv->agent.device->dev,
"ib_mad_mgmt_class_table\n"); "No memory for ib_mad_mgmt_class_table\n");
ret = -ENOMEM; ret = -ENOMEM;
goto error1; goto error1;
} }
...@@ -1386,8 +1442,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, ...@@ -1386,8 +1442,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
/* Allocate mgmt vendor class table for "new" class version */ /* Allocate mgmt vendor class table for "new" class version */
vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
if (!vendor) { if (!vendor) {
printk(KERN_ERR PFX "No memory for " dev_err(&agent_priv->agent.device->dev,
"ib_mad_mgmt_vendor_class_table\n"); "No memory for ib_mad_mgmt_vendor_class_table\n");
goto error1; goto error1;
} }
...@@ -1397,8 +1453,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, ...@@ -1397,8 +1453,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
/* Allocate table for this management vendor class */ /* Allocate table for this management vendor class */
vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
if (!vendor_class) { if (!vendor_class) {
printk(KERN_ERR PFX "No memory for " dev_err(&agent_priv->agent.device->dev,
"ib_mad_mgmt_vendor_class\n"); "No memory for ib_mad_mgmt_vendor_class\n");
goto error2; goto error2;
} }
...@@ -1429,7 +1485,7 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, ...@@ -1429,7 +1485,7 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
goto check_in_use; goto check_in_use;
} }
} }
printk(KERN_ERR PFX "All OUI slots in use\n"); dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
goto error3; goto error3;
check_in_use: check_in_use:
...@@ -1640,9 +1696,9 @@ find_mad_agent(struct ib_mad_port_private *port_priv, ...@@ -1640,9 +1696,9 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
if (mad_agent->agent.recv_handler) if (mad_agent->agent.recv_handler)
atomic_inc(&mad_agent->refcount); atomic_inc(&mad_agent->refcount);
else { else {
printk(KERN_NOTICE PFX "No receive handler for client " dev_notice(&port_priv->device->dev,
"%p on port %d\n", "No receive handler for client %p on port %d\n",
&mad_agent->agent, port_priv->port_num); &mad_agent->agent, port_priv->port_num);
mad_agent = NULL; mad_agent = NULL;
} }
} }
...@@ -1658,8 +1714,8 @@ static int validate_mad(struct ib_mad *mad, u32 qp_num) ...@@ -1658,8 +1714,8 @@ static int validate_mad(struct ib_mad *mad, u32 qp_num)
/* Make sure MAD base version is understood */ /* Make sure MAD base version is understood */
if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) { if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
printk(KERN_ERR PFX "MAD received with unsupported base " pr_err("MAD received with unsupported base version %d\n",
"version %d\n", mad->mad_hdr.base_version); mad->mad_hdr.base_version);
goto out; goto out;
} }
@@ -1685,6 +1741,7 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
 
     rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
     return !mad_agent_priv->agent.rmpp_version ||
+        !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
         !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
                     IB_MGMT_RMPP_FLAG_ACTIVE) ||
         (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
@@ -1812,7 +1869,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
     INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
     list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
-    if (mad_agent_priv->agent.rmpp_version) {
+    if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
         mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
                               mad_recv_wc);
         if (!mad_recv_wc) {
@@ -1827,23 +1884,39 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
         mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
         if (!mad_send_wr) {
             spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-            ib_free_recv_mad(mad_recv_wc);
-            deref_mad_agent(mad_agent_priv);
-            return;
-        }
-        ib_mark_mad_done(mad_send_wr);
-        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+            if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
+               && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
+               && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
+                    & IB_MGMT_RMPP_FLAG_ACTIVE)) {
+                /* user rmpp is in effect
+                 * and this is an active RMPP MAD
+                 */
+                mad_recv_wc->wc->wr_id = 0;
+                mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+                                   mad_recv_wc);
+                atomic_dec(&mad_agent_priv->refcount);
+            } else {
+                /* not user rmpp, revert to normal behavior and
+                 * drop the mad */
+                ib_free_recv_mad(mad_recv_wc);
+                deref_mad_agent(mad_agent_priv);
+                return;
+            }
+        } else {
+            ib_mark_mad_done(mad_send_wr);
+            spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
-        /* Defined behavior is to complete response before request */
-        mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
-        mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
-                           mad_recv_wc);
-        atomic_dec(&mad_agent_priv->refcount);
+            /* Defined behavior is to complete response before request */
+            mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
+            mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+                               mad_recv_wc);
+            atomic_dec(&mad_agent_priv->refcount);
 
-        mad_send_wc.status = IB_WC_SUCCESS;
-        mad_send_wc.vendor_err = 0;
-        mad_send_wc.send_buf = &mad_send_wr->send_buf;
-        ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
+            mad_send_wc.status = IB_WC_SUCCESS;
+            mad_send_wc.vendor_err = 0;
+            mad_send_wc.send_buf = &mad_send_wr->send_buf;
+            ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
+        }
     } else {
         mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
                            mad_recv_wc);
...@@ -1911,8 +1984,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, ...@@ -1911,8 +1984,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
if (!response) { if (!response) {
printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory " dev_err(&port_priv->device->dev,
"for response buffer\n"); "ib_mad_recv_done_handler no memory for response buffer\n");
goto out; goto out;
} }
@@ -2083,7 +2156,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 
     mad_agent_priv = mad_send_wr->mad_agent_priv;
     spin_lock_irqsave(&mad_agent_priv->lock, flags);
-    if (mad_agent_priv->agent.rmpp_version) {
+    if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
         ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
         if (ret == IB_RMPP_RESULT_CONSUMED)
             goto done;
...@@ -2176,7 +2249,8 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, ...@@ -2176,7 +2249,8 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
&bad_send_wr); &bad_send_wr);
if (ret) { if (ret) {
printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret); dev_err(&port_priv->device->dev,
"ib_post_send failed: %d\n", ret);
mad_send_wr = queued_send_wr; mad_send_wr = queued_send_wr;
wc->status = IB_WC_LOC_QP_OP_ERR; wc->status = IB_WC_LOC_QP_OP_ERR;
goto retry; goto retry;
...@@ -2248,8 +2322,9 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv, ...@@ -2248,8 +2322,9 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
IB_QP_STATE | IB_QP_CUR_STATE); IB_QP_STATE | IB_QP_CUR_STATE);
kfree(attr); kfree(attr);
if (ret) if (ret)
printk(KERN_ERR PFX "mad_error_handler - " dev_err(&port_priv->device->dev,
"ib_modify_qp to RTS : %d\n", ret); "mad_error_handler - ib_modify_qp to RTS : %d\n",
ret);
else else
mark_sends_for_retry(qp_info); mark_sends_for_retry(qp_info);
} }
...@@ -2408,7 +2483,8 @@ static void local_completions(struct work_struct *work) ...@@ -2408,7 +2483,8 @@ static void local_completions(struct work_struct *work)
if (local->mad_priv) { if (local->mad_priv) {
recv_mad_agent = local->recv_mad_agent; recv_mad_agent = local->recv_mad_agent;
if (!recv_mad_agent) { if (!recv_mad_agent) {
printk(KERN_ERR PFX "No receive MAD agent for local completion\n"); dev_err(&mad_agent_priv->agent.device->dev,
"No receive MAD agent for local completion\n");
free_mad = 1; free_mad = 1;
goto local_send_completion; goto local_send_completion;
} }
@@ -2476,7 +2552,7 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
 
     mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
 
-    if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
+    if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
         ret = ib_retry_rmpp(mad_send_wr);
         switch (ret) {
         case IB_RMPP_RESULT_UNHANDLED:
...@@ -2589,7 +2665,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, ...@@ -2589,7 +2665,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
} else { } else {
mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
if (!mad_priv) { if (!mad_priv) {
printk(KERN_ERR PFX "No memory for receive buffer\n"); dev_err(&qp_info->port_priv->device->dev,
"No memory for receive buffer\n");
ret = -ENOMEM; ret = -ENOMEM;
break; break;
} }
...@@ -2625,7 +2702,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, ...@@ -2625,7 +2702,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
sizeof mad_priv->header, sizeof mad_priv->header,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
kmem_cache_free(ib_mad_cache, mad_priv); kmem_cache_free(ib_mad_cache, mad_priv);
printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret); dev_err(&qp_info->port_priv->device->dev,
"ib_post_recv failed: %d\n", ret);
break; break;
} }
} while (post); } while (post);
...@@ -2681,7 +2759,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) ...@@ -2681,7 +2759,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
attr = kmalloc(sizeof *attr, GFP_KERNEL); attr = kmalloc(sizeof *attr, GFP_KERNEL);
if (!attr) { if (!attr) {
printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n"); dev_err(&port_priv->device->dev,
"Couldn't kmalloc ib_qp_attr\n");
return -ENOMEM; return -ENOMEM;
} }
...@@ -2705,16 +2784,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) ...@@ -2705,16 +2784,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
ret = ib_modify_qp(qp, attr, IB_QP_STATE | ret = ib_modify_qp(qp, attr, IB_QP_STATE |
IB_QP_PKEY_INDEX | IB_QP_QKEY); IB_QP_PKEY_INDEX | IB_QP_QKEY);
if (ret) { if (ret) {
printk(KERN_ERR PFX "Couldn't change QP%d state to " dev_err(&port_priv->device->dev,
"INIT: %d\n", i, ret); "Couldn't change QP%d state to INIT: %d\n",
i, ret);
goto out; goto out;
} }
attr->qp_state = IB_QPS_RTR; attr->qp_state = IB_QPS_RTR;
ret = ib_modify_qp(qp, attr, IB_QP_STATE); ret = ib_modify_qp(qp, attr, IB_QP_STATE);
if (ret) { if (ret) {
printk(KERN_ERR PFX "Couldn't change QP%d state to " dev_err(&port_priv->device->dev,
"RTR: %d\n", i, ret); "Couldn't change QP%d state to RTR: %d\n",
i, ret);
goto out; goto out;
} }
...@@ -2722,16 +2803,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) ...@@ -2722,16 +2803,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
attr->sq_psn = IB_MAD_SEND_Q_PSN; attr->sq_psn = IB_MAD_SEND_Q_PSN;
ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
if (ret) { if (ret) {
printk(KERN_ERR PFX "Couldn't change QP%d state to " dev_err(&port_priv->device->dev,
"RTS: %d\n", i, ret); "Couldn't change QP%d state to RTS: %d\n",
i, ret);
goto out; goto out;
} }
} }
ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
if (ret) { if (ret) {
printk(KERN_ERR PFX "Failed to request completion " dev_err(&port_priv->device->dev,
"notification: %d\n", ret); "Failed to request completion notification: %d\n",
ret);
goto out; goto out;
} }
...@@ -2741,7 +2824,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) ...@@ -2741,7 +2824,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
if (ret) { if (ret) {
printk(KERN_ERR PFX "Couldn't post receive WRs\n"); dev_err(&port_priv->device->dev,
"Couldn't post receive WRs\n");
goto out; goto out;
} }
} }
...@@ -2755,7 +2839,8 @@ static void qp_event_handler(struct ib_event *event, void *qp_context) ...@@ -2755,7 +2839,8 @@ static void qp_event_handler(struct ib_event *event, void *qp_context)
struct ib_mad_qp_info *qp_info = qp_context; struct ib_mad_qp_info *qp_info = qp_context;
/* It's worse than that! He's dead, Jim! */ /* It's worse than that! He's dead, Jim! */
printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n", dev_err(&qp_info->port_priv->device->dev,
"Fatal error (%d) on MAD QP (%d)\n",
event->event, qp_info->qp->qp_num); event->event, qp_info->qp->qp_num);
} }
...@@ -2801,8 +2886,9 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info, ...@@ -2801,8 +2886,9 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
qp_init_attr.event_handler = qp_event_handler; qp_init_attr.event_handler = qp_event_handler;
qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
if (IS_ERR(qp_info->qp)) { if (IS_ERR(qp_info->qp)) {
printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n", dev_err(&qp_info->port_priv->device->dev,
get_spl_qp_index(qp_type)); "Couldn't create ib_mad QP%d\n",
get_spl_qp_index(qp_type));
ret = PTR_ERR(qp_info->qp); ret = PTR_ERR(qp_info->qp);
goto error; goto error;
} }
...@@ -2840,7 +2926,7 @@ static int ib_mad_port_open(struct ib_device *device, ...@@ -2840,7 +2926,7 @@ static int ib_mad_port_open(struct ib_device *device,
/* Create new device info */ /* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
if (!port_priv) { if (!port_priv) {
printk(KERN_ERR PFX "No memory for ib_mad_port_private\n"); dev_err(&device->dev, "No memory for ib_mad_port_private\n");
return -ENOMEM; return -ENOMEM;
} }
...@@ -2860,21 +2946,21 @@ static int ib_mad_port_open(struct ib_device *device, ...@@ -2860,21 +2946,21 @@ static int ib_mad_port_open(struct ib_device *device,
ib_mad_thread_completion_handler, ib_mad_thread_completion_handler,
NULL, port_priv, cq_size, 0); NULL, port_priv, cq_size, 0);
if (IS_ERR(port_priv->cq)) { if (IS_ERR(port_priv->cq)) {
printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n"); dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
ret = PTR_ERR(port_priv->cq); ret = PTR_ERR(port_priv->cq);
goto error3; goto error3;
} }
port_priv->pd = ib_alloc_pd(device); port_priv->pd = ib_alloc_pd(device);
if (IS_ERR(port_priv->pd)) { if (IS_ERR(port_priv->pd)) {
printk(KERN_ERR PFX "Couldn't create ib_mad PD\n"); dev_err(&device->dev, "Couldn't create ib_mad PD\n");
ret = PTR_ERR(port_priv->pd); ret = PTR_ERR(port_priv->pd);
goto error4; goto error4;
} }
port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE); port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(port_priv->mr)) { if (IS_ERR(port_priv->mr)) {
printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n"); dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
ret = PTR_ERR(port_priv->mr); ret = PTR_ERR(port_priv->mr);
goto error5; goto error5;
} }
...@@ -2902,7 +2988,7 @@ static int ib_mad_port_open(struct ib_device *device, ...@@ -2902,7 +2988,7 @@ static int ib_mad_port_open(struct ib_device *device,
ret = ib_mad_port_start(port_priv); ret = ib_mad_port_start(port_priv);
if (ret) { if (ret) {
printk(KERN_ERR PFX "Couldn't start port\n"); dev_err(&device->dev, "Couldn't start port\n");
goto error9; goto error9;
} }
...@@ -2946,7 +3032,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) ...@@ -2946,7 +3032,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
port_priv = __ib_get_mad_port(device, port_num); port_priv = __ib_get_mad_port(device, port_num);
if (port_priv == NULL) { if (port_priv == NULL) {
spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
printk(KERN_ERR PFX "Port %d not found\n", port_num); dev_err(&device->dev, "Port %d not found\n", port_num);
return -ENODEV; return -ENODEV;
} }
list_del_init(&port_priv->port_list); list_del_init(&port_priv->port_list);
...@@ -2984,14 +3070,12 @@ static void ib_mad_init_device(struct ib_device *device) ...@@ -2984,14 +3070,12 @@ static void ib_mad_init_device(struct ib_device *device)
for (i = start; i <= end; i++) { for (i = start; i <= end; i++) {
if (ib_mad_port_open(device, i)) { if (ib_mad_port_open(device, i)) {
printk(KERN_ERR PFX "Couldn't open %s port %d\n", dev_err(&device->dev, "Couldn't open port %d\n", i);
device->name, i);
goto error; goto error;
} }
if (ib_agent_port_open(device, i)) { if (ib_agent_port_open(device, i)) {
printk(KERN_ERR PFX "Couldn't open %s port %d " dev_err(&device->dev,
"for agents\n", "Couldn't open port %d for agents\n", i);
device->name, i);
goto error_agent; goto error_agent;
} }
} }
...@@ -2999,20 +3083,17 @@ static void ib_mad_init_device(struct ib_device *device) ...@@ -2999,20 +3083,17 @@ static void ib_mad_init_device(struct ib_device *device)
error_agent: error_agent:
if (ib_mad_port_close(device, i)) if (ib_mad_port_close(device, i))
printk(KERN_ERR PFX "Couldn't close %s port %d\n", dev_err(&device->dev, "Couldn't close port %d\n", i);
device->name, i);
error: error:
i--; i--;
while (i >= start) { while (i >= start) {
if (ib_agent_port_close(device, i)) if (ib_agent_port_close(device, i))
printk(KERN_ERR PFX "Couldn't close %s port %d " dev_err(&device->dev,
"for agents\n", "Couldn't close port %d for agents\n", i);
device->name, i);
if (ib_mad_port_close(device, i)) if (ib_mad_port_close(device, i))
printk(KERN_ERR PFX "Couldn't close %s port %d\n", dev_err(&device->dev, "Couldn't close port %d\n", i);
device->name, i);
i--; i--;
} }
} }
...@@ -3033,12 +3114,12 @@ static void ib_mad_remove_device(struct ib_device *device) ...@@ -3033,12 +3114,12 @@ static void ib_mad_remove_device(struct ib_device *device)
} }
for (i = 0; i < num_ports; i++, cur_port++) { for (i = 0; i < num_ports; i++, cur_port++) {
if (ib_agent_port_close(device, cur_port)) if (ib_agent_port_close(device, cur_port))
printk(KERN_ERR PFX "Couldn't close %s port %d " dev_err(&device->dev,
"for agents\n", "Couldn't close port %d for agents\n",
device->name, cur_port); cur_port);
if (ib_mad_port_close(device, cur_port)) if (ib_mad_port_close(device, cur_port))
printk(KERN_ERR PFX "Couldn't close %s port %d\n", dev_err(&device->dev, "Couldn't close port %d\n",
device->name, cur_port); cur_port);
} }
} }
...@@ -3064,7 +3145,7 @@ static int __init ib_mad_init_module(void) ...@@ -3064,7 +3145,7 @@ static int __init ib_mad_init_module(void)
SLAB_HWCACHE_ALIGN, SLAB_HWCACHE_ALIGN,
NULL); NULL);
if (!ib_mad_cache) { if (!ib_mad_cache) {
printk(KERN_ERR PFX "Couldn't create ib_mad cache\n"); pr_err("Couldn't create ib_mad cache\n");
ret = -ENOMEM; ret = -ENOMEM;
goto error1; goto error1;
} }
...@@ -3072,7 +3153,7 @@ static int __init ib_mad_init_module(void) ...@@ -3072,7 +3153,7 @@ static int __init ib_mad_init_module(void)
INIT_LIST_HEAD(&ib_mad_port_list); INIT_LIST_HEAD(&ib_mad_port_list);
if (ib_register_client(&mad_client)) { if (ib_register_client(&mad_client)) {
printk(KERN_ERR PFX "Couldn't register ib_mad client\n"); pr_err("Couldn't register ib_mad client\n");
ret = -EINVAL; ret = -EINVAL;
goto error2; goto error2;
} }
......
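For context, the in-kernel side of the API change above is the trailing registration_flags argument to ib_register_mad_agent() and the new ib_mad_kernel_rmpp_agent() helper. A hedged sketch of what a kernel caller looks like after this series; the management class, method and handlers are placeholders, not code from the commit:

#include <rdma/ib_mad.h>

/* Placeholder handlers for the sketch. */
static void demo_send_handler(struct ib_mad_agent *agent,
                              struct ib_mad_send_wc *wc) { }
static void demo_recv_handler(struct ib_mad_agent *agent,
                              struct ib_mad_recv_wc *wc)
{
    ib_free_recv_mad(wc);
}

static struct ib_mad_agent *demo_register(struct ib_device *device, u8 port)
{
    struct ib_mad_reg_req req = {
        .mgmt_class         = IB_MGMT_CLASS_PERF_MGMT,  /* example class */
        .mgmt_class_version = 1,
    };

    set_bit(IB_MGMT_METHOD_GET, req.method_mask);

    /*
     * Kernel agents keep passing 0 for the new trailing
     * registration_flags argument; IB_MAD_USER_RMPP is only meaningful
     * for agents registered on behalf of userspace (ib_umad).
     */
    return ib_register_mad_agent(device, port, IB_QPT_GSI, &req, 0,
                                 demo_send_handler, demo_recv_handler,
                                 NULL, 0);
}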
@@ -42,9 +42,6 @@
 #include <rdma/ib_mad.h>
 #include <rdma/ib_smi.h>
 
-#define PFX "ib_mad: "
-
 #define IB_MAD_QPS_CORE		2 /* Always QP0 and QP1 as a minimum */
 
 /* QP and CQ parameters */
......
...@@ -1184,7 +1184,7 @@ static void ib_sa_add_one(struct ib_device *device) ...@@ -1184,7 +1184,7 @@ static void ib_sa_add_one(struct ib_device *device)
sa_dev->port[i].agent = sa_dev->port[i].agent =
ib_register_mad_agent(device, i + s, IB_QPT_GSI, ib_register_mad_agent(device, i + s, IB_QPT_GSI,
NULL, 0, send_handler, NULL, 0, send_handler,
recv_handler, sa_dev); recv_handler, sa_dev, 0);
if (IS_ERR(sa_dev->port[i].agent)) if (IS_ERR(sa_dev->port[i].agent))
goto err; goto err;
......
@@ -33,6 +33,8 @@
  * SOFTWARE.
  */
 
+#define pr_fmt(fmt) "user_mad: " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
@@ -504,13 +506,15 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
     rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
     hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
-    if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
-        copy_offset = IB_MGMT_MAD_HDR;
-        rmpp_active = 0;
-    } else {
+
+    if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
+        && ib_mad_kernel_rmpp_agent(agent)) {
         copy_offset = IB_MGMT_RMPP_HDR;
         rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
                         IB_MGMT_RMPP_FLAG_ACTIVE;
+    } else {
+        copy_offset = IB_MGMT_MAD_HDR;
+        rmpp_active = 0;
     }
 
     data_len = count - hdr_size(file) - hdr_len;
@@ -556,14 +560,22 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
         rmpp_mad->mad_hdr.tid = *tid;
     }
 
-    spin_lock_irq(&file->send_lock);
-    ret = is_duplicate(file, packet);
-    if (!ret)
+    if (!ib_mad_kernel_rmpp_agent(agent)
+       && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
+       && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
+        spin_lock_irq(&file->send_lock);
         list_add_tail(&packet->list, &file->send_list);
-    spin_unlock_irq(&file->send_lock);
-    if (ret) {
-        ret = -EINVAL;
-        goto err_msg;
+        spin_unlock_irq(&file->send_lock);
+    } else {
+        spin_lock_irq(&file->send_lock);
+        ret = is_duplicate(file, packet);
+        if (!ret)
+            list_add_tail(&packet->list, &file->send_list);
+        spin_unlock_irq(&file->send_lock);
+        if (ret) {
+            ret = -EINVAL;
+            goto err_msg;
+        }
     }
 
     ret = ib_post_send_mad(packet->msg, NULL);
...@@ -614,6 +626,8 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, ...@@ -614,6 +626,8 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
mutex_lock(&file->mutex); mutex_lock(&file->mutex);
if (!file->port->ib_dev) { if (!file->port->ib_dev) {
dev_notice(file->port->dev,
"ib_umad_reg_agent: invalid device\n");
ret = -EPIPE; ret = -EPIPE;
goto out; goto out;
} }
...@@ -624,6 +638,9 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, ...@@ -624,6 +638,9 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
} }
if (ureq.qpn != 0 && ureq.qpn != 1) { if (ureq.qpn != 0 && ureq.qpn != 1) {
dev_notice(file->port->dev,
"ib_umad_reg_agent: invalid QPN %d specified\n",
ureq.qpn);
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
...@@ -632,11 +649,15 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, ...@@ -632,11 +649,15 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
if (!__get_agent(file, agent_id)) if (!__get_agent(file, agent_id))
goto found; goto found;
dev_notice(file->port->dev,
"ib_umad_reg_agent: Max Agents (%u) reached\n",
IB_UMAD_MAX_AGENTS);
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
found: found:
if (ureq.mgmt_class) { if (ureq.mgmt_class) {
memset(&req, 0, sizeof(req));
req.mgmt_class = ureq.mgmt_class; req.mgmt_class = ureq.mgmt_class;
req.mgmt_class_version = ureq.mgmt_class_version; req.mgmt_class_version = ureq.mgmt_class_version;
memcpy(req.oui, ureq.oui, sizeof req.oui); memcpy(req.oui, ureq.oui, sizeof req.oui);
...@@ -657,7 +678,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, ...@@ -657,7 +678,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
ureq.mgmt_class ? &req : NULL, ureq.mgmt_class ? &req : NULL,
ureq.rmpp_version, ureq.rmpp_version,
send_handler, recv_handler, file); send_handler, recv_handler, file, 0);
if (IS_ERR(agent)) { if (IS_ERR(agent)) {
ret = PTR_ERR(agent); ret = PTR_ERR(agent);
agent = NULL; agent = NULL;
...@@ -673,10 +694,11 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, ...@@ -673,10 +694,11 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
if (!file->already_used) { if (!file->already_used) {
file->already_used = 1; file->already_used = 1;
if (!file->use_pkey_index) { if (!file->use_pkey_index) {
printk(KERN_WARNING "user_mad: process %s did not enable " dev_warn(file->port->dev,
"P_Key index support.\n", current->comm); "process %s did not enable P_Key index support.\n",
printk(KERN_WARNING "user_mad: Documentation/infiniband/user_mad.txt " current->comm);
"has info on the new ABI.\n"); dev_warn(file->port->dev,
" Documentation/infiniband/user_mad.txt has info on the new ABI.\n");
} }
} }
...@@ -694,6 +716,119 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, ...@@ -694,6 +716,119 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
return ret; return ret;
} }
static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
{
struct ib_user_mad_reg_req2 ureq;
struct ib_mad_reg_req req;
struct ib_mad_agent *agent = NULL;
int agent_id;
int ret;
mutex_lock(&file->port->file_mutex);
mutex_lock(&file->mutex);
if (!file->port->ib_dev) {
dev_notice(file->port->dev,
"ib_umad_reg_agent2: invalid device\n");
ret = -EPIPE;
goto out;
}
if (copy_from_user(&ureq, arg, sizeof(ureq))) {
ret = -EFAULT;
goto out;
}
if (ureq.qpn != 0 && ureq.qpn != 1) {
dev_notice(file->port->dev,
"ib_umad_reg_agent2: invalid QPN %d specified\n",
ureq.qpn);
ret = -EINVAL;
goto out;
}
if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) {
dev_notice(file->port->dev,
"ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n",
ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
ret = -EINVAL;
if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP,
(u32 __user *) (arg + offsetof(struct
ib_user_mad_reg_req2, flags))))
ret = -EFAULT;
goto out;
}
for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
if (!__get_agent(file, agent_id))
goto found;
dev_notice(file->port->dev,
"ib_umad_reg_agent2: Max Agents (%u) reached\n",
IB_UMAD_MAX_AGENTS);
ret = -ENOMEM;
goto out;
found:
if (ureq.mgmt_class) {
memset(&req, 0, sizeof(req));
req.mgmt_class = ureq.mgmt_class;
req.mgmt_class_version = ureq.mgmt_class_version;
if (ureq.oui & 0xff000000) {
dev_notice(file->port->dev,
"ib_umad_reg_agent2 failed: oui invalid 0x%08x\n",
ureq.oui);
ret = -EINVAL;
goto out;
}
req.oui[2] = ureq.oui & 0x0000ff;
req.oui[1] = (ureq.oui & 0x00ff00) >> 8;
req.oui[0] = (ureq.oui & 0xff0000) >> 16;
memcpy(req.method_mask, ureq.method_mask,
sizeof(req.method_mask));
}
agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
ureq.mgmt_class ? &req : NULL,
ureq.rmpp_version,
send_handler, recv_handler, file,
ureq.flags);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
agent = NULL;
goto out;
}
if (put_user(agent_id,
(u32 __user *)(arg +
offsetof(struct ib_user_mad_reg_req2, id)))) {
ret = -EFAULT;
goto out;
}
if (!file->already_used) {
file->already_used = 1;
file->use_pkey_index = 1;
}
file->agent[agent_id] = agent;
ret = 0;
out:
mutex_unlock(&file->mutex);
if (ret && agent)
ib_unregister_mad_agent(agent);
mutex_unlock(&file->port->file_mutex);
return ret;
}
static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{ {
struct ib_mad_agent *agent = NULL; struct ib_mad_agent *agent = NULL;
@@ -749,6 +884,8 @@ static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
         return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
     case IB_USER_MAD_ENABLE_PKEY:
         return ib_umad_enable_pkey(filp->private_data);
+    case IB_USER_MAD_REGISTER_AGENT2:
+        return ib_umad_reg_agent2(filp->private_data, (void __user *) arg);
     default:
         return -ENOIOCTLCMD;
     }
@@ -765,6 +902,8 @@ static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
         return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
     case IB_USER_MAD_ENABLE_PKEY:
         return ib_umad_enable_pkey(filp->private_data);
+    case IB_USER_MAD_REGISTER_AGENT2:
+        return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg));
     default:
         return -ENOIOCTLCMD;
     }
...@@ -983,7 +1122,7 @@ static CLASS_ATTR_STRING(abi_version, S_IRUGO, ...@@ -983,7 +1122,7 @@ static CLASS_ATTR_STRING(abi_version, S_IRUGO,
static dev_t overflow_maj; static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS); static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
static int find_overflow_devnum(void) static int find_overflow_devnum(struct ib_device *device)
{ {
int ret; int ret;
...@@ -991,7 +1130,8 @@ static int find_overflow_devnum(void) ...@@ -991,7 +1130,8 @@ static int find_overflow_devnum(void)
ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2, ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
"infiniband_mad"); "infiniband_mad");
if (ret) { if (ret) {
printk(KERN_ERR "user_mad: couldn't register dynamic device number\n"); dev_err(&device->dev,
"couldn't register dynamic device number\n");
return ret; return ret;
} }
} }
...@@ -1014,7 +1154,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, ...@@ -1014,7 +1154,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
if (devnum >= IB_UMAD_MAX_PORTS) { if (devnum >= IB_UMAD_MAX_PORTS) {
spin_unlock(&port_lock); spin_unlock(&port_lock);
devnum = find_overflow_devnum(); devnum = find_overflow_devnum(device);
if (devnum < 0) if (devnum < 0)
return -1; return -1;
...@@ -1200,14 +1340,14 @@ static int __init ib_umad_init(void) ...@@ -1200,14 +1340,14 @@ static int __init ib_umad_init(void)
ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2, ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
"infiniband_mad"); "infiniband_mad");
if (ret) { if (ret) {
printk(KERN_ERR "user_mad: couldn't register device number\n"); pr_err("couldn't register device number\n");
goto out; goto out;
} }
umad_class = class_create(THIS_MODULE, "infiniband_mad"); umad_class = class_create(THIS_MODULE, "infiniband_mad");
if (IS_ERR(umad_class)) { if (IS_ERR(umad_class)) {
ret = PTR_ERR(umad_class); ret = PTR_ERR(umad_class);
printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n"); pr_err("couldn't create class infiniband_mad\n");
goto out_chrdev; goto out_chrdev;
} }
...@@ -1215,13 +1355,13 @@ static int __init ib_umad_init(void) ...@@ -1215,13 +1355,13 @@ static int __init ib_umad_init(void)
ret = class_create_file(umad_class, &class_attr_abi_version.attr); ret = class_create_file(umad_class, &class_attr_abi_version.attr);
if (ret) { if (ret) {
printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n"); pr_err("couldn't create abi_version attribute\n");
goto out_class; goto out_class;
} }
ret = ib_register_client(&umad_client); ret = ib_register_client(&umad_client);
if (ret) { if (ret) {
printk(KERN_ERR "user_mad: couldn't register ib_umad client\n"); pr_err("couldn't register ib_umad client\n");
goto out_class; goto out_class;
} }
......
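
Note: the user_mad.c hunks above drop the hard-coded "user_mad: " prefix when converting printk(KERN_ERR ...) to pr_err()/dev_err(). With the pr_* helpers the prefix normally comes from a pr_fmt macro defined before the includes; a minimal sketch of that pattern follows (the exact prefix string used by this commit is an assumption):

    /* Must appear before any #include that pulls in printk.h, so every
     * pr_err()/pr_info() in this file picks up the prefix automatically.
     */
    #define pr_fmt(fmt) "user_mad: " fmt

    #include <linux/kernel.h>
    #include <linux/module.h>

    static int __init example_init(void)
    {
        pr_err("couldn't register device number\n");
        /* logged as: "user_mad: couldn't register device number" */
        return 0;
    }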
...@@ -260,11 +260,14 @@ static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) ...@@ -260,11 +260,14 @@ static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
mq->msg_pool.host, dma_unmap_addr(mq, mapping)); mq->msg_pool.host, dma_unmap_addr(mq, mapping));
} }
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
int msg_size) size_t q_size, size_t msg_size)
{ {
u8 *pool_start; u8 *pool_start;
if (q_size > SIZE_MAX / msg_size)
return -EINVAL;
pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size, pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
&mq->host_dma, GFP_KERNEL); &mq->host_dma, GFP_KERNEL);
if (!pool_start) if (!pool_start)
......
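
Note: the new q_size > SIZE_MAX / msg_size test above guards the q_size * msg_size multiplication handed to dma_alloc_coherent() against size_t overflow. The same idiom in self-contained form (userspace sketch, names hypothetical):

    #include <stdint.h>
    #include <stdlib.h>

    /* Allocate count * size bytes, refusing requests whose product
     * would wrap past SIZE_MAX (the check used in c2_alloc_cq_buf).
     */
    static void *alloc_array(size_t count, size_t size)
    {
        if (size && count > SIZE_MAX / size)
            return NULL;    /* count * size would overflow */
        return malloc(count * size);
    }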
...@@ -182,6 +182,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) ...@@ -182,6 +182,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
chp = get_chp(dev, qid); chp = get_chp(dev, qid);
if (chp) { if (chp) {
t4_clear_cq_armed(&chp->cq);
spin_lock_irqsave(&chp->comp_handler_lock, flag); spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag); spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
......
...@@ -1066,7 +1066,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, ...@@ -1066,7 +1066,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
struct c4iw_cq *schp) struct c4iw_cq *schp)
{ {
int count; int count;
int flushed; int rq_flushed, sq_flushed;
unsigned long flag; unsigned long flag;
PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
...@@ -1084,27 +1084,40 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, ...@@ -1084,27 +1084,40 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
c4iw_flush_hw_cq(rchp); c4iw_flush_hw_cq(rchp);
c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock); spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&rchp->lock, flag); spin_unlock_irqrestore(&rchp->lock, flag);
if (flushed) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
/* locking hierarchy: cq lock first, then qp lock. */ /* locking hierarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&schp->lock, flag); spin_lock_irqsave(&schp->lock, flag);
spin_lock(&qhp->lock); spin_lock(&qhp->lock);
if (schp != rchp) if (schp != rchp)
c4iw_flush_hw_cq(schp); c4iw_flush_hw_cq(schp);
flushed = c4iw_flush_sq(qhp); sq_flushed = c4iw_flush_sq(qhp);
spin_unlock(&qhp->lock); spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&schp->lock, flag); spin_unlock_irqrestore(&schp->lock, flag);
if (flushed) {
spin_lock_irqsave(&schp->comp_handler_lock, flag); if (schp == rchp) {
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); if (t4_clear_cq_armed(&rchp->cq) &&
spin_unlock_irqrestore(&schp->comp_handler_lock, flag); (rq_flushed || sq_flushed)) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
} else {
if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
spin_lock_irqsave(&schp->comp_handler_lock, flag);
(*schp->ibcq.comp_handler)(&schp->ibcq,
schp->ibcq.cq_context);
spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
}
} }
} }
......
...@@ -531,6 +531,10 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq) ...@@ -531,6 +531,10 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq)
return !wq->rq.queue[wq->rq.size].status.db_off; return !wq->rq.queue[wq->rq.size].status.db_off;
} }
enum t4_cq_flags {
CQ_ARMED = 1,
};
struct t4_cq { struct t4_cq {
struct t4_cqe *queue; struct t4_cqe *queue;
dma_addr_t dma_addr; dma_addr_t dma_addr;
...@@ -551,12 +555,19 @@ struct t4_cq { ...@@ -551,12 +555,19 @@ struct t4_cq {
u16 cidx_inc; u16 cidx_inc;
u8 gen; u8 gen;
u8 error; u8 error;
unsigned long flags;
}; };
static inline int t4_clear_cq_armed(struct t4_cq *cq)
{
return test_and_clear_bit(CQ_ARMED, &cq->flags);
}
static inline int t4_arm_cq(struct t4_cq *cq, int se) static inline int t4_arm_cq(struct t4_cq *cq, int se)
{ {
u32 val; u32 val;
set_bit(CQ_ARMED, &cq->flags);
while (cq->cidx_inc > CIDXINC_MASK) { while (cq->cidx_inc > CIDXINC_MASK) {
val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) | val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
INGRESSQID(cq->cqid); INGRESSQID(cq->cqid);
......
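
Note: the CQ_ARMED bit introduced above is set atomically whenever the consumer re-arms the CQ and consumed with test_and_clear_bit() before the completion handler is invoked, so a flush or event only produces an upcall if the CQ had actually been armed since the last notification. A reduced sketch of that produce/consume pattern (illustrative only, not the driver code):

    #include <linux/bitops.h>
    #include <linux/types.h>

    enum demo_cq_flags {
        DEMO_CQ_ARMED = 0,          /* bit number, analogous to CQ_ARMED */
    };

    struct demo_cq {
        unsigned long flags;
    };

    /* Consumer side: request the next completion notification. */
    static void demo_arm_cq(struct demo_cq *cq)
    {
        set_bit(DEMO_CQ_ARMED, &cq->flags);
        /* ... ring the hardware doorbell here ... */
    }

    /* Event/flush side: deliver the upcall only if the CQ was armed,
     * clearing the flag so another arm is needed for the next upcall.
     */
    static bool demo_should_notify(struct demo_cq *cq)
    {
        return test_and_clear_bit(DEMO_CQ_ARMED, &cq->flags);
    }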
...@@ -726,7 +726,7 @@ static int add_pkey(struct ipath_devdata *dd, u16 key) ...@@ -726,7 +726,7 @@ static int add_pkey(struct ipath_devdata *dd, u16 key)
* @dd: the infinipath device * @dd: the infinipath device
* @pkeys: the PKEY table * @pkeys: the PKEY table
*/ */
static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port)
{ {
struct ipath_portdata *pd; struct ipath_portdata *pd;
int i; int i;
...@@ -759,6 +759,7 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) ...@@ -759,6 +759,7 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
} }
if (changed) { if (changed) {
u64 pkey; u64 pkey;
struct ib_event event;
pkey = (u64) dd->ipath_pkeys[0] | pkey = (u64) dd->ipath_pkeys[0] |
((u64) dd->ipath_pkeys[1] << 16) | ((u64) dd->ipath_pkeys[1] << 16) |
...@@ -768,12 +769,17 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) ...@@ -768,12 +769,17 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
(unsigned long long) pkey); (unsigned long long) pkey);
ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
pkey); pkey);
event.event = IB_EVENT_PKEY_CHANGE;
event.device = &dd->verbs_dev->ibdev;
event.element.port_num = port;
ib_dispatch_event(&event);
} }
return 0; return 0;
} }
static int recv_subn_set_pkeytable(struct ib_smp *smp, static int recv_subn_set_pkeytable(struct ib_smp *smp,
struct ib_device *ibdev) struct ib_device *ibdev, u8 port)
{ {
u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
__be16 *p = (__be16 *) smp->data; __be16 *p = (__be16 *) smp->data;
...@@ -784,7 +790,7 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp, ...@@ -784,7 +790,7 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
for (i = 0; i < n; i++) for (i = 0; i < n; i++)
q[i] = be16_to_cpu(p[i]); q[i] = be16_to_cpu(p[i]);
if (startpx != 0 || set_pkeys(dev->dd, q) != 0) if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0)
smp->status |= IB_SMP_INVALID_FIELD; smp->status |= IB_SMP_INVALID_FIELD;
return recv_subn_get_pkeytable(smp, ibdev); return recv_subn_get_pkeytable(smp, ibdev);
...@@ -1342,7 +1348,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ...@@ -1342,7 +1348,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
ret = recv_subn_set_portinfo(smp, ibdev, port_num); ret = recv_subn_set_portinfo(smp, ibdev, port_num);
goto bail; goto bail;
case IB_SMP_ATTR_PKEY_TABLE: case IB_SMP_ATTR_PKEY_TABLE:
ret = recv_subn_set_pkeytable(smp, ibdev); ret = recv_subn_set_pkeytable(smp, ibdev, port_num);
goto bail; goto bail;
case IB_SMP_ATTR_SM_INFO: case IB_SMP_ATTR_SM_INFO:
if (dev->port_cap_flags & IB_PORT_SM_DISABLED) { if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
......
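
Note: with the ipath change above, rewriting the partition table now raises IB_EVENT_PKEY_CHANGE through ib_dispatch_event() so that consumers such as IPoIB can refresh their cached P_Key index. For context, a minimal sketch of how a kernel ULP subscribes to such events (callback and function names are made up):

    #include <linux/printk.h>
    #include <rdma/ib_verbs.h>

    static void demo_event_cb(struct ib_event_handler *handler,
                              struct ib_event *event)
    {
        if (event->event == IB_EVENT_PKEY_CHANGE)
            pr_info("P_Key table changed on port %d\n",
                    event->element.port_num);
    }

    static struct ib_event_handler demo_handler;

    static void demo_watch_device(struct ib_device *device)
    {
        INIT_IB_EVENT_HANDLER(&demo_handler, device, demo_event_cb);
        ib_register_event_handler(&demo_handler);
    }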
...@@ -891,7 +891,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev) ...@@ -891,7 +891,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
agent = ib_register_mad_agent(&dev->ib_dev, p + 1, agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
q ? IB_QPT_GSI : IB_QPT_SMI, q ? IB_QPT_GSI : IB_QPT_SMI,
NULL, 0, send_handler, NULL, 0, send_handler,
NULL, NULL); NULL, NULL, 0);
if (IS_ERR(agent)) { if (IS_ERR(agent)) {
ret = PTR_ERR(agent); ret = PTR_ERR(agent);
goto err; goto err;
......
...@@ -910,8 +910,7 @@ static int __mlx4_ib_default_rules_match(struct ib_qp *qp, ...@@ -910,8 +910,7 @@ static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
const struct default_rules *pdefault_rules = default_table; const struct default_rules *pdefault_rules = default_table;
u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port); u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++, for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
pdefault_rules++) {
__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS]; __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
memset(&field_types, 0, sizeof(field_types)); memset(&field_types, 0, sizeof(field_types));
...@@ -965,8 +964,7 @@ static int __mlx4_ib_create_default_rules( ...@@ -965,8 +964,7 @@ static int __mlx4_ib_create_default_rules(
int size = 0; int size = 0;
int i; int i;
for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/ for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
sizeof(pdefault_rules->rules_create_list[0]); i++) {
int ret; int ret;
union ib_flow_spec ib_spec; union ib_flow_spec ib_spec;
switch (pdefault_rules->rules_create_list[i]) { switch (pdefault_rules->rules_create_list[i]) {
......
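
Note: this hunk (and the mlx5 hunk that follows) replaces open-coded sizeof(arr)/sizeof(arr[0]) with ARRAY_SIZE(), which computes the same element count but, in the kernel version, also fails to compile if the argument is a pointer rather than a true array. A small userspace equivalent of the macro:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified ARRAY_SIZE: element count of a true array. */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    int main(void)
    {
        int default_table[] = { 1, 2, 3, 5, 8 };
        size_t i;

        for (i = 0; i < ARRAY_SIZE(default_table); i++)
            printf("%d\n", default_table[i]);
        return 0;
    }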
...@@ -2501,7 +2501,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -2501,7 +2501,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags); spin_lock_irqsave(&qp->sq.lock, flags);
for (nreq = 0; wr; nreq++, wr = wr->next) { for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) { if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
mlx5_ib_warn(dev, "\n"); mlx5_ib_warn(dev, "\n");
err = -EINVAL; err = -EINVAL;
*bad_wr = wr; *bad_wr = wr;
......
...@@ -294,7 +294,7 @@ int mthca_create_agents(struct mthca_dev *dev) ...@@ -294,7 +294,7 @@ int mthca_create_agents(struct mthca_dev *dev)
agent = ib_register_mad_agent(&dev->ib_dev, p + 1, agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
q ? IB_QPT_GSI : IB_QPT_SMI, q ? IB_QPT_GSI : IB_QPT_SMI,
NULL, 0, send_handler, NULL, 0, send_handler,
NULL, NULL); NULL, NULL, 0);
if (IS_ERR(agent)) { if (IS_ERR(agent)) {
ret = PTR_ERR(agent); ret = PTR_ERR(agent);
goto err; goto err;
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
#include <be_roce.h> #include <be_roce.h>
#include "ocrdma_sli.h" #include "ocrdma_sli.h"
#define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u" #define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u"
#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
...@@ -137,6 +137,7 @@ struct mqe_ctx { ...@@ -137,6 +137,7 @@ struct mqe_ctx {
u16 cqe_status; u16 cqe_status;
u16 ext_status; u16 ext_status;
bool cmd_done; bool cmd_done;
bool fw_error_state;
}; };
struct ocrdma_hw_mr { struct ocrdma_hw_mr {
...@@ -235,7 +236,10 @@ struct ocrdma_dev { ...@@ -235,7 +236,10 @@ struct ocrdma_dev {
struct list_head entry; struct list_head entry;
struct rcu_head rcu; struct rcu_head rcu;
int id; int id;
u64 stag_arr[OCRDMA_MAX_STAG]; u64 *stag_arr;
u8 sl; /* service level */
bool pfc_state;
atomic_t update_sl;
u16 pvid; u16 pvid;
u32 asic_id; u32 asic_id;
...@@ -518,4 +522,22 @@ static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev) ...@@ -518,4 +522,22 @@ static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
OCRDMA_SLI_ASIC_GEN_NUM_SHIFT; OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
} }
static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio)
{
return *(pfc + prio);
}
static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio)
{
return *(app_prio + prio);
}
static inline u8 ocrdma_is_enabled_and_synced(u32 state)
{ /* May also be used to interpret TC-state, QCN-state
* Appl-state and Logical-link-state in future.
*/
return (state & OCRDMA_STATE_FLAG_ENABLED) &&
(state & OCRDMA_STATE_FLAG_SYNC);
}
#endif #endif
...@@ -35,6 +35,8 @@ ...@@ -35,6 +35,8 @@
#include "ocrdma_ah.h" #include "ocrdma_ah.h"
#include "ocrdma_hw.h" #include "ocrdma_hw.h"
#define OCRDMA_VID_PCP_SHIFT 0xD
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
struct ib_ah_attr *attr, int pdid) struct ib_ah_attr *attr, int pdid)
{ {
...@@ -55,7 +57,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, ...@@ -55,7 +57,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
if (vlan_tag && (vlan_tag < 0x1000)) { if (vlan_tag && (vlan_tag < 0x1000)) {
eth.eth_type = cpu_to_be16(0x8100); eth.eth_type = cpu_to_be16(0x8100);
eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
vlan_tag |= (attr->sl & 7) << 13; vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
eth.vlan_tag = cpu_to_be16(vlan_tag); eth.vlan_tag = cpu_to_be16(vlan_tag);
eth_sz = sizeof(struct ocrdma_eth_vlan); eth_sz = sizeof(struct ocrdma_eth_vlan);
vlan_enabled = true; vlan_enabled = true;
...@@ -100,6 +102,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) ...@@ -100,6 +102,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
if (!(attr->ah_flags & IB_AH_GRH)) if (!(attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
if (atomic_cmpxchg(&dev->update_sl, 1, 0))
ocrdma_init_service_level(dev);
ah = kzalloc(sizeof(*ah), GFP_ATOMIC); ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah) if (!ah)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
......
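
Note: set_av_attr() now takes the 3-bit priority from the per-device service level (dev->sl, learned via DCBX) instead of the address-handle attribute, and OCRDMA_VID_PCP_SHIFT is 13 because the 802.1Q tag control word is laid out as PCP (3 bits) | DEI (1 bit) | VLAN ID (12 bits). A standalone sketch of composing that tag (names hypothetical):

    #include <stdint.h>

    #define VID_PCP_SHIFT   13  /* PCP occupies bits 15..13 of the TCI */

    /* Build an 802.1Q tag control word from a 12-bit VLAN ID and a
     * 3-bit priority (e.g. the RoCE service level chosen via DCBX).
     */
    static uint16_t build_vlan_tci(uint16_t vlan_id, uint8_t prio)
    {
        return (vlan_id & 0x0fff) |
               ((uint16_t)(prio & 0x07) << VID_PCP_SHIFT);
    }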
...@@ -525,7 +525,7 @@ static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev, ...@@ -525,7 +525,7 @@ static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS; cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
cmd->eqn = eq->id; cmd->eqn = eq->id;
cmd->cqe_count = cq->size / sizeof(struct ocrdma_mcqe); cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);
ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE, ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
cq->dma, PAGE_SIZE_4K); cq->dma, PAGE_SIZE_4K);
...@@ -661,7 +661,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, ...@@ -661,7 +661,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
{ {
struct ocrdma_qp *qp = NULL; struct ocrdma_qp *qp = NULL;
struct ocrdma_cq *cq = NULL; struct ocrdma_cq *cq = NULL;
struct ib_event ib_evt = { 0 }; struct ib_event ib_evt;
int cq_event = 0; int cq_event = 0;
int qp_event = 1; int qp_event = 1;
int srq_event = 0; int srq_event = 0;
...@@ -674,6 +674,8 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, ...@@ -674,6 +674,8 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK]; cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];
memset(&ib_evt, 0, sizeof(ib_evt));
ib_evt.device = &dev->ibdev; ib_evt.device = &dev->ibdev;
switch (type) { switch (type) {
...@@ -771,6 +773,10 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev, ...@@ -771,6 +773,10 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
OCRDMA_AE_PVID_MCQE_TAG_MASK) >> OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
OCRDMA_AE_PVID_MCQE_TAG_SHIFT); OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
break; break;
case OCRDMA_ASYNC_EVENT_COS_VALUE:
atomic_set(&dev->update_sl, 1);
break;
default: default:
/* Not interested evts. */ /* Not interested evts. */
break; break;
...@@ -962,8 +968,12 @@ static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev) ...@@ -962,8 +968,12 @@ static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
msecs_to_jiffies(30000)); msecs_to_jiffies(30000));
if (status) if (status)
return 0; return 0;
else else {
dev->mqe_ctx.fw_error_state = true;
pr_err("%s(%d) mailbox timeout: fw not responding\n",
__func__, dev->id);
return -1; return -1;
}
} }
/* issue a mailbox command on the MQ */ /* issue a mailbox command on the MQ */
...@@ -975,6 +985,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe) ...@@ -975,6 +985,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
struct ocrdma_mbx_rsp *rsp = NULL; struct ocrdma_mbx_rsp *rsp = NULL;
mutex_lock(&dev->mqe_ctx.lock); mutex_lock(&dev->mqe_ctx.lock);
if (dev->mqe_ctx.fw_error_state)
goto mbx_err;
ocrdma_post_mqe(dev, mqe); ocrdma_post_mqe(dev, mqe);
status = ocrdma_wait_mqe_cmpl(dev); status = ocrdma_wait_mqe_cmpl(dev);
if (status) if (status)
...@@ -1078,7 +1090,8 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, ...@@ -1078,7 +1090,8 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT; OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
attr->max_mw = rsp->max_mw; attr->max_mw = rsp->max_mw;
attr->max_mr = rsp->max_mr; attr->max_mr = rsp->max_mr;
attr->max_mr_size = ~0ull; attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
rsp->max_mr_size_lo;
attr->max_fmr = 0; attr->max_fmr = 0;
attr->max_pages_per_frmr = rsp->max_pages_per_frmr; attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
attr->max_num_mr_pbl = rsp->max_num_mr_pbl; attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
...@@ -1252,7 +1265,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev) ...@@ -1252,7 +1265,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va; ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs; hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;
dev->hba_port_num = hba_attribs->phy_port; dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
OCRDMA_HBA_ATTRB_PTNUM_MASK)
>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
strncpy(dev->model_number, strncpy(dev->model_number,
hba_attribs->controller_model_number, 31); hba_attribs->controller_model_number, 31);
} }
...@@ -1302,7 +1317,8 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed) ...@@ -1302,7 +1317,8 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
goto mbx_err; goto mbx_err;
rsp = (struct ocrdma_get_link_speed_rsp *)cmd; rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
*lnk_speed = rsp->phys_port_speed; *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
>> OCRDMA_PHY_PS_SHIFT;
mbx_err: mbx_err:
kfree(cmd); kfree(cmd);
...@@ -1328,11 +1344,16 @@ static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev) ...@@ -1328,11 +1344,16 @@ static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
goto mbx_err; goto mbx_err;
rsp = (struct ocrdma_get_phy_info_rsp *)cmd; rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
dev->phy.phy_type = le16_to_cpu(rsp->phy_type); dev->phy.phy_type =
(rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK);
dev->phy.interface_type =
(rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK)
>> OCRDMA_IF_TYPE_SHIFT;
dev->phy.auto_speeds_supported = dev->phy.auto_speeds_supported =
le16_to_cpu(rsp->auto_speeds_supported); (rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK);
dev->phy.fixed_speeds_supported = dev->phy.fixed_speeds_supported =
le16_to_cpu(rsp->fixed_speeds_supported); (rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK)
>> OCRDMA_FSPEED_SUPP_SHIFT;
mbx_err: mbx_err:
kfree(cmd); kfree(cmd);
return status; return status;
...@@ -1457,8 +1478,8 @@ static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev) ...@@ -1457,8 +1478,8 @@ static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va; pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) { for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
pbes[i].pa_lo = (u32) (pa & 0xffffffff); pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff);
pbes[i].pa_hi = (u32) upper_32_bits(pa); pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa));
pa += PAGE_SIZE; pa += PAGE_SIZE;
} }
cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF); cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
...@@ -1501,6 +1522,7 @@ static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev) ...@@ -1501,6 +1522,7 @@ static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va, dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
dev->av_tbl.pa); dev->av_tbl.pa);
dev->av_tbl.va = NULL;
dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va, dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
dev->av_tbl.pbl.pa); dev->av_tbl.pbl.pa);
kfree(cmd); kfree(cmd);
...@@ -1624,14 +1646,16 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, ...@@ -1624,14 +1646,16 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP << cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
OCRDMA_CREATE_CQ_TYPE_SHIFT; OCRDMA_CREATE_CQ_TYPE_SHIFT;
cq->phase_change = false; cq->phase_change = false;
cmd->cmd.cqe_count = (cq->len / cqe_size); cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
} else { } else {
cmd->cmd.cqe_count = (cq->len / cqe_size) - 1; cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID; cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
cq->phase_change = true; cq->phase_change = true;
} }
cmd->cmd.pd_id = pd_id; /* valid only for v3 */ /* pd_id valid only for v3 */
cmd->cmd.pdid_cqecnt |= (pd_id <<
OCRDMA_CREATE_CQ_CMD_PDID_SHIFT);
ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size); ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status) if (status)
...@@ -2206,7 +2230,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, ...@@ -2206,7 +2230,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK; OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
qp->rq_cq = cq; qp->rq_cq = cq;
if (pd->dpp_enabled && pd->num_dpp_qp) { if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
(attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq, ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
dpp_cq_id); dpp_cq_id);
} }
...@@ -2264,6 +2289,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, ...@@ -2264,6 +2289,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
if ((ah_attr->ah_flags & IB_AH_GRH) == 0) if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
return -EINVAL; return -EINVAL;
if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
ocrdma_init_service_level(qp->dev);
cmd->params.tclass_sq_psn |= cmd->params.tclass_sq_psn |=
(ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT); (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
cmd->params.rnt_rc_sl_fl |= cmd->params.rnt_rc_sl_fl |=
...@@ -2297,6 +2324,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, ...@@ -2297,6 +2324,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
cmd->params.vlan_dmac_b4_to_b5 |= cmd->params.vlan_dmac_b4_to_b5 |=
vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
cmd->params.rnt_rc_sl_fl |=
(qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
} }
return 0; return 0;
} }
...@@ -2604,6 +2633,168 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq) ...@@ -2604,6 +2633,168 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
return status; return status;
} }
static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
struct ocrdma_dcbx_cfg *dcbxcfg)
{
int status = 0;
dma_addr_t pa;
struct ocrdma_mqe cmd;
struct ocrdma_get_dcbx_cfg_req *req = NULL;
struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL;
struct pci_dev *pdev = dev->nic_info.pdev;
struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge;
memset(&cmd, 0, sizeof(struct ocrdma_mqe));
cmd.hdr.pyld_len = max_t(u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp),
sizeof(struct ocrdma_get_dcbx_cfg_req));
req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
if (!req) {
status = -ENOMEM;
goto mem_err;
}
cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
OCRDMA_MQE_HDR_SGE_CNT_MASK;
mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL);
mqe_sge->pa_hi = (u32) upper_32_bits(pa);
mqe_sge->len = cmd.hdr.pyld_len;
memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req));
ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
req->param_type = ptype;
status = ocrdma_mbx_cmd(dev, &cmd);
if (status)
goto mbx_err;
rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp));
memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg));
mbx_err:
dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
mem_err:
return status;
}
#define OCRDMA_MAX_SERVICE_LEVEL_INDEX 0x08
#define OCRDMA_DEFAULT_SERVICE_LEVEL 0x05
static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
struct ocrdma_dcbx_cfg *dcbxcfg,
u8 *srvc_lvl)
{
int status = -EINVAL, indx, slindx;
int ventry_cnt;
struct ocrdma_app_parameter *app_param;
u8 valid, proto_sel;
u8 app_prio, pfc_prio;
u16 proto;
if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) {
pr_info("%s ocrdma%d DCBX is disabled\n",
dev_name(&dev->nic_info.pdev->dev), dev->id);
goto out;
}
if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) {
pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n",
dev_name(&dev->nic_info.pdev->dev), dev->id,
(ptype > 0 ? "operational" : "admin"),
(dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ?
"enabled" : "disabled",
(dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ?
"" : ", not sync'ed");
goto out;
} else {
pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n",
dev_name(&dev->nic_info.pdev->dev), dev->id);
}
ventry_cnt = (dcbxcfg->tcv_aev_opv_st >>
OCRDMA_DCBX_APP_ENTRY_SHIFT)
& OCRDMA_DCBX_STATE_MASK;
for (indx = 0; indx < ventry_cnt; indx++) {
app_param = &dcbxcfg->app_param[indx];
valid = (app_param->valid_proto_app >>
OCRDMA_APP_PARAM_VALID_SHIFT)
& OCRDMA_APP_PARAM_VALID_MASK;
proto_sel = (app_param->valid_proto_app
>> OCRDMA_APP_PARAM_PROTO_SEL_SHIFT)
& OCRDMA_APP_PARAM_PROTO_SEL_MASK;
proto = app_param->valid_proto_app &
OCRDMA_APP_PARAM_APP_PROTO_MASK;
if (valid && proto == OCRDMA_APP_PROTO_ROCE &&
    proto_sel == OCRDMA_PROTO_SELECT_L2) {
for (slindx = 0; slindx <
OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
app_prio = ocrdma_get_app_prio(
(u8 *)app_param->app_prio,
slindx);
pfc_prio = ocrdma_get_pfc_prio(
(u8 *)dcbxcfg->pfc_prio,
slindx);
if (app_prio && pfc_prio) {
*srvc_lvl = slindx;
status = 0;
goto out;
}
}
if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) {
pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n",
dev_name(&dev->nic_info.pdev->dev),
dev->id, proto);
}
}
}
out:
return status;
}
void ocrdma_init_service_level(struct ocrdma_dev *dev)
{
int status = 0, indx;
struct ocrdma_dcbx_cfg dcbxcfg;
u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL;
int ptype = OCRDMA_PARAMETER_TYPE_OPER;
for (indx = 0; indx < 2; indx++) {
status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg);
if (status) {
pr_err("%s(): status=%d\n", __func__, status);
ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
continue;
}
status = ocrdma_parse_dcbxcfg_rsp(dev, ptype,
&dcbxcfg, &srvc_lvl);
if (status) {
ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
continue;
}
break;
}
if (status)
pr_info("%s ocrdma%d service level default\n",
dev_name(&dev->nic_info.pdev->dev), dev->id);
else
pr_info("%s ocrdma%d service level %d\n",
dev_name(&dev->nic_info.pdev->dev), dev->id,
srvc_lvl);
dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state);
dev->sl = srvc_lvl;
}
int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah) int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{ {
int i; int i;
...@@ -2709,13 +2900,15 @@ int ocrdma_init_hw(struct ocrdma_dev *dev) ...@@ -2709,13 +2900,15 @@ int ocrdma_init_hw(struct ocrdma_dev *dev)
goto conf_err; goto conf_err;
status = ocrdma_mbx_get_phy_info(dev); status = ocrdma_mbx_get_phy_info(dev);
if (status) if (status)
goto conf_err; goto info_attrb_err;
status = ocrdma_mbx_get_ctrl_attribs(dev); status = ocrdma_mbx_get_ctrl_attribs(dev);
if (status) if (status)
goto conf_err; goto info_attrb_err;
return 0; return 0;
info_attrb_err:
ocrdma_mbx_delete_ah_tbl(dev);
conf_err: conf_err:
ocrdma_destroy_mq(dev); ocrdma_destroy_mq(dev);
mq_err: mq_err:
......
...@@ -135,4 +135,6 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq); ...@@ -135,4 +135,6 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset); int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
char *port_speed_string(struct ocrdma_dev *dev); char *port_speed_string(struct ocrdma_dev *dev);
void ocrdma_init_service_level(struct ocrdma_dev *);
#endif /* __OCRDMA_HW_H__ */ #endif /* __OCRDMA_HW_H__ */
...@@ -324,6 +324,11 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev) ...@@ -324,6 +324,11 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
if (!dev->qp_tbl) if (!dev->qp_tbl)
goto alloc_err; goto alloc_err;
} }
dev->stag_arr = kzalloc(sizeof(u64) * OCRDMA_MAX_STAG, GFP_KERNEL);
if (dev->stag_arr == NULL)
goto alloc_err;
spin_lock_init(&dev->av_tbl.lock); spin_lock_init(&dev->av_tbl.lock);
spin_lock_init(&dev->flush_q_lock); spin_lock_init(&dev->flush_q_lock);
return 0; return 0;
...@@ -334,6 +339,7 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev) ...@@ -334,6 +339,7 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
static void ocrdma_free_resources(struct ocrdma_dev *dev) static void ocrdma_free_resources(struct ocrdma_dev *dev)
{ {
kfree(dev->stag_arr);
kfree(dev->qp_tbl); kfree(dev->qp_tbl);
kfree(dev->cq_tbl); kfree(dev->cq_tbl);
kfree(dev->sgid_tbl); kfree(dev->sgid_tbl);
...@@ -353,15 +359,25 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, ...@@ -353,15 +359,25 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
{ {
struct ocrdma_dev *dev = dev_get_drvdata(device); struct ocrdma_dev *dev = dev_get_drvdata(device);
return scnprintf(buf, PAGE_SIZE, "%s", &dev->attr.fw_ver[0]); return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->attr.fw_ver[0]);
}
static ssize_t show_hca_type(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ocrdma_dev *dev = dev_get_drvdata(device);
return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
} }
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
static struct device_attribute *ocrdma_attributes[] = { static struct device_attribute *ocrdma_attributes[] = {
&dev_attr_hw_rev, &dev_attr_hw_rev,
&dev_attr_fw_ver &dev_attr_fw_ver,
&dev_attr_hca_type
}; };
static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
...@@ -372,6 +388,58 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) ...@@ -372,6 +388,58 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]); device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
} }
static void ocrdma_init_ipv4_gids(struct ocrdma_dev *dev,
struct net_device *net)
{
struct in_device *in_dev;
union ib_gid gid;
in_dev = in_dev_get(net);
if (in_dev) {
for_ifa(in_dev) {
ipv6_addr_set_v4mapped(ifa->ifa_address,
(struct in6_addr *)&gid);
ocrdma_add_sgid(dev, &gid);
}
endfor_ifa(in_dev);
in_dev_put(in_dev);
}
}
static void ocrdma_init_ipv6_gids(struct ocrdma_dev *dev,
struct net_device *net)
{
#if IS_ENABLED(CONFIG_IPV6)
struct inet6_dev *in6_dev;
union ib_gid *pgid;
struct inet6_ifaddr *ifp;
in6_dev = in6_dev_get(net);
if (in6_dev) {
read_lock_bh(&in6_dev->lock);
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
pgid = (union ib_gid *)&ifp->addr;
ocrdma_add_sgid(dev, pgid);
}
read_unlock_bh(&in6_dev->lock);
in6_dev_put(in6_dev);
}
#endif
}
static void ocrdma_init_gid_table(struct ocrdma_dev *dev)
{
struct net_device *net_dev;
for_each_netdev(&init_net, net_dev) {
struct net_device *real_dev = rdma_vlan_dev_real_dev(net_dev) ?
rdma_vlan_dev_real_dev(net_dev) : net_dev;
if (real_dev == dev->nic_info.netdev) {
ocrdma_init_ipv4_gids(dev, net_dev);
ocrdma_init_ipv6_gids(dev, net_dev);
}
}
}
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{ {
int status = 0, i; int status = 0, i;
...@@ -399,6 +467,8 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) ...@@ -399,6 +467,8 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (status) if (status)
goto alloc_err; goto alloc_err;
ocrdma_init_service_level(dev);
ocrdma_init_gid_table(dev);
status = ocrdma_register_device(dev); status = ocrdma_register_device(dev);
if (status) if (status)
goto alloc_err; goto alloc_err;
...@@ -508,6 +578,12 @@ static int ocrdma_close(struct ocrdma_dev *dev) ...@@ -508,6 +578,12 @@ static int ocrdma_close(struct ocrdma_dev *dev)
return 0; return 0;
} }
static void ocrdma_shutdown(struct ocrdma_dev *dev)
{
ocrdma_close(dev);
ocrdma_remove(dev);
}
/* event handling via NIC driver ensures that all the NIC specific /* event handling via NIC driver ensures that all the NIC specific
* initialization done before RoCE driver notifies * initialization done before RoCE driver notifies
* event to stack. * event to stack.
...@@ -521,6 +597,9 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event) ...@@ -521,6 +597,9 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
case BE_DEV_DOWN: case BE_DEV_DOWN:
ocrdma_close(dev); ocrdma_close(dev);
break; break;
case BE_DEV_SHUTDOWN:
ocrdma_shutdown(dev);
break;
} }
} }
......
...@@ -44,35 +44,39 @@ enum { ...@@ -44,35 +44,39 @@ enum {
#define OCRDMA_SUBSYS_ROCE 10 #define OCRDMA_SUBSYS_ROCE 10
enum { enum {
OCRDMA_CMD_QUERY_CONFIG = 1, OCRDMA_CMD_QUERY_CONFIG = 1,
OCRDMA_CMD_ALLOC_PD, OCRDMA_CMD_ALLOC_PD = 2,
OCRDMA_CMD_DEALLOC_PD, OCRDMA_CMD_DEALLOC_PD = 3,
OCRDMA_CMD_CREATE_AH_TBL, OCRDMA_CMD_CREATE_AH_TBL = 4,
OCRDMA_CMD_DELETE_AH_TBL, OCRDMA_CMD_DELETE_AH_TBL = 5,
OCRDMA_CMD_CREATE_QP, OCRDMA_CMD_CREATE_QP = 6,
OCRDMA_CMD_QUERY_QP, OCRDMA_CMD_QUERY_QP = 7,
OCRDMA_CMD_MODIFY_QP, OCRDMA_CMD_MODIFY_QP = 8,
OCRDMA_CMD_DELETE_QP, OCRDMA_CMD_DELETE_QP = 9,
OCRDMA_CMD_RSVD1, OCRDMA_CMD_RSVD1 = 10,
OCRDMA_CMD_ALLOC_LKEY, OCRDMA_CMD_ALLOC_LKEY = 11,
OCRDMA_CMD_DEALLOC_LKEY, OCRDMA_CMD_DEALLOC_LKEY = 12,
OCRDMA_CMD_REGISTER_NSMR, OCRDMA_CMD_REGISTER_NSMR = 13,
OCRDMA_CMD_REREGISTER_NSMR, OCRDMA_CMD_REREGISTER_NSMR = 14,
OCRDMA_CMD_REGISTER_NSMR_CONT, OCRDMA_CMD_REGISTER_NSMR_CONT = 15,
OCRDMA_CMD_QUERY_NSMR, OCRDMA_CMD_QUERY_NSMR = 16,
OCRDMA_CMD_ALLOC_MW, OCRDMA_CMD_ALLOC_MW = 17,
OCRDMA_CMD_QUERY_MW, OCRDMA_CMD_QUERY_MW = 18,
OCRDMA_CMD_CREATE_SRQ, OCRDMA_CMD_CREATE_SRQ = 19,
OCRDMA_CMD_QUERY_SRQ, OCRDMA_CMD_QUERY_SRQ = 20,
OCRDMA_CMD_MODIFY_SRQ, OCRDMA_CMD_MODIFY_SRQ = 21,
OCRDMA_CMD_DELETE_SRQ, OCRDMA_CMD_DELETE_SRQ = 22,
OCRDMA_CMD_ATTACH_MCAST, OCRDMA_CMD_ATTACH_MCAST = 23,
OCRDMA_CMD_DETACH_MCAST, OCRDMA_CMD_DETACH_MCAST = 24,
OCRDMA_CMD_GET_RDMA_STATS,
OCRDMA_CMD_CREATE_RBQ = 25,
OCRDMA_CMD_DESTROY_RBQ = 26,
OCRDMA_CMD_GET_RDMA_STATS = 27,
OCRDMA_CMD_MAX OCRDMA_CMD_MAX
}; };
...@@ -103,7 +107,7 @@ enum { ...@@ -103,7 +107,7 @@ enum {
#define OCRDMA_MAX_QP 2048 #define OCRDMA_MAX_QP 2048
#define OCRDMA_MAX_CQ 2048 #define OCRDMA_MAX_CQ 2048
#define OCRDMA_MAX_STAG 8192 #define OCRDMA_MAX_STAG 16384
enum { enum {
OCRDMA_DB_RQ_OFFSET = 0xE0, OCRDMA_DB_RQ_OFFSET = 0xE0,
...@@ -422,7 +426,12 @@ struct ocrdma_ae_qp_mcqe { ...@@ -422,7 +426,12 @@ struct ocrdma_ae_qp_mcqe {
#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14 #define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5 #define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
#define OCRDMA_ASYNC_EVENT_PVID_STATE 0x3
enum ocrdma_async_grp5_events {
OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01,
OCRDMA_ASYNC_EVENT_COS_VALUE = 0x02,
OCRDMA_ASYNC_EVENT_PVID_STATE = 0x03
};
enum OCRDMA_ASYNC_EVENT_TYPE { enum OCRDMA_ASYNC_EVENT_TYPE {
OCRDMA_CQ_ERROR = 0x00, OCRDMA_CQ_ERROR = 0x00,
...@@ -525,8 +534,8 @@ struct ocrdma_mbx_query_config { ...@@ -525,8 +534,8 @@ struct ocrdma_mbx_query_config {
u32 max_ird_ord_per_qp; u32 max_ird_ord_per_qp;
u32 max_shared_ird_ord; u32 max_shared_ird_ord;
u32 max_mr; u32 max_mr;
u32 max_mr_size_lo;
u32 max_mr_size_hi; u32 max_mr_size_hi;
u32 max_mr_size_lo;
u32 max_num_mr_pbl; u32 max_num_mr_pbl;
u32 max_mw; u32 max_mw;
u32 max_fmr; u32 max_fmr;
...@@ -580,17 +589,26 @@ enum { ...@@ -580,17 +589,26 @@ enum {
OCRDMA_FN_MODE_RDMA = 0x4 OCRDMA_FN_MODE_RDMA = 0x4
}; };
enum {
OCRDMA_IF_TYPE_MASK = 0xFFFF0000,
OCRDMA_IF_TYPE_SHIFT = 0x10,
OCRDMA_PHY_TYPE_MASK = 0x0000FFFF,
OCRDMA_FUTURE_DETAILS_MASK = 0xFFFF0000,
OCRDMA_FUTURE_DETAILS_SHIFT = 0x10,
OCRDMA_EX_PHY_DETAILS_MASK = 0x0000FFFF,
OCRDMA_FSPEED_SUPP_MASK = 0xFFFF0000,
OCRDMA_FSPEED_SUPP_SHIFT = 0x10,
OCRDMA_ASPEED_SUPP_MASK = 0x0000FFFF
};
struct ocrdma_get_phy_info_rsp { struct ocrdma_get_phy_info_rsp {
struct ocrdma_mqe_hdr hdr; struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp; struct ocrdma_mbx_rsp rsp;
u16 phy_type; u32 ityp_ptyp;
u16 interface_type;
u32 misc_params; u32 misc_params;
u16 ext_phy_details; u32 ftrdtl_exphydtl;
u16 rsvd; u32 fspeed_aspeed;
u16 auto_speeds_supported;
u16 fixed_speeds_supported;
u32 future_use[2]; u32 future_use[2];
}; };
...@@ -603,19 +621,34 @@ enum { ...@@ -603,19 +621,34 @@ enum {
OCRDMA_PHY_SPEED_40GBPS = 0x20 OCRDMA_PHY_SPEED_40GBPS = 0x20
}; };
enum {
OCRDMA_PORT_NUM_MASK = 0x3F,
OCRDMA_PT_MASK = 0xC0,
OCRDMA_PT_SHIFT = 0x6,
OCRDMA_LINK_DUP_MASK = 0x0000FF00,
OCRDMA_LINK_DUP_SHIFT = 0x8,
OCRDMA_PHY_PS_MASK = 0x00FF0000,
OCRDMA_PHY_PS_SHIFT = 0x10,
OCRDMA_PHY_PFLT_MASK = 0xFF000000,
OCRDMA_PHY_PFLT_SHIFT = 0x18,
OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000,
OCRDMA_QOS_LNKSP_SHIFT = 0x10,
OCRDMA_LLST_MASK = 0xFF,
OCRDMA_PLFC_MASK = 0x00000400,
OCRDMA_PLFC_SHIFT = 0x8,
OCRDMA_PLRFC_MASK = 0x00000200,
OCRDMA_PLRFC_SHIFT = 0x8,
OCRDMA_PLTFC_MASK = 0x00000100,
OCRDMA_PLTFC_SHIFT = 0x8
};
struct ocrdma_get_link_speed_rsp { struct ocrdma_get_link_speed_rsp {
struct ocrdma_mqe_hdr hdr; struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp; struct ocrdma_mbx_rsp rsp;
u8 pt_port_num; u32 pflt_pps_ld_pnum;
u8 link_duplex; u32 qos_lsp;
u8 phys_port_speed; u32 res_lls;
u8 phys_port_fault;
u16 rsvd1;
u16 qos_lnk_speed;
u8 logical_lnk_status;
u8 rsvd2[3];
}; };
enum { enum {
...@@ -666,8 +699,7 @@ struct ocrdma_create_cq_cmd { ...@@ -666,8 +699,7 @@ struct ocrdma_create_cq_cmd {
u32 pgsz_pgcnt; u32 pgsz_pgcnt;
u32 ev_cnt_flags; u32 ev_cnt_flags;
u32 eqn; u32 eqn;
u16 cqe_count; u32 pdid_cqecnt;
u16 pd_id;
u32 rsvd6; u32 rsvd6;
struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES]; struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES];
}; };
...@@ -677,6 +709,10 @@ struct ocrdma_create_cq { ...@@ -677,6 +709,10 @@ struct ocrdma_create_cq {
struct ocrdma_create_cq_cmd cmd; struct ocrdma_create_cq_cmd cmd;
}; };
enum {
OCRDMA_CREATE_CQ_CMD_PDID_SHIFT = 0x10
};
enum { enum {
OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF
}; };
...@@ -1231,7 +1267,6 @@ struct ocrdma_destroy_srq { ...@@ -1231,7 +1267,6 @@ struct ocrdma_destroy_srq {
enum { enum {
OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16), OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16),
OCRDMA_PD_MAX_DPP_ENABLED_QP = 8,
OCRDMA_DPP_PAGE_SIZE = 4096 OCRDMA_DPP_PAGE_SIZE = 4096
}; };
...@@ -1896,12 +1931,62 @@ struct ocrdma_rdma_stats_resp { ...@@ -1896,12 +1931,62 @@ struct ocrdma_rdma_stats_resp {
struct ocrdma_rx_dbg_stats rx_dbg_stats; struct ocrdma_rx_dbg_stats rx_dbg_stats;
} __packed; } __packed;
enum {
OCRDMA_HBA_ATTRB_EPROM_VER_LO_MASK = 0xFF,
OCRDMA_HBA_ATTRB_EPROM_VER_HI_MASK = 0xFF00,
OCRDMA_HBA_ATTRB_EPROM_VER_HI_SHIFT = 0x08,
OCRDMA_HBA_ATTRB_CDBLEN_MASK = 0xFFFF,
OCRDMA_HBA_ATTRB_ASIC_REV_MASK = 0xFF0000,
OCRDMA_HBA_ATTRB_ASIC_REV_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_GUID0_MASK = 0xFF000000,
OCRDMA_HBA_ATTRB_GUID0_SHIFT = 0x18,
OCRDMA_HBA_ATTRB_GUID13_MASK = 0xFF,
OCRDMA_HBA_ATTRB_GUID14_MASK = 0xFF00,
OCRDMA_HBA_ATTRB_GUID14_SHIFT = 0x08,
OCRDMA_HBA_ATTRB_GUID15_MASK = 0xFF0000,
OCRDMA_HBA_ATTRB_GUID15_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_PCNT_MASK = 0xFF000000,
OCRDMA_HBA_ATTRB_PCNT_SHIFT = 0x18,
OCRDMA_HBA_ATTRB_LDTOUT_MASK = 0xFFFF,
OCRDMA_HBA_ATTRB_ISCSI_VER_MASK = 0xFF0000,
OCRDMA_HBA_ATTRB_ISCSI_VER_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_MFUNC_DEV_MASK = 0xFF000000,
OCRDMA_HBA_ATTRB_MFUNC_DEV_SHIFT = 0x18,
OCRDMA_HBA_ATTRB_CV_MASK = 0xFF,
OCRDMA_HBA_ATTRB_HBA_ST_MASK = 0xFF00,
OCRDMA_HBA_ATTRB_HBA_ST_SHIFT = 0x08,
OCRDMA_HBA_ATTRB_MAX_DOMS_MASK = 0xFF0000,
OCRDMA_HBA_ATTRB_MAX_DOMS_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_PTNUM_MASK = 0x3F000000,
OCRDMA_HBA_ATTRB_PTNUM_SHIFT = 0x18,
OCRDMA_HBA_ATTRB_PT_MASK = 0xC0000000,
OCRDMA_HBA_ATTRB_PT_SHIFT = 0x1E,
OCRDMA_HBA_ATTRB_ISCSI_FET_MASK = 0xFF,
OCRDMA_HBA_ATTRB_ASIC_GEN_MASK = 0xFF00,
OCRDMA_HBA_ATTRB_ASIC_GEN_SHIFT = 0x08,
OCRDMA_HBA_ATTRB_PCI_VID_MASK = 0xFFFF,
OCRDMA_HBA_ATTRB_PCI_DID_MASK = 0xFFFF0000,
OCRDMA_HBA_ATTRB_PCI_DID_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_PCI_SVID_MASK = 0xFFFF,
OCRDMA_HBA_ATTRB_PCI_SSID_MASK = 0xFFFF0000,
OCRDMA_HBA_ATTRB_PCI_SSID_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_PCI_BUSNUM_MASK = 0xFF,
OCRDMA_HBA_ATTRB_PCI_DEVNUM_MASK = 0xFF00,
OCRDMA_HBA_ATTRB_PCI_DEVNUM_SHIFT = 0x08,
OCRDMA_HBA_ATTRB_PCI_FUNCNUM_MASK = 0xFF0000,
OCRDMA_HBA_ATTRB_PCI_FUNCNUM_SHIFT = 0x10,
OCRDMA_HBA_ATTRB_IF_TYPE_MASK = 0xFF000000,
OCRDMA_HBA_ATTRB_IF_TYPE_SHIFT = 0x18,
OCRDMA_HBA_ATTRB_NETFIL_MASK = 0xFF
};
struct mgmt_hba_attribs { struct mgmt_hba_attribs {
u8 flashrom_version_string[32]; u8 flashrom_version_string[32];
u8 manufacturer_name[32]; u8 manufacturer_name[32];
u32 supported_modes; u32 supported_modes;
u32 rsvd0[3]; u32 rsvd_eprom_verhi_verlo;
u32 mbx_ds_ver;
u32 epfw_ds_ver;
u8 ncsi_ver_string[12]; u8 ncsi_ver_string[12];
u32 default_extended_timeout; u32 default_extended_timeout;
u8 controller_model_number[32]; u8 controller_model_number[32];
...@@ -1914,34 +1999,26 @@ struct mgmt_hba_attribs { ...@@ -1914,34 +1999,26 @@ struct mgmt_hba_attribs {
u8 driver_version_string[32]; u8 driver_version_string[32];
u8 fw_on_flash_version_string[32]; u8 fw_on_flash_version_string[32];
u32 functionalities_supported; u32 functionalities_supported;
u16 max_cdblength; u32 guid0_asicrev_cdblen;
u8 asic_revision; u8 generational_guid[12];
u8 generational_guid[16]; u32 portcnt_guid15;
u8 hba_port_count; u32 mfuncdev_iscsi_ldtout;
u16 default_link_down_timeout; u32 ptpnum_maxdoms_hbast_cv;
u8 iscsi_ver_min_max;
u8 multifunction_device;
u8 cache_valid;
u8 hba_status;
u8 max_domains_supported;
u8 phy_port;
u32 firmware_post_status; u32 firmware_post_status;
u32 hba_mtu[8]; u32 hba_mtu[8];
u32 rsvd1[4]; u32 res_asicgen_iscsi_feaures;
u32 rsvd1[3];
}; };
struct mgmt_controller_attrib { struct mgmt_controller_attrib {
struct mgmt_hba_attribs hba_attribs; struct mgmt_hba_attribs hba_attribs;
u16 pci_vendor_id; u32 pci_did_vid;
u16 pci_device_id; u32 pci_ssid_svid;
u16 pci_sub_vendor_id; u32 ityp_fnum_devnum_bnum;
u16 pci_sub_system_id; u32 uid_hi;
u8 pci_bus_number; u32 uid_lo;
u8 pci_device_number; u32 res_nnetfil;
u8 pci_function_number; u32 rsvd0[4];
u8 interface_type;
u64 unique_identifier;
u32 rsvd0[5];
}; };
struct ocrdma_get_ctrl_attribs_rsp { struct ocrdma_get_ctrl_attribs_rsp {
...@@ -1949,5 +2026,79 @@ struct ocrdma_get_ctrl_attribs_rsp { ...@@ -1949,5 +2026,79 @@ struct ocrdma_get_ctrl_attribs_rsp {
struct mgmt_controller_attrib ctrl_attribs; struct mgmt_controller_attrib ctrl_attribs;
}; };
#define OCRDMA_SUBSYS_DCBX 0x10
enum OCRDMA_DCBX_OPCODE {
OCRDMA_CMD_GET_DCBX_CONFIG = 0x01
};
enum OCRDMA_DCBX_PARAM_TYPE {
OCRDMA_PARAMETER_TYPE_ADMIN = 0x00,
OCRDMA_PARAMETER_TYPE_OPER = 0x01,
OCRDMA_PARAMETER_TYPE_PEER = 0x02
};
enum OCRDMA_DCBX_APP_PROTO {
OCRDMA_APP_PROTO_ROCE = 0x8915
};
enum OCRDMA_DCBX_PROTO {
OCRDMA_PROTO_SELECT_L2 = 0x00,
OCRDMA_PROTO_SELECT_L4 = 0x01
};
enum OCRDMA_DCBX_APP_PARAM {
OCRDMA_APP_PARAM_APP_PROTO_MASK = 0xFFFF,
OCRDMA_APP_PARAM_PROTO_SEL_MASK = 0xFF,
OCRDMA_APP_PARAM_PROTO_SEL_SHIFT = 0x10,
OCRDMA_APP_PARAM_VALID_MASK = 0xFF,
OCRDMA_APP_PARAM_VALID_SHIFT = 0x18
};
enum OCRDMA_DCBX_STATE_FLAGS {
OCRDMA_STATE_FLAG_ENABLED = 0x01,
OCRDMA_STATE_FLAG_ADDVERTISED = 0x02,
OCRDMA_STATE_FLAG_WILLING = 0x04,
OCRDMA_STATE_FLAG_SYNC = 0x08,
OCRDMA_STATE_FLAG_UNSUPPORTED = 0x40000000,
OCRDMA_STATE_FLAG_NEG_FAILD = 0x80000000
};
enum OCRDMA_TCV_AEV_OPV_ST {
OCRDMA_DCBX_TC_SUPPORT_MASK = 0xFF,
OCRDMA_DCBX_TC_SUPPORT_SHIFT = 0x18,
OCRDMA_DCBX_APP_ENTRY_SHIFT = 0x10,
OCRDMA_DCBX_OP_PARAM_SHIFT = 0x08,
OCRDMA_DCBX_STATE_MASK = 0xFF
};
struct ocrdma_app_parameter {
u32 valid_proto_app;
u32 oui;
u32 app_prio[2];
};
struct ocrdma_dcbx_cfg {
u32 tcv_aev_opv_st;
u32 tc_state;
u32 pfc_state;
u32 qcn_state;
u32 appl_state;
u32 ll_state;
u32 tc_bw[2];
u32 tc_prio[8];
u32 pfc_prio[2];
struct ocrdma_app_parameter app_param[15];
};
struct ocrdma_get_dcbx_cfg_req {
struct ocrdma_mbx_hdr hdr;
u32 param_type;
} __packed;
struct ocrdma_get_dcbx_cfg_rsp {
struct ocrdma_mbx_rsp hdr;
struct ocrdma_dcbx_cfg cfg;
} __packed;
#endif /* __OCRDMA_SLI_H__ */ #endif /* __OCRDMA_SLI_H__ */
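
Note: the CQ-create command now packs the PD id and the CQE count into the single 32-bit pdid_cqecnt field, with the PD id in the upper half (OCRDMA_CREATE_CQ_CMD_PDID_SHIFT is 0x10), replacing the separate u16 cqe_count/pd_id members. A sketch of the packing and the matching unpack (helper names are hypothetical):

    #include <stdint.h>

    #define CREATE_CQ_PDID_SHIFT 16 /* PD id in bits 31..16, CQE count in 15..0 */

    static inline uint32_t pack_pdid_cqecnt(uint16_t pd_id, uint16_t cqe_count)
    {
        return (uint32_t)cqe_count | ((uint32_t)pd_id << CREATE_CQ_PDID_SHIFT);
    }

    static inline uint16_t unpack_cqecnt(uint32_t pdid_cqecnt)
    {
        return (uint16_t)(pdid_cqecnt & 0xffff);
    }

    static inline uint16_t unpack_pdid(uint32_t pdid_cqecnt)
    {
        return (uint16_t)(pdid_cqecnt >> CREATE_CQ_PDID_SHIFT);
    }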
...@@ -69,11 +69,11 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) ...@@ -69,11 +69,11 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
attr->max_mr_size = ~0ull; attr->max_mr_size = dev->attr.max_mr_size;
attr->page_size_cap = 0xffff000; attr->page_size_cap = 0xffff000;
attr->vendor_id = dev->nic_info.pdev->vendor; attr->vendor_id = dev->nic_info.pdev->vendor;
attr->vendor_part_id = dev->nic_info.pdev->device; attr->vendor_part_id = dev->nic_info.pdev->device;
attr->hw_ver = 0; attr->hw_ver = dev->asic_id;
attr->max_qp = dev->attr.max_qp; attr->max_qp = dev->attr.max_qp;
attr->max_ah = OCRDMA_MAX_AH; attr->max_ah = OCRDMA_MAX_AH;
attr->max_qp_wr = dev->attr.max_wqe; attr->max_qp_wr = dev->attr.max_wqe;
...@@ -268,7 +268,8 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, ...@@ -268,7 +268,8 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
pd->dpp_enabled = pd->dpp_enabled =
ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
pd->num_dpp_qp = pd->num_dpp_qp =
pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0; pd->dpp_enabled ? (dev->nic_info.db_page_size /
dev->attr.wqe_size) : 0;
} }
retry: retry:
...@@ -328,7 +329,10 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) ...@@ -328,7 +329,10 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
struct ocrdma_pd *pd = uctx->cntxt_pd; struct ocrdma_pd *pd = uctx->cntxt_pd;
struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
BUG_ON(uctx->pd_in_use); if (uctx->pd_in_use) {
pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
__func__, dev->id, pd->id);
}
uctx->cntxt_pd = NULL; uctx->cntxt_pd = NULL;
status = _ocrdma_dealloc_pd(dev, pd); status = _ocrdma_dealloc_pd(dev, pd);
return status; return status;
...@@ -843,6 +847,13 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr) ...@@ -843,6 +847,13 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
if (mr->umem) if (mr->umem)
ib_umem_release(mr->umem); ib_umem_release(mr->umem);
kfree(mr); kfree(mr);
/* Don't stop cleanup, in case FW is unresponsive */
if (dev->mqe_ctx.fw_error_state) {
status = 0;
pr_err("%s(%d) fw not responding.\n",
__func__, dev->id);
}
return status; return status;
} }
...@@ -2054,6 +2065,13 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -2054,6 +2065,13 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
} }
while (wr) { while (wr) {
if (qp->qp_type == IB_QPT_UD &&
(wr->opcode != IB_WR_SEND &&
wr->opcode != IB_WR_SEND_WITH_IMM)) {
*bad_wr = wr;
status = -EINVAL;
break;
}
if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
wr->num_sge > qp->sq.max_sges) { wr->num_sge > qp->sq.max_sges) {
*bad_wr = wr; *bad_wr = wr;
...@@ -2488,6 +2506,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, ...@@ -2488,6 +2506,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
*stop = true; *stop = true;
expand = false; expand = false;
} }
} else if (is_hw_sq_empty(qp)) {
/* Do nothing */
expand = false;
*polled = false;
*stop = false;
} else { } else {
*polled = true; *polled = true;
expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
...@@ -2593,6 +2616,11 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, ...@@ -2593,6 +2616,11 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
*stop = true; *stop = true;
expand = false; expand = false;
} }
} else if (is_hw_rq_empty(qp)) {
/* Do nothing */
expand = false;
*polled = false;
*stop = false;
} else { } else {
*polled = true; *polled = true;
expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
......
...@@ -2476,7 +2476,7 @@ int qib_create_agents(struct qib_ibdev *dev) ...@@ -2476,7 +2476,7 @@ int qib_create_agents(struct qib_ibdev *dev)
ibp = &dd->pport[p].ibport_data; ibp = &dd->pport[p].ibport_data;
agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI, agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
NULL, 0, send_handler, NULL, 0, send_handler,
NULL, NULL); NULL, NULL, 0);
if (IS_ERR(agent)) { if (IS_ERR(agent)) {
ret = PTR_ERR(agent); ret = PTR_ERR(agent);
goto err; goto err;
......
...@@ -86,7 +86,6 @@ enum { ...@@ -86,7 +86,6 @@ enum {
IPOIB_FLAG_INITIALIZED = 1, IPOIB_FLAG_INITIALIZED = 1,
IPOIB_FLAG_ADMIN_UP = 2, IPOIB_FLAG_ADMIN_UP = 2,
IPOIB_PKEY_ASSIGNED = 3, IPOIB_PKEY_ASSIGNED = 3,
IPOIB_PKEY_STOP = 4,
IPOIB_FLAG_SUBINTERFACE = 5, IPOIB_FLAG_SUBINTERFACE = 5,
IPOIB_MCAST_RUN = 6, IPOIB_MCAST_RUN = 6,
IPOIB_STOP_REAPER = 7, IPOIB_STOP_REAPER = 7,
...@@ -312,7 +311,6 @@ struct ipoib_dev_priv { ...@@ -312,7 +311,6 @@ struct ipoib_dev_priv {
struct list_head multicast_list; struct list_head multicast_list;
struct rb_root multicast_tree; struct rb_root multicast_tree;
struct delayed_work pkey_poll_task;
struct delayed_work mcast_task; struct delayed_work mcast_task;
struct work_struct carrier_on_task; struct work_struct carrier_on_task;
struct work_struct flush_light; struct work_struct flush_light;
...@@ -473,10 +471,11 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work); ...@@ -473,10 +471,11 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
void ipoib_pkey_event(struct work_struct *work); void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev); void ipoib_ib_dev_cleanup(struct net_device *dev);
int ipoib_ib_dev_open(struct net_device *dev); int ipoib_ib_dev_open(struct net_device *dev, int flush);
int ipoib_ib_dev_up(struct net_device *dev); int ipoib_ib_dev_up(struct net_device *dev);
int ipoib_ib_dev_down(struct net_device *dev, int flush); int ipoib_ib_dev_down(struct net_device *dev, int flush);
int ipoib_ib_dev_stop(struct net_device *dev, int flush); int ipoib_ib_dev_stop(struct net_device *dev, int flush);
void ipoib_pkey_dev_check_presence(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
void ipoib_dev_cleanup(struct net_device *dev); void ipoib_dev_cleanup(struct net_device *dev);
...@@ -532,8 +531,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf); ...@@ -532,8 +531,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf);
void ipoib_setup(struct net_device *dev); void ipoib_setup(struct net_device *dev);
void ipoib_pkey_poll(struct work_struct *work); void ipoib_pkey_open(struct ipoib_dev_priv *priv);
int ipoib_pkey_dev_delay_open(struct net_device *dev);
void ipoib_drain_cq(struct net_device *dev); void ipoib_drain_cq(struct net_device *dev);
void ipoib_set_ethtool_ops(struct net_device *dev); void ipoib_set_ethtool_ops(struct net_device *dev);
......
...@@ -281,10 +281,8 @@ void ipoib_delete_debug_files(struct net_device *dev) ...@@ -281,10 +281,8 @@ void ipoib_delete_debug_files(struct net_device *dev)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
if (priv->mcg_dentry) debugfs_remove(priv->mcg_dentry);
debugfs_remove(priv->mcg_dentry); debugfs_remove(priv->path_dentry);
if (priv->path_dentry)
debugfs_remove(priv->path_dentry);
} }
int ipoib_register_debugfs(void) int ipoib_register_debugfs(void)
......
...@@ -664,17 +664,18 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx) ...@@ -664,17 +664,18 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
drain_tx_cq((struct net_device *)ctx); drain_tx_cq((struct net_device *)ctx);
} }
int ipoib_ib_dev_open(struct net_device *dev) int ipoib_ib_dev_open(struct net_device *dev, int flush)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret; int ret;
if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) { ipoib_pkey_dev_check_presence(dev);
ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
(!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
return -1; return -1;
} }
set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
ret = ipoib_init_qp(dev); ret = ipoib_init_qp(dev);
if (ret) { if (ret) {
...@@ -705,16 +706,17 @@ int ipoib_ib_dev_open(struct net_device *dev) ...@@ -705,16 +706,17 @@ int ipoib_ib_dev_open(struct net_device *dev)
dev_stop: dev_stop:
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
napi_enable(&priv->napi); napi_enable(&priv->napi);
ipoib_ib_dev_stop(dev, 1); ipoib_ib_dev_stop(dev, flush);
return -1; return -1;
} }
static void ipoib_pkey_dev_check_presence(struct net_device *dev) void ipoib_pkey_dev_check_presence(struct net_device *dev)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
u16 pkey_index = 0;
if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) if (!(priv->pkey & 0x7fff) ||
ib_find_pkey(priv->ca, priv->port, priv->pkey,
&priv->pkey_index))
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
else else
set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
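A quick way to read the reworked presence check above: a P_Key whose low 15 bits are all zero (0x0000 or 0x8000) can never be assigned, and any other value has to be found in the port's P_Key table, ignoring the membership bit for the comparison. The snippet below is a standalone model of that decision, not the kernel code; ib_find_pkey() performs the real table lookup and the array here is purely illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Model of the test ipoib_pkey_dev_check_presence() now performs:
 * reject invalid P_Keys up front, otherwise require table membership
 * (the 0x8000 membership bit is masked off for the comparison).
 */
static bool pkey_assigned(uint16_t pkey, const uint16_t *table, size_t len,
			  uint16_t *index)
{
	size_t i;

	if (!(pkey & 0x7fff))		/* 0x0000 / 0x8000: invalid by definition */
		return false;

	for (i = 0; i < len; i++) {
		if ((table[i] & 0x7fff) == (pkey & 0x7fff)) {
			*index = (uint16_t)i;
			return true;	/* found: IPOIB_PKEY_ASSIGNED would be set */
		}
	}
	return false;			/* absent: the flag stays cleared */
}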
...@@ -745,14 +747,6 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush) ...@@ -745,14 +747,6 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
netif_carrier_off(dev); netif_carrier_off(dev);
/* Shutdown the P_Key thread if still active */
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
mutex_lock(&pkey_mutex);
set_bit(IPOIB_PKEY_STOP, &priv->flags);
cancel_delayed_work_sync(&priv->pkey_poll_task);
mutex_unlock(&pkey_mutex);
}
ipoib_mcast_stop_thread(dev, flush); ipoib_mcast_stop_thread(dev, flush);
ipoib_mcast_dev_flush(dev); ipoib_mcast_dev_flush(dev);
...@@ -924,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) ...@@ -924,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
(unsigned long) dev); (unsigned long) dev);
if (dev->flags & IFF_UP) { if (dev->flags & IFF_UP) {
if (ipoib_ib_dev_open(dev)) { if (ipoib_ib_dev_open(dev, 1)) {
ipoib_transport_dev_cleanup(dev); ipoib_transport_dev_cleanup(dev);
return -ENODEV; return -ENODEV;
} }
...@@ -966,13 +960,27 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv) ...@@ -966,13 +960,27 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
return 1; return 1;
} }
/*
* returns 0 if pkey value was found in a different slot.
*/
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
u16 old_index = priv->pkey_index;
priv->pkey_index = 0;
ipoib_pkey_dev_check_presence(priv->dev);
if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
(old_index == priv->pkey_index))
return 1;
return 0;
}
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
enum ipoib_flush_level level) enum ipoib_flush_level level)
{ {
struct ipoib_dev_priv *cpriv; struct ipoib_dev_priv *cpriv;
struct net_device *dev = priv->dev; struct net_device *dev = priv->dev;
u16 new_index;
int result; int result;
down_read(&priv->vlan_rwsem); down_read(&priv->vlan_rwsem);
...@@ -986,16 +994,20 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ...@@ -986,16 +994,20 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
up_read(&priv->vlan_rwsem); up_read(&priv->vlan_rwsem);
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) { if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
/* for non-child devices must check/update the pkey value here */ level != IPOIB_FLUSH_HEAVY) {
if (level == IPOIB_FLUSH_HEAVY &&
!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
update_parent_pkey(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
return; return;
} }
if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
/* interface is down. update pkey and leave. */
if (level == IPOIB_FLUSH_HEAVY) {
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
update_parent_pkey(priv);
else
update_child_pkey(priv);
}
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
return; return;
} }
...@@ -1005,20 +1017,13 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ...@@ -1005,20 +1017,13 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
* (parent) devices should always takes what present in pkey index 0 * (parent) devices should always takes what present in pkey index 0
*/ */
if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) { result = update_child_pkey(priv);
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); if (result) {
ipoib_ib_dev_down(dev, 0); /* restart QP only if P_Key index is changed */
ipoib_ib_dev_stop(dev, 0);
if (ipoib_pkey_dev_delay_open(dev))
return;
}
/* restart QP only if P_Key index is changed */
if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
new_index == priv->pkey_index) {
ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
return; return;
} }
priv->pkey_index = new_index;
} else { } else {
result = update_parent_pkey(priv); result = update_parent_pkey(priv);
/* restart QP only if P_Key value changed */ /* restart QP only if P_Key value changed */
...@@ -1038,8 +1043,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ...@@ -1038,8 +1043,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
ipoib_ib_dev_down(dev, 0); ipoib_ib_dev_down(dev, 0);
if (level == IPOIB_FLUSH_HEAVY) { if (level == IPOIB_FLUSH_HEAVY) {
ipoib_ib_dev_stop(dev, 0); if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_ib_dev_open(dev); ipoib_ib_dev_stop(dev, 0);
if (ipoib_ib_dev_open(dev, 0) != 0)
return;
if (netif_queue_stopped(dev))
netif_start_queue(dev);
} }
/* /*
...@@ -1094,54 +1103,4 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) ...@@ -1094,54 +1103,4 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
ipoib_transport_dev_cleanup(dev); ipoib_transport_dev_cleanup(dev);
} }
/*
* Delayed P_Key Assigment Interim Support
*
* The following is initial implementation of delayed P_Key assigment
* mechanism. It is using the same approach implemented for the multicast
* group join. The single goal of this implementation is to quickly address
* Bug #2507. This implementation will probably be removed when the P_Key
* change async notification is available.
*/
void ipoib_pkey_poll(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
struct net_device *dev = priv->dev;
ipoib_pkey_dev_check_presence(dev);
if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
ipoib_open(dev);
else {
mutex_lock(&pkey_mutex);
if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
queue_delayed_work(ipoib_workqueue,
&priv->pkey_poll_task,
HZ);
mutex_unlock(&pkey_mutex);
}
}
int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
/* Look for the interface pkey value in the IB Port P_Key table and */
/* set the interface pkey assigment flag */
ipoib_pkey_dev_check_presence(dev);
/* P_Key value not assigned yet - start polling */
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
mutex_lock(&pkey_mutex);
clear_bit(IPOIB_PKEY_STOP, &priv->flags);
queue_delayed_work(ipoib_workqueue,
&priv->pkey_poll_task,
HZ);
mutex_unlock(&pkey_mutex);
return 1;
}
return 0;
}
...@@ -108,11 +108,11 @@ int ipoib_open(struct net_device *dev) ...@@ -108,11 +108,11 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
if (ipoib_pkey_dev_delay_open(dev)) if (ipoib_ib_dev_open(dev, 1)) {
return 0; if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
if (ipoib_ib_dev_open(dev))
goto err_disable; goto err_disable;
}
if (ipoib_ib_dev_up(dev)) if (ipoib_ib_dev_up(dev))
goto err_stop; goto err_stop;
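The failure path of ipoib_ib_dev_open() is now interpreted at open time: when it fails only because the P_Key is not yet present in the port table, ipoib_open() still returns 0 and leaves the interface administratively up, so the heavy flush triggered by a later PKEY_CHANGE event can finish the bring-up. This is what replaces the delayed polling task removed above. A commented restatement of that decision, purely for illustration:

	if (ipoib_ib_dev_open(dev, 1)) {
		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
			return 0;	/* P_Key absent for now: stay ADMIN_UP and
					 * let the PKEY_CHANGE heavy flush retry */
		goto err_disable;	/* a genuine failure: roll everything back */
	}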
...@@ -1379,7 +1379,6 @@ void ipoib_setup(struct net_device *dev) ...@@ -1379,7 +1379,6 @@ void ipoib_setup(struct net_device *dev)
INIT_LIST_HEAD(&priv->dead_ahs); INIT_LIST_HEAD(&priv->dead_ahs);
INIT_LIST_HEAD(&priv->multicast_list); INIT_LIST_HEAD(&priv->multicast_list);
INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task); INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light); INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
......
...@@ -596,20 +596,28 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, ...@@ -596,20 +596,28 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
struct iser_conn *ib_conn; struct iser_conn *ib_conn;
struct iscsi_endpoint *ep; struct iscsi_endpoint *ep;
ep = iscsi_create_endpoint(sizeof(*ib_conn)); ep = iscsi_create_endpoint(0);
if (!ep) if (!ep)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
ib_conn = ep->dd_data; ib_conn = kzalloc(sizeof(*ib_conn), GFP_KERNEL);
if (!ib_conn) {
err = -ENOMEM;
goto failure;
}
ep->dd_data = ib_conn;
ib_conn->ep = ep; ib_conn->ep = ep;
iser_conn_init(ib_conn); iser_conn_init(ib_conn);
err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, err = iser_connect(ib_conn, NULL, dst_addr, non_blocking);
non_blocking);
if (err) if (err)
return ERR_PTR(err); goto failure;
return ep; return ep;
failure:
iscsi_destroy_endpoint(ep);
return ERR_PTR(err);
} }
static int static int
...@@ -619,15 +627,16 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) ...@@ -619,15 +627,16 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
int rc; int rc;
ib_conn = ep->dd_data; ib_conn = ep->dd_data;
rc = wait_event_interruptible_timeout(ib_conn->wait, rc = wait_for_completion_interruptible_timeout(&ib_conn->up_completion,
ib_conn->state == ISER_CONN_UP, msecs_to_jiffies(timeout_ms));
msecs_to_jiffies(timeout_ms));
/* if conn establishment failed, return error code to iscsi */ /* if conn establishment failed, return error code to iscsi */
if (!rc && if (rc == 0) {
(ib_conn->state == ISER_CONN_TERMINATING || mutex_lock(&ib_conn->state_mutex);
ib_conn->state == ISER_CONN_DOWN)) if (ib_conn->state == ISER_CONN_TERMINATING ||
rc = -1; ib_conn->state == ISER_CONN_DOWN)
rc = -1;
mutex_unlock(&ib_conn->state_mutex);
}
iser_info("ib conn %p rc = %d\n", ib_conn, rc); iser_info("ib conn %p rc = %d\n", ib_conn, rc);
...@@ -646,19 +655,25 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) ...@@ -646,19 +655,25 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
ib_conn = ep->dd_data; ib_conn = ep->dd_data;
iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state); iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state);
mutex_lock(&ib_conn->state_mutex);
iser_conn_terminate(ib_conn); iser_conn_terminate(ib_conn);
/* /*
* if iser_conn and iscsi_conn are bound, we must wait iscsi_conn_stop * if iser_conn and iscsi_conn are bound, we must wait for
* call and ISER_CONN_DOWN state before freeing the iser resources. * iscsi_conn_stop and flush errors completion before freeing
* otherwise we are safe to free resources immediately. * the iser resources. Otherwise we are safe to free resources
* immediately.
*/ */
if (ib_conn->iscsi_conn) { if (ib_conn->iscsi_conn) {
INIT_WORK(&ib_conn->release_work, iser_release_work); INIT_WORK(&ib_conn->release_work, iser_release_work);
queue_work(release_wq, &ib_conn->release_work); queue_work(release_wq, &ib_conn->release_work);
mutex_unlock(&ib_conn->state_mutex);
} else { } else {
ib_conn->state = ISER_CONN_DOWN;
mutex_unlock(&ib_conn->state_mutex);
iser_conn_release(ib_conn); iser_conn_release(ib_conn);
} }
iscsi_destroy_endpoint(ep);
} }
static umode_t iser_attr_is_visible(int param_type, int param) static umode_t iser_attr_is_visible(int param_type, int param)
......
...@@ -326,7 +326,6 @@ struct iser_conn { ...@@ -326,7 +326,6 @@ struct iser_conn {
struct iser_device *device; /* device context */ struct iser_device *device; /* device context */
struct rdma_cm_id *cma_id; /* CMA ID */ struct rdma_cm_id *cma_id; /* CMA ID */
struct ib_qp *qp; /* QP */ struct ib_qp *qp; /* QP */
wait_queue_head_t wait; /* waitq for conn/disconn */
unsigned qp_max_recv_dtos; /* num of rx buffers */ unsigned qp_max_recv_dtos; /* num of rx buffers */
unsigned qp_max_recv_dtos_mask; /* above minus 1 */ unsigned qp_max_recv_dtos_mask; /* above minus 1 */
unsigned min_posted_rx; /* qp_max_recv_dtos >> 2 */ unsigned min_posted_rx; /* qp_max_recv_dtos >> 2 */
...@@ -335,6 +334,9 @@ struct iser_conn { ...@@ -335,6 +334,9 @@ struct iser_conn {
char name[ISER_OBJECT_NAME_SIZE]; char name[ISER_OBJECT_NAME_SIZE];
struct work_struct release_work; struct work_struct release_work;
struct completion stop_completion; struct completion stop_completion;
struct mutex state_mutex;
struct completion flush_completion;
struct completion up_completion;
struct list_head conn_list; /* entry in ig conn list */ struct list_head conn_list; /* entry in ig conn list */
char *login_buf; char *login_buf;
...@@ -448,8 +450,8 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task, ...@@ -448,8 +450,8 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir); enum iser_data_dir cmd_dir);
int iser_connect(struct iser_conn *ib_conn, int iser_connect(struct iser_conn *ib_conn,
struct sockaddr_in *src_addr, struct sockaddr *src_addr,
struct sockaddr_in *dst_addr, struct sockaddr *dst_addr,
int non_blocking); int non_blocking);
int iser_reg_page_vec(struct iser_conn *ib_conn, int iser_reg_page_vec(struct iser_conn *ib_conn,
......
...@@ -491,10 +491,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) ...@@ -491,10 +491,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
} }
/** /**
* releases the QP objects, returns 0 on success, * releases the QP object
* -1 on failure
*/ */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn) static void iser_free_ib_conn_res(struct iser_conn *ib_conn)
{ {
int cq_index; int cq_index;
BUG_ON(ib_conn == NULL); BUG_ON(ib_conn == NULL);
...@@ -513,8 +512,6 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn) ...@@ -513,8 +512,6 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
} }
ib_conn->qp = NULL; ib_conn->qp = NULL;
return 0;
} }
/** /**
...@@ -568,31 +565,40 @@ static void iser_device_try_release(struct iser_device *device) ...@@ -568,31 +565,40 @@ static void iser_device_try_release(struct iser_device *device)
mutex_unlock(&ig.device_list_mutex); mutex_unlock(&ig.device_list_mutex);
} }
/**
* Called with state mutex held
**/
static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
enum iser_ib_conn_state comp, enum iser_ib_conn_state comp,
enum iser_ib_conn_state exch) enum iser_ib_conn_state exch)
{ {
int ret; int ret;
spin_lock_bh(&ib_conn->lock);
if ((ret = (ib_conn->state == comp))) if ((ret = (ib_conn->state == comp)))
ib_conn->state = exch; ib_conn->state = exch;
spin_unlock_bh(&ib_conn->lock);
return ret; return ret;
} }
void iser_release_work(struct work_struct *work) void iser_release_work(struct work_struct *work)
{ {
struct iser_conn *ib_conn; struct iser_conn *ib_conn;
int rc;
ib_conn = container_of(work, struct iser_conn, release_work); ib_conn = container_of(work, struct iser_conn, release_work);
/* wait for .conn_stop callback */ /* wait for .conn_stop callback */
wait_for_completion(&ib_conn->stop_completion); rc = wait_for_completion_timeout(&ib_conn->stop_completion, 30 * HZ);
WARN_ON(rc == 0);
/* wait for the qp`s post send and post receive buffers to empty */ /* wait for the qp`s post send and post receive buffers to empty */
wait_event_interruptible(ib_conn->wait, rc = wait_for_completion_timeout(&ib_conn->flush_completion, 30 * HZ);
ib_conn->state == ISER_CONN_DOWN); WARN_ON(rc == 0);
ib_conn->state = ISER_CONN_DOWN;
mutex_lock(&ib_conn->state_mutex);
ib_conn->state = ISER_CONN_DOWN;
mutex_unlock(&ib_conn->state_mutex);
iser_conn_release(ib_conn); iser_conn_release(ib_conn);
} }
...@@ -604,23 +610,27 @@ void iser_conn_release(struct iser_conn *ib_conn) ...@@ -604,23 +610,27 @@ void iser_conn_release(struct iser_conn *ib_conn)
{ {
struct iser_device *device = ib_conn->device; struct iser_device *device = ib_conn->device;
BUG_ON(ib_conn->state == ISER_CONN_UP);
mutex_lock(&ig.connlist_mutex); mutex_lock(&ig.connlist_mutex);
list_del(&ib_conn->conn_list); list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex); mutex_unlock(&ig.connlist_mutex);
mutex_lock(&ib_conn->state_mutex);
BUG_ON(ib_conn->state != ISER_CONN_DOWN);
iser_free_rx_descriptors(ib_conn); iser_free_rx_descriptors(ib_conn);
iser_free_ib_conn_res(ib_conn); iser_free_ib_conn_res(ib_conn);
ib_conn->device = NULL; ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */ /* on EVENT_ADDR_ERROR there's no device yet for this conn */
if (device != NULL) if (device != NULL)
iser_device_try_release(device); iser_device_try_release(device);
mutex_unlock(&ib_conn->state_mutex);
/* if cma handler context, the caller actually destroy the id */ /* if cma handler context, the caller actually destroy the id */
if (ib_conn->cma_id != NULL) { if (ib_conn->cma_id != NULL) {
rdma_destroy_id(ib_conn->cma_id); rdma_destroy_id(ib_conn->cma_id);
ib_conn->cma_id = NULL; ib_conn->cma_id = NULL;
} }
iscsi_destroy_endpoint(ib_conn->ep); kfree(ib_conn);
} }
/** /**
...@@ -642,22 +652,31 @@ void iser_conn_terminate(struct iser_conn *ib_conn) ...@@ -642,22 +652,31 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
ib_conn,err); ib_conn,err);
} }
/**
* Called with state mutex held
**/
static void iser_connect_error(struct rdma_cm_id *cma_id) static void iser_connect_error(struct rdma_cm_id *cma_id)
{ {
struct iser_conn *ib_conn; struct iser_conn *ib_conn;
ib_conn = (struct iser_conn *)cma_id->context; ib_conn = (struct iser_conn *)cma_id->context;
ib_conn->state = ISER_CONN_DOWN; ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
} }
/**
* Called with state mutex held
**/
static void iser_addr_handler(struct rdma_cm_id *cma_id) static void iser_addr_handler(struct rdma_cm_id *cma_id)
{ {
struct iser_device *device; struct iser_device *device;
struct iser_conn *ib_conn; struct iser_conn *ib_conn;
int ret; int ret;
ib_conn = (struct iser_conn *)cma_id->context;
if (ib_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
device = iser_device_find_by_ib_device(cma_id); device = iser_device_find_by_ib_device(cma_id);
if (!device) { if (!device) {
iser_err("device lookup/creation failed\n"); iser_err("device lookup/creation failed\n");
...@@ -665,7 +684,6 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) ...@@ -665,7 +684,6 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
return; return;
} }
ib_conn = (struct iser_conn *)cma_id->context;
ib_conn->device = device; ib_conn->device = device;
/* connection T10-PI support */ /* connection T10-PI support */
...@@ -689,18 +707,27 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) ...@@ -689,18 +707,27 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
} }
} }
/**
* Called with state mutex held
**/
static void iser_route_handler(struct rdma_cm_id *cma_id) static void iser_route_handler(struct rdma_cm_id *cma_id)
{ {
struct rdma_conn_param conn_param; struct rdma_conn_param conn_param;
int ret; int ret;
struct iser_cm_hdr req_hdr; struct iser_cm_hdr req_hdr;
struct iser_conn *ib_conn = (struct iser_conn *)cma_id->context;
struct iser_device *device = ib_conn->device;
if (ib_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context); ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
if (ret) if (ret)
goto failure; goto failure;
memset(&conn_param, 0, sizeof conn_param); memset(&conn_param, 0, sizeof conn_param);
conn_param.responder_resources = 4; conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
conn_param.initiator_depth = 1; conn_param.initiator_depth = 1;
conn_param.retry_count = 7; conn_param.retry_count = 7;
conn_param.rnr_retry_count = 6; conn_param.rnr_retry_count = 6;
...@@ -728,12 +755,16 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id) ...@@ -728,12 +755,16 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
struct ib_qp_attr attr; struct ib_qp_attr attr;
struct ib_qp_init_attr init_attr; struct ib_qp_init_attr init_attr;
ib_conn = (struct iser_conn *)cma_id->context;
if (ib_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
ib_conn = (struct iser_conn *)cma_id->context; ib_conn->state = ISER_CONN_UP;
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_PENDING, ISER_CONN_UP)) complete(&ib_conn->up_completion);
wake_up_interruptible(&ib_conn->wait);
} }
static void iser_disconnected_handler(struct rdma_cm_id *cma_id) static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
...@@ -752,19 +783,25 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id) ...@@ -752,19 +783,25 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
iser_err("iscsi_iser connection isn't bound\n"); iser_err("iscsi_iser connection isn't bound\n");
} }
/* Complete the termination process if no posts are pending */ /* Complete the termination process if no posts are pending. This code
* block also exists in iser_handle_comp_error(), but it is needed here
* for cases of no flushes at all, e.g. discovery over rdma.
*/
if (ib_conn->post_recv_buf_count == 0 && if (ib_conn->post_recv_buf_count == 0 &&
(atomic_read(&ib_conn->post_send_buf_count) == 0)) { (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
ib_conn->state = ISER_CONN_DOWN; complete(&ib_conn->flush_completion);
wake_up_interruptible(&ib_conn->wait);
} }
} }
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{ {
struct iser_conn *ib_conn;
ib_conn = (struct iser_conn *)cma_id->context;
iser_info("event %d status %d conn %p id %p\n", iser_info("event %d status %d conn %p id %p\n",
event->event, event->status, cma_id->context, cma_id); event->event, event->status, cma_id->context, cma_id);
mutex_lock(&ib_conn->state_mutex);
switch (event->event) { switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED: case RDMA_CM_EVENT_ADDR_RESOLVED:
iser_addr_handler(cma_id); iser_addr_handler(cma_id);
...@@ -785,24 +822,28 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve ...@@ -785,24 +822,28 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_DEVICE_REMOVAL: case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_ADDR_CHANGE: case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
iser_disconnected_handler(cma_id); iser_disconnected_handler(cma_id);
break; break;
default: default:
iser_err("Unexpected RDMA CM event (%d)\n", event->event); iser_err("Unexpected RDMA CM event (%d)\n", event->event);
break; break;
} }
mutex_unlock(&ib_conn->state_mutex);
return 0; return 0;
} }
void iser_conn_init(struct iser_conn *ib_conn) void iser_conn_init(struct iser_conn *ib_conn)
{ {
ib_conn->state = ISER_CONN_INIT; ib_conn->state = ISER_CONN_INIT;
init_waitqueue_head(&ib_conn->wait);
ib_conn->post_recv_buf_count = 0; ib_conn->post_recv_buf_count = 0;
atomic_set(&ib_conn->post_send_buf_count, 0); atomic_set(&ib_conn->post_send_buf_count, 0);
init_completion(&ib_conn->stop_completion); init_completion(&ib_conn->stop_completion);
init_completion(&ib_conn->flush_completion);
init_completion(&ib_conn->up_completion);
INIT_LIST_HEAD(&ib_conn->conn_list); INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock); spin_lock_init(&ib_conn->lock);
mutex_init(&ib_conn->state_mutex);
} }
/** /**
...@@ -810,22 +851,21 @@ void iser_conn_init(struct iser_conn *ib_conn) ...@@ -810,22 +851,21 @@ void iser_conn_init(struct iser_conn *ib_conn)
* sleeps until the connection is established or rejected * sleeps until the connection is established or rejected
*/ */
int iser_connect(struct iser_conn *ib_conn, int iser_connect(struct iser_conn *ib_conn,
struct sockaddr_in *src_addr, struct sockaddr *src_addr,
struct sockaddr_in *dst_addr, struct sockaddr *dst_addr,
int non_blocking) int non_blocking)
{ {
struct sockaddr *src, *dst;
int err = 0; int err = 0;
sprintf(ib_conn->name, "%pI4:%d", mutex_lock(&ib_conn->state_mutex);
&dst_addr->sin_addr.s_addr, dst_addr->sin_port);
sprintf(ib_conn->name, "%pISp", dst_addr);
iser_info("connecting to: %s\n", ib_conn->name);
/* the device is known only --after-- address resolution */ /* the device is known only --after-- address resolution */
ib_conn->device = NULL; ib_conn->device = NULL;
iser_info("connecting to: %pI4, port 0x%x\n",
&dst_addr->sin_addr, dst_addr->sin_port);
ib_conn->state = ISER_CONN_PENDING; ib_conn->state = ISER_CONN_PENDING;
ib_conn->cma_id = rdma_create_id(iser_cma_handler, ib_conn->cma_id = rdma_create_id(iser_cma_handler,
...@@ -837,23 +877,21 @@ int iser_connect(struct iser_conn *ib_conn, ...@@ -837,23 +877,21 @@ int iser_connect(struct iser_conn *ib_conn,
goto id_failure; goto id_failure;
} }
src = (struct sockaddr *)src_addr; err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
dst = (struct sockaddr *)dst_addr;
err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000);
if (err) { if (err) {
iser_err("rdma_resolve_addr failed: %d\n", err); iser_err("rdma_resolve_addr failed: %d\n", err);
goto addr_failure; goto addr_failure;
} }
if (!non_blocking) { if (!non_blocking) {
wait_event_interruptible(ib_conn->wait, wait_for_completion_interruptible(&ib_conn->up_completion);
(ib_conn->state != ISER_CONN_PENDING));
if (ib_conn->state != ISER_CONN_UP) { if (ib_conn->state != ISER_CONN_UP) {
err = -EIO; err = -EIO;
goto connect_failure; goto connect_failure;
} }
} }
mutex_unlock(&ib_conn->state_mutex);
mutex_lock(&ig.connlist_mutex); mutex_lock(&ig.connlist_mutex);
list_add(&ib_conn->conn_list, &ig.connlist); list_add(&ib_conn->conn_list, &ig.connlist);
...@@ -865,6 +903,7 @@ int iser_connect(struct iser_conn *ib_conn, ...@@ -865,6 +903,7 @@ int iser_connect(struct iser_conn *ib_conn,
addr_failure: addr_failure:
ib_conn->state = ISER_CONN_DOWN; ib_conn->state = ISER_CONN_DOWN;
connect_failure: connect_failure:
mutex_unlock(&ib_conn->state_mutex);
iser_conn_release(ib_conn); iser_conn_release(ib_conn);
return err; return err;
} }
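With iser_connect() now taking plain struct sockaddr pointers, the connect path is address-family agnostic, and the kernel's %pISp format prints either an IPv4 or an IPv6 address together with its port, replacing the hand-rolled %pI4 formatting above. The userspace-style helper below only illustrates what "family-agnostic" buys a caller: the same storage and pointer type cover both families.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Build a sockaddr_storage from a textual address; port is given in
 * host byte order.  Returns 0 on success, -1 if the text parses as
 * neither IPv4 nor IPv6.
 */
static int fill_sockaddr(struct sockaddr_storage *ss, const char *addr,
			 unsigned short port)
{
	struct sockaddr_in *v4 = (struct sockaddr_in *)ss;
	struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)ss;

	memset(ss, 0, sizeof(*ss));
	if (inet_pton(AF_INET, addr, &v4->sin_addr) == 1) {
		v4->sin_family = AF_INET;
		v4->sin_port = htons(port);
		return 0;
	}
	if (inet_pton(AF_INET6, addr, &v6->sin6_addr) == 1) {
		v6->sin6_family = AF_INET6;
		v6->sin6_port = htons(port);
		return 0;
	}
	return -1;
}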
...@@ -1049,18 +1088,19 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc, ...@@ -1049,18 +1088,19 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
if (ib_conn->post_recv_buf_count == 0 && if (ib_conn->post_recv_buf_count == 0 &&
atomic_read(&ib_conn->post_send_buf_count) == 0) { atomic_read(&ib_conn->post_send_buf_count) == 0) {
/* getting here when the state is UP means that the conn is * /**
* being terminated asynchronously from the iSCSI layer's * * getting here when the state is UP means that the conn is
* perspective. */ * being terminated asynchronously from the iSCSI layer's
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, * perspective. It is safe to peek at the connection state
ISER_CONN_TERMINATING)) * since iscsi_conn_failure is allowed to be called twice.
**/
if (ib_conn->state == ISER_CONN_UP)
iscsi_conn_failure(ib_conn->iscsi_conn, iscsi_conn_failure(ib_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED); ISCSI_ERR_CONN_FAILED);
/* no more non completed posts to the QP, complete the /* no more non completed posts to the QP, complete the
* termination process w.o worrying on disconnect event */ * termination process w.o worrying on disconnect event */
ib_conn->state = ISER_CONN_DOWN; complete(&ib_conn->flush_completion);
wake_up_interruptible(&ib_conn->wait);
} }
} }
......
...@@ -130,6 +130,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr); ...@@ -130,6 +130,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
static struct scsi_transport_template *ib_srp_transport_template; static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;
static struct ib_client srp_client = { static struct ib_client srp_client = {
.name = "srp", .name = "srp",
...@@ -731,7 +732,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target) ...@@ -731,7 +732,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
spin_unlock_irq(&target->lock); spin_unlock_irq(&target->lock);
if (changed) if (changed)
queue_work(system_long_wq, &target->remove_work); queue_work(srp_remove_wq, &target->remove_work);
return changed; return changed;
} }
...@@ -1643,10 +1644,14 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) ...@@ -1643,10 +1644,14 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
SCSI_SENSE_BUFFERSIZE)); SCSI_SENSE_BUFFERSIZE));
} }
if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER)) if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
srp_free_req(target, req, scmnd, srp_free_req(target, req, scmnd,
be32_to_cpu(rsp->req_lim_delta)); be32_to_cpu(rsp->req_lim_delta));
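The residual-count fix distinguishes the four SRP response flags instead of collapsing them: an underflow is reported to the SCSI midlayer as a positive residual (bytes that were not transferred) and an overflow as a negative one, with the data-in flags checked before the data-out flags. A standalone model of the mapping follows; the flag values below are illustrative, not the SRP wire encoding.

#include <stdint.h>

#define RSP_DIUNDER	(1u << 0)	/* data-in underflow  */
#define RSP_DIOVER	(1u << 1)	/* data-in overflow   */
#define RSP_DOUNDER	(1u << 2)	/* data-out underflow */
#define RSP_DOOVER	(1u << 3)	/* data-out overflow  */

/* Returns the value that would be passed to scsi_set_resid(). */
static int32_t srp_residual(uint32_t flags, uint32_t di_cnt, uint32_t do_cnt)
{
	if (flags & RSP_DIUNDER)
		return (int32_t)di_cnt;		/* fewer bytes read than asked    */
	if (flags & RSP_DIOVER)
		return -(int32_t)di_cnt;	/* more bytes read than asked     */
	if (flags & RSP_DOUNDER)
		return (int32_t)do_cnt;		/* fewer bytes written than asked */
	if (flags & RSP_DOOVER)
		return -(int32_t)do_cnt;	/* more bytes written than asked  */
	return 0;
}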
...@@ -3261,9 +3266,10 @@ static void srp_remove_one(struct ib_device *device) ...@@ -3261,9 +3266,10 @@ static void srp_remove_one(struct ib_device *device)
spin_unlock(&host->target_lock); spin_unlock(&host->target_lock);
/* /*
* Wait for target port removal tasks. * Wait for tl_err and target port removal tasks.
*/ */
flush_workqueue(system_long_wq); flush_workqueue(system_long_wq);
flush_workqueue(srp_remove_wq);
kfree(host); kfree(host);
} }
...@@ -3313,16 +3319,22 @@ static int __init srp_init_module(void) ...@@ -3313,16 +3319,22 @@ static int __init srp_init_module(void)
indirect_sg_entries = cmd_sg_entries; indirect_sg_entries = cmd_sg_entries;
} }
srp_remove_wq = create_workqueue("srp_remove");
if (!srp_remove_wq) {
ret = -ENOMEM;
goto out;
}
ret = -ENOMEM;
ib_srp_transport_template = ib_srp_transport_template =
srp_attach_transport(&ib_srp_transport_functions); srp_attach_transport(&ib_srp_transport_functions);
if (!ib_srp_transport_template) if (!ib_srp_transport_template)
return -ENOMEM; goto destroy_wq;
ret = class_register(&srp_class); ret = class_register(&srp_class);
if (ret) { if (ret) {
pr_err("couldn't register class infiniband_srp\n"); pr_err("couldn't register class infiniband_srp\n");
srp_release_transport(ib_srp_transport_template); goto release_tr;
return ret;
} }
ib_sa_register_client(&srp_sa_client); ib_sa_register_client(&srp_sa_client);
...@@ -3330,13 +3342,22 @@ static int __init srp_init_module(void) ...@@ -3330,13 +3342,22 @@ static int __init srp_init_module(void)
ret = ib_register_client(&srp_client); ret = ib_register_client(&srp_client);
if (ret) { if (ret) {
pr_err("couldn't register IB client\n"); pr_err("couldn't register IB client\n");
srp_release_transport(ib_srp_transport_template); goto unreg_sa;
ib_sa_unregister_client(&srp_sa_client);
class_unregister(&srp_class);
return ret;
} }
return 0; out:
return ret;
unreg_sa:
ib_sa_unregister_client(&srp_sa_client);
class_unregister(&srp_class);
release_tr:
srp_release_transport(ib_srp_transport_template);
destroy_wq:
destroy_workqueue(srp_remove_wq);
goto out;
} }
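The module init path now unwinds through labelled cleanup blocks in reverse order of setup, with the trailing goto out simply funnelling every failure through the single return statement. A minimal standalone sketch of the same pattern, with made-up resource names:

#include <errno.h>
#include <stdlib.h>

static void *res_a, *res_b, *res_c;

/* Acquire in order; on failure, jump to the label that releases
 * everything acquired so far, in reverse order.
 */
static int demo_init(void)
{
	int ret = -ENOMEM;

	res_a = malloc(16);	/* stands in for create_workqueue()        */
	if (!res_a)
		goto out;

	res_b = malloc(16);	/* stands in for srp_attach_transport()    */
	if (!res_b)
		goto free_a;

	res_c = malloc(16);	/* stands in for class/client registration */
	if (!res_c)
		goto free_b;

	return 0;

free_b:
	free(res_b);
free_a:
	free(res_a);
out:
	return ret;
}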
static void __exit srp_cleanup_module(void) static void __exit srp_cleanup_module(void)
...@@ -3345,6 +3366,7 @@ static void __exit srp_cleanup_module(void) ...@@ -3345,6 +3366,7 @@ static void __exit srp_cleanup_module(void)
ib_sa_unregister_client(&srp_sa_client); ib_sa_unregister_client(&srp_sa_client);
class_unregister(&srp_class); class_unregister(&srp_class);
srp_release_transport(ib_srp_transport_template); srp_release_transport(ib_srp_transport_template);
destroy_workqueue(srp_remove_wq);
} }
module_init(srp_init_module); module_init(srp_init_module);
......
...@@ -198,6 +198,7 @@ static void srpt_event_handler(struct ib_event_handler *handler, ...@@ -198,6 +198,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
case IB_EVENT_PKEY_CHANGE: case IB_EVENT_PKEY_CHANGE:
case IB_EVENT_SM_CHANGE: case IB_EVENT_SM_CHANGE:
case IB_EVENT_CLIENT_REREGISTER: case IB_EVENT_CLIENT_REREGISTER:
case IB_EVENT_GID_CHANGE:
/* Refresh port data asynchronously. */ /* Refresh port data asynchronously. */
if (event->element.port_num <= sdev->device->phys_port_cnt) { if (event->element.port_num <= sdev->device->phys_port_cnt) {
sport = &sdev->port[event->element.port_num - 1]; sport = &sdev->port[event->element.port_num - 1];
...@@ -563,7 +564,7 @@ static int srpt_refresh_port(struct srpt_port *sport) ...@@ -563,7 +564,7 @@ static int srpt_refresh_port(struct srpt_port *sport)
&reg_req, 0, &reg_req, 0,
srpt_mad_send_handler, srpt_mad_send_handler,
srpt_mad_recv_handler, srpt_mad_recv_handler,
sport); sport, 0);
if (IS_ERR(sport->mad_agent)) { if (IS_ERR(sport->mad_agent)) {
ret = PTR_ERR(sport->mad_agent); ret = PTR_ERR(sport->mad_agent);
sport->mad_agent = NULL; sport->mad_agent = NULL;
......
...@@ -890,5 +890,6 @@ void be_roce_dev_remove(struct be_adapter *); ...@@ -890,5 +890,6 @@ void be_roce_dev_remove(struct be_adapter *);
*/ */
void be_roce_dev_open(struct be_adapter *); void be_roce_dev_open(struct be_adapter *);
void be_roce_dev_close(struct be_adapter *); void be_roce_dev_close(struct be_adapter *);
void be_roce_dev_shutdown(struct be_adapter *);
#endif /* BE_H */ #endif /* BE_H */
...@@ -4958,6 +4958,7 @@ static void be_shutdown(struct pci_dev *pdev) ...@@ -4958,6 +4958,7 @@ static void be_shutdown(struct pci_dev *pdev)
if (!adapter) if (!adapter)
return; return;
be_roce_dev_shutdown(adapter);
cancel_delayed_work_sync(&adapter->work); cancel_delayed_work_sync(&adapter->work);
cancel_delayed_work_sync(&adapter->func_recovery_work); cancel_delayed_work_sync(&adapter->func_recovery_work);
......
...@@ -120,7 +120,8 @@ static void _be_roce_dev_open(struct be_adapter *adapter) ...@@ -120,7 +120,8 @@ static void _be_roce_dev_open(struct be_adapter *adapter)
{ {
if (ocrdma_drv && adapter->ocrdma_dev && if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler) ocrdma_drv->state_change_handler)
ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 0); ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
BE_DEV_UP);
} }
void be_roce_dev_open(struct be_adapter *adapter) void be_roce_dev_open(struct be_adapter *adapter)
...@@ -136,7 +137,8 @@ static void _be_roce_dev_close(struct be_adapter *adapter) ...@@ -136,7 +137,8 @@ static void _be_roce_dev_close(struct be_adapter *adapter)
{ {
if (ocrdma_drv && adapter->ocrdma_dev && if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler) ocrdma_drv->state_change_handler)
ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 1); ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
BE_DEV_DOWN);
} }
void be_roce_dev_close(struct be_adapter *adapter) void be_roce_dev_close(struct be_adapter *adapter)
...@@ -148,6 +150,18 @@ void be_roce_dev_close(struct be_adapter *adapter) ...@@ -148,6 +150,18 @@ void be_roce_dev_close(struct be_adapter *adapter)
} }
} }
void be_roce_dev_shutdown(struct be_adapter *adapter)
{
if (be_roce_supported(adapter)) {
mutex_lock(&be_adapter_list_lock);
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
BE_DEV_SHUTDOWN);
mutex_unlock(&be_adapter_list_lock);
}
}
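With BE_DEV_SHUTDOWN added, a RoCE driver's state_change_handler has three transitions to dispatch on, and be_roce_dev_shutdown() delivers the new one under be_adapter_list_lock during PCI shutdown. The handler below is hypothetical, shown only to illustrate the shape of the callback; the real ocrdma implementation does driver-specific work in each case, and the BE_DEV_* values come from the be_roce.h hunk further down.

struct ocrdma_dev;			/* opaque to the NIC driver */

static void demo_state_change_handler(struct ocrdma_dev *dev, unsigned int event)
{
	switch (event) {
	case BE_DEV_UP:			/* netdev is up: activate the RoCE port */
		break;
	case BE_DEV_DOWN:		/* netdev went down: quiesce traffic */
		break;
	case BE_DEV_SHUTDOWN:		/* PCI shutdown: stop DMA and detach */
		break;
	}
}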
int be_roce_register_driver(struct ocrdma_driver *drv) int be_roce_register_driver(struct ocrdma_driver *drv)
{ {
struct be_adapter *dev; struct be_adapter *dev;
......
...@@ -62,7 +62,8 @@ struct ocrdma_driver { ...@@ -62,7 +62,8 @@ struct ocrdma_driver {
enum { enum {
BE_DEV_UP = 0, BE_DEV_UP = 0,
BE_DEV_DOWN = 1 BE_DEV_DOWN = 1,
BE_DEV_SHUTDOWN = 2
}; };
/* APIs for RoCE driver to register callback handlers, /* APIs for RoCE driver to register callback handlers,
......
...@@ -1310,6 +1310,15 @@ static struct mlx4_cmd_info cmd_info[] = { ...@@ -1310,6 +1310,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL, .verify = NULL,
.wrapper = mlx4_MAD_IFC_wrapper .wrapper = mlx4_MAD_IFC_wrapper
}, },
{
.opcode = MLX4_CMD_MAD_DEMUX,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_CMD_EPERM_wrapper
},
{ {
.opcode = MLX4_CMD_QUERY_IF_STAT, .opcode = MLX4_CMD_QUERY_IF_STAT,
.has_inbox = false, .has_inbox = false,
......
...@@ -136,7 +136,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) ...@@ -136,7 +136,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[7] = "FSM (MAC anti-spoofing) support", [7] = "FSM (MAC anti-spoofing) support",
[8] = "Dynamic QP updates support", [8] = "Dynamic QP updates support",
[9] = "Device managed flow steering IPoIB support", [9] = "Device managed flow steering IPoIB support",
[10] = "TCP/IP offloads/flow-steering for VXLAN support" [10] = "TCP/IP offloads/flow-steering for VXLAN support",
[11] = "MAD DEMUX (Secure-Host) support"
}; };
int i; int i;
...@@ -571,6 +572,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) ...@@ -571,6 +572,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
#define QUERY_DEV_CAP_VXLAN 0x9e #define QUERY_DEV_CAP_VXLAN 0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
dev_cap->flags2 = 0; dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev); mailbox = mlx4_alloc_cmd_mailbox(dev);
...@@ -748,6 +750,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) ...@@ -748,6 +750,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(dev_cap->max_counters, outbox, MLX4_GET(dev_cap->max_counters, outbox,
QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
MLX4_GET(field32, outbox,
QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
if (field32 & (1 << 0))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;
MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
if (field32 & (1 << 16)) if (field32 & (1 << 16))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
...@@ -2016,3 +2023,85 @@ void mlx4_opreq_action(struct work_struct *work) ...@@ -2016,3 +2023,85 @@ void mlx4_opreq_action(struct work_struct *work)
out: out:
mlx4_free_cmd_mailbox(dev, mailbox); mlx4_free_cmd_mailbox(dev, mailbox);
} }
static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *mailbox)
{
#define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10
#define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20
#define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40
#define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70
u32 set_attr_mask, getresp_attr_mask;
u32 trap_attr_mask, traprepress_attr_mask;
MLX4_GET(set_attr_mask, mailbox->buf,
MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
set_attr_mask);
MLX4_GET(getresp_attr_mask, mailbox->buf,
MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
getresp_attr_mask);
MLX4_GET(trap_attr_mask, mailbox->buf,
MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
trap_attr_mask);
MLX4_GET(traprepress_attr_mask, mailbox->buf,
MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
traprepress_attr_mask);
if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
traprepress_attr_mask)
return 1;
return 0;
}
int mlx4_config_mad_demux(struct mlx4_dev *dev)
{
struct mlx4_cmd_mailbox *mailbox;
int secure_host_active;
int err;
/* Check if mad_demux is supported */
if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX");
return -ENOMEM;
}
/* Query mad_demux to find out which MADs are handled by internal sma */
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err) {
mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
err);
goto out;
}
secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox);
/* Config mad_demux to handle all MADs returned by the query above */
err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err) {
mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
goto out;
}
if (secure_host_active)
mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
...@@ -1831,6 +1831,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) ...@@ -1831,6 +1831,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
goto err_mr_table_free; goto err_mr_table_free;
} }
err = mlx4_config_mad_demux(dev);
if (err) {
mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
goto err_mcg_table_free;
}
} }
err = mlx4_init_eq_table(dev); err = mlx4_init_eq_table(dev);
......
...@@ -1313,5 +1313,6 @@ void mlx4_init_quotas(struct mlx4_dev *dev); ...@@ -1313,5 +1313,6 @@ void mlx4_init_quotas(struct mlx4_dev *dev);
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
/* Returns the VF index of slave */ /* Returns the VF index of slave */
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
int mlx4_config_mad_demux(struct mlx4_dev *dev);
#endif /* MLX4_H */ #endif /* MLX4_H */
...@@ -473,7 +473,8 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport) ...@@ -473,7 +473,8 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport)
if (delay > 0) if (delay > 0)
queue_delayed_work(system_long_wq, &rport->reconnect_work, queue_delayed_work(system_long_wq, &rport->reconnect_work,
1UL * delay * HZ); 1UL * delay * HZ);
if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) &&
srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev), pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
rport->state); rport->state);
scsi_target_block(&shost->shost_gendev); scsi_target_block(&shost->shost_gendev);
......
...@@ -116,6 +116,7 @@ enum { ...@@ -116,6 +116,7 @@ enum {
/* special QP and management commands */ /* special QP and management commands */
MLX4_CMD_CONF_SPECIAL_QP = 0x23, MLX4_CMD_CONF_SPECIAL_QP = 0x23,
MLX4_CMD_MAD_IFC = 0x24, MLX4_CMD_MAD_IFC = 0x24,
MLX4_CMD_MAD_DEMUX = 0x203,
/* multicast commands */ /* multicast commands */
MLX4_CMD_READ_MCG = 0x25, MLX4_CMD_READ_MCG = 0x25,
...@@ -185,6 +186,12 @@ enum { ...@@ -185,6 +186,12 @@ enum {
MLX4_SET_PORT_VXLAN = 0xB MLX4_SET_PORT_VXLAN = 0xB
}; };
enum {
MLX4_CMD_MAD_DEMUX_CONFIG = 0,
MLX4_CMD_MAD_DEMUX_QUERY_STATE = 1,
MLX4_CMD_MAD_DEMUX_QUERY_RESTR = 2, /* Query mad demux restrictions */
};
enum { enum {
MLX4_CMD_WRAPPED, MLX4_CMD_WRAPPED,
MLX4_CMD_NATIVE MLX4_CMD_NATIVE
......
...@@ -172,6 +172,7 @@ enum { ...@@ -172,6 +172,7 @@ enum {
MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8, MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8,
MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9, MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9,
MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10,
MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11,
}; };
enum { enum {
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <rdma/ib_verbs.h> #include <rdma/ib_verbs.h>
#include <uapi/rdma/ib_user_mad.h>
/* Management base version */ /* Management base version */
#define IB_MGMT_BASE_VERSION 1 #define IB_MGMT_BASE_VERSION 1
...@@ -355,9 +356,13 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent, ...@@ -355,9 +356,13 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
* @hi_tid: Access layer assigned transaction ID for this client. * @hi_tid: Access layer assigned transaction ID for this client.
* Unsolicited MADs sent by this client will have the upper 32-bits * Unsolicited MADs sent by this client will have the upper 32-bits
* of their TID set to this value. * of their TID set to this value.
* @flags: registration flags
* @port_num: Port number on which QP is registered * @port_num: Port number on which QP is registered
* @rmpp_version: If set, indicates the RMPP version used by this agent. * @rmpp_version: If set, indicates the RMPP version used by this agent.
*/ */
enum {
IB_MAD_USER_RMPP = IB_USER_MAD_USER_RMPP,
};
struct ib_mad_agent { struct ib_mad_agent {
struct ib_device *device; struct ib_device *device;
struct ib_qp *qp; struct ib_qp *qp;
...@@ -367,6 +372,7 @@ struct ib_mad_agent { ...@@ -367,6 +372,7 @@ struct ib_mad_agent {
ib_mad_snoop_handler snoop_handler; ib_mad_snoop_handler snoop_handler;
void *context; void *context;
u32 hi_tid; u32 hi_tid;
u32 flags;
u8 port_num; u8 port_num;
u8 rmpp_version; u8 rmpp_version;
}; };
...@@ -426,6 +432,7 @@ struct ib_mad_recv_wc { ...@@ -426,6 +432,7 @@ struct ib_mad_recv_wc {
* in the range from 0x30 to 0x4f. Otherwise not used. * in the range from 0x30 to 0x4f. Otherwise not used.
* @method_mask: The caller will receive unsolicited MADs for any method * @method_mask: The caller will receive unsolicited MADs for any method
* where @method_mask = 1. * where @method_mask = 1.
*
*/ */
struct ib_mad_reg_req { struct ib_mad_reg_req {
u8 mgmt_class; u8 mgmt_class;
...@@ -451,6 +458,7 @@ struct ib_mad_reg_req { ...@@ -451,6 +458,7 @@ struct ib_mad_reg_req {
* @recv_handler: The completion callback routine invoked for a received * @recv_handler: The completion callback routine invoked for a received
* MAD. * MAD.
* @context: User specified context associated with the registration. * @context: User specified context associated with the registration.
* @registration_flags: Registration flags to set for this agent
*/ */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
u8 port_num, u8 port_num,
...@@ -459,7 +467,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, ...@@ -459,7 +467,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
u8 rmpp_version, u8 rmpp_version,
ib_mad_send_handler send_handler, ib_mad_send_handler send_handler,
ib_mad_recv_handler recv_handler, ib_mad_recv_handler recv_handler,
void *context); void *context,
u32 registration_flags);
enum ib_mad_snoop_flags { enum ib_mad_snoop_flags {
/*IB_MAD_SNOOP_POSTED_SENDS = 1,*/ /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/
...@@ -661,4 +670,11 @@ void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num); ...@@ -661,4 +670,11 @@ void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num);
*/ */
void ib_free_send_mad(struct ib_mad_send_buf *send_buf); void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
/**
* ib_mad_kernel_rmpp_agent - Returns if the agent is performing RMPP.
* @agent: the agent in question
* @return: true if agent is performing rmpp, false otherwise.
*/
int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent);
#endif /* IB_MAD_H */ #endif /* IB_MAD_H */
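ib_mad_kernel_rmpp_agent() lets MAD consumers ask whether the kernel, rather than the registrant, is responsible for RMPP on a given agent, which is exactly the distinction the new IB_MAD_USER_RMPP registration flag introduces. A plausible implementation consistent with those semantics, offered as a sketch rather than the exact kernel code:

int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent)
{
	/* The kernel handles RMPP only when the agent speaks RMPP at all
	 * and did not ask to do its own reassembly via IB_MAD_USER_RMPP.
	 */
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}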
...@@ -191,6 +191,45 @@ struct ib_user_mad_reg_req { ...@@ -191,6 +191,45 @@ struct ib_user_mad_reg_req {
__u8 rmpp_version; __u8 rmpp_version;
}; };
/**
* ib_user_mad_reg_req2 - MAD registration request
*
* @id - Set by the _kernel_; used by userspace to identify the
* registered agent in future requests.
* @qpn - Queue pair number; must be 0 or 1.
* @mgmt_class - Indicates which management class of MADs should be
* receive by the caller. This field is only required if
* the user wishes to receive unsolicited MADs, otherwise
* it should be 0.
* @mgmt_class_version - Indicates which version of MADs for the given
* management class to receive.
* @res - Ignored.
* @flags - additional registration flags; Must be in the set of
* flags defined in IB_USER_MAD_REG_FLAGS_CAP
* @method_mask - The caller wishes to receive unsolicited MADs for the
* methods whose bit(s) is(are) set.
* @oui - Indicates IEEE OUI to use when mgmt_class is a vendor
* class in the range from 0x30 to 0x4f. Otherwise not
* used.
* @rmpp_version - If set, indicates the RMPP version to use.
*/
enum {
IB_USER_MAD_USER_RMPP = (1 << 0),
};
#define IB_USER_MAD_REG_FLAGS_CAP (IB_USER_MAD_USER_RMPP)
struct ib_user_mad_reg_req2 {
__u32 id;
__u32 qpn;
__u8 mgmt_class;
__u8 mgmt_class_version;
__u16 res;
__u32 flags;
__u64 method_mask[2];
__u32 oui;
__u8 rmpp_version;
__u8 reserved[3];
};
#define IB_IOCTL_MAGIC 0x1b #define IB_IOCTL_MAGIC 0x1b
#define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \ #define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \
...@@ -200,4 +239,7 @@ struct ib_user_mad_reg_req { ...@@ -200,4 +239,7 @@ struct ib_user_mad_reg_req {
#define IB_USER_MAD_ENABLE_PKEY _IO(IB_IOCTL_MAGIC, 3) #define IB_USER_MAD_ENABLE_PKEY _IO(IB_IOCTL_MAGIC, 3)
#define IB_USER_MAD_REGISTER_AGENT2 _IOWR(IB_IOCTL_MAGIC, 4, \
struct ib_user_mad_reg_req2)
#endif /* IB_USER_MAD_H */ #endif /* IB_USER_MAD_H */
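A userspace sketch of registering through the new ioctl: fill struct ib_user_mad_reg_req2, leave id zeroed (the kernel writes the assigned agent id back into it), and keep flags within IB_USER_MAD_REG_FLAGS_CAP. The device path, management class and method mask below are illustrative choices, not requirements of the interface, and the example assumes installed kernel headers that already contain this addition.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <rdma/ib_user_mad.h>

int main(void)
{
	struct ib_user_mad_reg_req2 req;
	int fd = open("/dev/infiniband/umad0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.qpn = 1;			/* GSI */
	req.mgmt_class = 0x04;		/* e.g. performance management class */
	req.mgmt_class_version = 1;
	req.flags = 0;			/* or IB_USER_MAD_USER_RMPP to do RMPP in userspace */
	req.method_mask[0] = 1ULL << 0x01;	/* unsolicited Get(0x01) MADs */

	if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT2, &req) < 0) {
		perror("IB_USER_MAD_REGISTER_AGENT2");
		close(fd);
		return 1;
	}

	printf("registered agent id %u\n", req.id);
	close(fd);
	return 0;
}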
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#define RDMA_USER_CM_H #define RDMA_USER_CM_H
#include <linux/types.h> #include <linux/types.h>
#include <linux/socket.h>
#include <linux/in6.h> #include <linux/in6.h>
#include <rdma/ib_user_verbs.h> #include <rdma/ib_user_verbs.h>
#include <rdma/ib_user_sa.h> #include <rdma/ib_user_sa.h>
......