Commit 89fbb69c authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

@@ -37,58 +37,41 @@
  * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $
  */
-#include <linux/dma-mapping.h>
-#include <asm/bug.h>
-#include <rdma/ib_smi.h>
-#include "smi.h"
-#include "agent_priv.h"
-#include "mad_priv.h"
 #include "agent.h"
+#include "smi.h"
+
+#define SPFX "ib_agent: "
+
+struct ib_agent_port_private {
+	struct list_head port_list;
+	struct ib_mad_agent *agent[2];
+};
 
-spinlock_t ib_agent_port_list_lock;
+static DEFINE_SPINLOCK(ib_agent_port_list_lock);
 static LIST_HEAD(ib_agent_port_list);
 
-/*
- * Caller must hold ib_agent_port_list_lock
- */
-static inline struct ib_agent_port_private *
-__ib_get_agent_port(struct ib_device *device, int port_num,
-		    struct ib_mad_agent *mad_agent)
+static struct ib_agent_port_private *
+__ib_get_agent_port(struct ib_device *device, int port_num)
 {
 	struct ib_agent_port_private *entry;
 
-	BUG_ON(!(!!device ^ !!mad_agent)); /* Exactly one MUST be (!NULL) */
-
-	if (device) {
-		list_for_each_entry(entry, &ib_agent_port_list, port_list) {
-			if (entry->smp_agent->device == device &&
-			    entry->port_num == port_num)
-				return entry;
-		}
-	} else {
-		list_for_each_entry(entry, &ib_agent_port_list, port_list) {
-			if ((entry->smp_agent == mad_agent) ||
-			    (entry->perf_mgmt_agent == mad_agent))
-				return entry;
-		}
+	list_for_each_entry(entry, &ib_agent_port_list, port_list) {
+		if (entry->agent[0]->device == device &&
+		    entry->agent[0]->port_num == port_num)
+			return entry;
 	}
 	return NULL;
 }
 
-static inline struct ib_agent_port_private *
-ib_get_agent_port(struct ib_device *device, int port_num,
-		  struct ib_mad_agent *mad_agent)
+static struct ib_agent_port_private *
+ib_get_agent_port(struct ib_device *device, int port_num)
 {
 	struct ib_agent_port_private *entry;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
-	entry = __ib_get_agent_port(device, port_num, mad_agent);
+	entry = __ib_get_agent_port(device, port_num);
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 	return entry;
 }
@@ -100,192 +83,76 @@ int smi_check_local_dr_smp(struct ib_smp *smp,
 	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
 		return 1;
-	port_priv = ib_get_agent_port(device, port_num, NULL);
+
+	port_priv = ib_get_agent_port(device, port_num);
 	if (!port_priv) {
 		printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d "
-		       "not open\n",
-		       device->name, port_num);
+		       "not open\n", device->name, port_num);
 		return 1;
 	}
 
-	return smi_check_local_smp(port_priv->smp_agent, smp);
+	return smi_check_local_smp(port_priv->agent[0], smp);
 }
 
-static int agent_mad_send(struct ib_mad_agent *mad_agent,
-			  struct ib_agent_port_private *port_priv,
-			  struct ib_mad_private *mad_priv,
-			  struct ib_grh *grh,
-			  struct ib_wc *wc)
+int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
+			struct ib_wc *wc, struct ib_device *device,
+			int port_num, int qpn)
 {
-	struct ib_agent_send_wr *agent_send_wr;
-	struct ib_sge gather_list;
-	struct ib_send_wr send_wr;
-	struct ib_send_wr *bad_send_wr;
-	struct ib_ah_attr ah_attr;
-	unsigned long flags;
-	int ret = 1;
-
-	agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL);
-	if (!agent_send_wr)
-		goto out;
-	agent_send_wr->mad = mad_priv;
-
-	gather_list.addr = dma_map_single(mad_agent->device->dma_device,
-					  &mad_priv->mad,
-					  sizeof(mad_priv->mad),
-					  DMA_TO_DEVICE);
-	gather_list.length = sizeof(mad_priv->mad);
-	gather_list.lkey = mad_agent->mr->lkey;
-
-	send_wr.next = NULL;
-	send_wr.opcode = IB_WR_SEND;
-	send_wr.sg_list = &gather_list;
-	send_wr.num_sge = 1;
-	send_wr.wr.ud.remote_qpn = wc->src_qp; /* DQPN */
-	send_wr.wr.ud.timeout_ms = 0;
-	send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+	struct ib_agent_port_private *port_priv;
+	struct ib_mad_agent *agent;
+	struct ib_mad_send_buf *send_buf;
+	struct ib_ah *ah;
+	int ret;
 
-	ah_attr.dlid = wc->slid;
-	ah_attr.port_num = mad_agent->port_num;
-	ah_attr.src_path_bits = wc->dlid_path_bits;
-	ah_attr.sl = wc->sl;
-	ah_attr.static_rate = 0;
-	ah_attr.ah_flags = 0; /* No GRH */
-	if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
-		if (wc->wc_flags & IB_WC_GRH) {
-			ah_attr.ah_flags = IB_AH_GRH;
-			/* Should sgid be looked up ? */
-			ah_attr.grh.sgid_index = 0;
-			ah_attr.grh.hop_limit = grh->hop_limit;
-			ah_attr.grh.flow_label = be32_to_cpu(
-				grh->version_tclass_flow) & 0xfffff;
-			ah_attr.grh.traffic_class = (be32_to_cpu(
-				grh->version_tclass_flow) >> 20) & 0xff;
-			memcpy(ah_attr.grh.dgid.raw,
-			       grh->sgid.raw,
-			       sizeof(ah_attr.grh.dgid));
-		}
+	port_priv = ib_get_agent_port(device, port_num);
+	if (!port_priv) {
+		printk(KERN_ERR SPFX "Unable to find port agent\n");
+		return -ENODEV;
 	}
 
-	agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
-	if (IS_ERR(agent_send_wr->ah)) {
-		printk(KERN_ERR SPFX "No memory for address handle\n");
-		kfree(agent_send_wr);
-		goto out;
+	agent = port_priv->agent[qpn];
+	ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
+	if (IS_ERR(ah)) {
+		ret = PTR_ERR(ah);
+		printk(KERN_ERR SPFX "ib_create_ah_from_wc error:%d\n", ret);
+		return ret;
 	}
 
-	send_wr.wr.ud.ah = agent_send_wr->ah;
-	if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
-		send_wr.wr.ud.pkey_index = wc->pkey_index;
-		send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
-	} else {	/* for SMPs */
-		send_wr.wr.ud.pkey_index = 0;
-		send_wr.wr.ud.remote_qkey = 0;
+	send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
+				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+				      GFP_KERNEL);
+	if (IS_ERR(send_buf)) {
+		ret = PTR_ERR(send_buf);
+		printk(KERN_ERR SPFX "ib_create_send_mad error:%d\n", ret);
+		goto err1;
 	}
-	send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr;
-	send_wr.wr_id = (unsigned long)agent_send_wr;
 
-	pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr);
-
-	/* Send */
-	spin_lock_irqsave(&port_priv->send_list_lock, flags);
-	if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) {
-		spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
-		dma_unmap_single(mad_agent->device->dma_device,
-				 pci_unmap_addr(agent_send_wr, mapping),
-				 sizeof(mad_priv->mad),
-				 DMA_TO_DEVICE);
-		ib_destroy_ah(agent_send_wr->ah);
-		kfree(agent_send_wr);
-	} else {
-		list_add_tail(&agent_send_wr->send_list,
-			      &port_priv->send_posted_list);
-		spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
-		ret = 0;
+	memcpy(send_buf->mad, mad, sizeof *mad);
+	send_buf->ah = ah;
+	if ((ret = ib_post_send_mad(send_buf, NULL))) {
+		printk(KERN_ERR SPFX "ib_post_send_mad error:%d\n", ret);
+		goto err2;
 	}
+	return 0;
 
-out:
+err2:
+	ib_free_send_mad(send_buf);
+err1:
+	ib_destroy_ah(ah);
 	return ret;
 }
 
-int agent_send(struct ib_mad_private *mad,
-	       struct ib_grh *grh,
-	       struct ib_wc *wc,
-	       struct ib_device *device,
-	       int port_num)
-{
-	struct ib_agent_port_private *port_priv;
-	struct ib_mad_agent *mad_agent;
-
-	port_priv = ib_get_agent_port(device, port_num, NULL);
-	if (!port_priv) {
-		printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n",
-		       device->name, port_num);
-		return 1;
-	}
-
-	/* Get mad agent based on mgmt_class in MAD */
-	switch (mad->mad.mad.mad_hdr.mgmt_class) {
-	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
-	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
-		mad_agent = port_priv->smp_agent;
-		break;
-	case IB_MGMT_CLASS_PERF_MGMT:
-		mad_agent = port_priv->perf_mgmt_agent;
-		break;
-	default:
-		return 1;
-	}
-
-	return agent_mad_send(mad_agent, port_priv, mad, grh, wc);
-}
-
 static void agent_send_handler(struct ib_mad_agent *mad_agent,
 			       struct ib_mad_send_wc *mad_send_wc)
 {
-	struct ib_agent_port_private *port_priv;
-	struct ib_agent_send_wr *agent_send_wr;
-	unsigned long flags;
-
-	/* Find matching MAD agent */
-	port_priv = ib_get_agent_port(NULL, 0, mad_agent);
-	if (!port_priv) {
-		printk(KERN_ERR SPFX "agent_send_handler: no matching MAD "
-		       "agent %p\n", mad_agent);
-		return;
-	}
-
-	agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)mad_send_wc->wr_id;
-	spin_lock_irqsave(&port_priv->send_list_lock, flags);
-	/* Remove completed send from posted send MAD list */
-	list_del(&agent_send_wr->send_list);
-	spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
-
-	dma_unmap_single(mad_agent->device->dma_device,
-			 pci_unmap_addr(agent_send_wr, mapping),
-			 sizeof(agent_send_wr->mad->mad),
-			 DMA_TO_DEVICE);
-
-	ib_destroy_ah(agent_send_wr->ah);
-
-	/* Release allocated memory */
-	kmem_cache_free(ib_mad_cache, agent_send_wr->mad);
-	kfree(agent_send_wr);
+	ib_destroy_ah(mad_send_wc->send_buf->ah);
+	ib_free_send_mad(mad_send_wc->send_buf);
 }
 
 int ib_agent_port_open(struct ib_device *device, int port_num)
 {
-	int ret;
 	struct ib_agent_port_private *port_priv;
 	unsigned long flags;
+	int ret;
 
-	/* First, check if port already open for SMI */
-	port_priv = ib_get_agent_port(device, port_num, NULL);
-	if (port_priv) {
-		printk(KERN_DEBUG SPFX "%s port %d already open\n",
-		       device->name, port_num);
-		return 0;
-	}
-
 	/* Create new device info */
 	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
@@ -294,32 +161,25 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 		ret = -ENOMEM;
 		goto error1;
 	}
 	memset(port_priv, 0, sizeof *port_priv);
-	port_priv->port_num = port_num;
-	spin_lock_init(&port_priv->send_list_lock);
-	INIT_LIST_HEAD(&port_priv->send_posted_list);
 
-	/* Obtain send only MAD agent for SM class (SMI QP) */
-	port_priv->smp_agent = ib_register_mad_agent(device, port_num,
-						     IB_QPT_SMI,
-						     NULL, 0,
+	/* Obtain send only MAD agent for SMI QP */
+	port_priv->agent[0] = ib_register_mad_agent(device, port_num,
+						    IB_QPT_SMI, NULL, 0,
 						    &agent_send_handler,
 						    NULL, NULL);
-
-	if (IS_ERR(port_priv->smp_agent)) {
-		ret = PTR_ERR(port_priv->smp_agent);
+	if (IS_ERR(port_priv->agent[0])) {
+		ret = PTR_ERR(port_priv->agent[0]);
 		goto error2;
 	}
 
-	/* Obtain send only MAD agent for PerfMgmt class (GSI QP) */
-	port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
-							   IB_QPT_GSI,
-							   NULL, 0,
-							   &agent_send_handler,
-							   NULL, NULL);
-	if (IS_ERR(port_priv->perf_mgmt_agent)) {
-		ret = PTR_ERR(port_priv->perf_mgmt_agent);
+	/* Obtain send only MAD agent for GSI QP */
+	port_priv->agent[1] = ib_register_mad_agent(device, port_num,
+						    IB_QPT_GSI, NULL, 0,
+						    &agent_send_handler,
+						    NULL, NULL);
+	if (IS_ERR(port_priv->agent[1])) {
+		ret = PTR_ERR(port_priv->agent[1]);
 		goto error3;
 	}
@@ -330,7 +190,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	return 0;
 
 error3:
-	ib_unregister_mad_agent(port_priv->smp_agent);
+	ib_unregister_mad_agent(port_priv->agent[0]);
 error2:
 	kfree(port_priv);
 error1:
@@ -343,7 +203,7 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
-	port_priv = __ib_get_agent_port(device, port_num, NULL);
+	port_priv = __ib_get_agent_port(device, port_num);
 	if (port_priv == NULL) {
 		spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 		printk(KERN_ERR SPFX "Port %d not found\n", port_num);
@@ -352,9 +212,8 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	list_del(&port_priv->port_list);
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
-	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
-	ib_unregister_mad_agent(port_priv->smp_agent);
+	ib_unregister_mad_agent(port_priv->agent[1]);
+	ib_unregister_mad_agent(port_priv->agent[0]);
 	kfree(port_priv);
-
 	return 0;
 }
@@ -39,17 +39,14 @@
 #ifndef __AGENT_H_
 #define __AGENT_H_
 
-extern spinlock_t ib_agent_port_list_lock;
+#include <rdma/ib_mad.h>
 
-extern int ib_agent_port_open(struct ib_device *device,
-			      int port_num);
+extern int ib_agent_port_open(struct ib_device *device, int port_num);
 
 extern int ib_agent_port_close(struct ib_device *device, int port_num);
 
-extern int agent_send(struct ib_mad_private *mad,
-		      struct ib_grh *grh,
-		      struct ib_wc *wc,
-		      struct ib_device *device,
-		      int port_num);
+extern int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
+			       struct ib_wc *wc, struct ib_device *device,
+			       int port_num, int qpn);
 
 #endif	/* __AGENT_H_ */
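Taken together, the agent.c and agent.h hunks above replace the hand-rolled send path (manual ib_send_wr setup, DMA mapping, and a posted-send list) with the MAD layer's managed send buffers. A condensed sketch of the new response path, distilled from the hunks above (port lookup and log messages trimmed; illustrative, not verbatim kernel code):

/* Sketch: respond to a received MAD using the new helpers.  The MAD
 * layer now owns DMA mapping and completion bookkeeping; the agent only
 * builds an address handle from the receive completion and copies the
 * payload into the managed buffer.
 */
static int send_response_sketch(struct ib_mad_agent *agent,
				struct ib_mad *mad, struct ib_grh *grh,
				struct ib_wc *wc, int port_num)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_ah *ah;
	int ret;

	/* Address handle derived directly from the incoming completion */
	ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	/* MAD layer allocates and maps the buffer (plain MAD, no RMPP) */
	send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				      GFP_KERNEL);
	if (IS_ERR(send_buf)) {
		ib_destroy_ah(ah);
		return PTR_ERR(send_buf);
	}

	memcpy(send_buf->mad, mad, sizeof *mad);
	send_buf->ah = ah;

	ret = ib_post_send_mad(send_buf, NULL);
	if (ret) {
		ib_free_send_mad(send_buf);
		ib_destroy_ah(ah);
	}
	return ret;	/* on success, agent_send_handler() frees both */
}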
(deleted file: agent_priv.h)
-/*
- * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
- * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
- * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: agent_priv.h 1640 2005-01-24 22:39:02Z halr $
- */
-
-#ifndef __IB_AGENT_PRIV_H__
-#define __IB_AGENT_PRIV_H__
-
-#include <linux/pci.h>
-
-#define SPFX "ib_agent: "
-
-struct ib_agent_send_wr {
-	struct list_head send_list;
-	struct ib_ah *ah;
-	struct ib_mad_private *mad;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
-};
-
-struct ib_agent_port_private {
-	struct list_head port_list;
-	struct list_head send_posted_list;
-	spinlock_t send_list_lock;
-	int port_num;
-	struct ib_mad_agent *smp_agent;       /* SM class */
-	struct ib_mad_agent *perf_mgmt_agent; /* PerfMgmt class */
-};
-
-#endif	/* __IB_AGENT_PRIV_H__ */
(This diff is collapsed.)
@@ -186,6 +186,7 @@ static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
 		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
 						  req_msg->offset40) &
 						   0xFFFFFFF9) | 0x2);
+		break;
 	default:
 		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
 						 req_msg->offset40) &
......
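The one-line fix above closes a switch fall-through in cm_req_set_qp_type(): without the break, the branch that ORs in 0x2 (per the surrounding function, the UC transport-service encoding) would fall straight into default, which masks those bits away again. A minimal illustration of the hazard, with values taken from the hunk (field handling abstracted; not the full function):

	/* The low type bits of offset40 carry the transport service type. */
	switch (qp_type) {
	case IB_QPT_UC:
		offset40 = (offset40 & 0xFFFFFFF9) | 0x2; /* encode UC */
		break;	/* <-- the added break: stop before `default' */
	default:
		offset40 = offset40 & 0xFFFFFFF9;	  /* default type */
	}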
@@ -514,6 +514,12 @@ int ib_query_port(struct ib_device *device,
 		  u8 port_num,
 		  struct ib_port_attr *port_attr)
 {
+	if (device->node_type == IB_NODE_SWITCH) {
+		if (port_num)
+			return -EINVAL;
+	} else if (port_num < 1 || port_num > device->phys_port_cnt)
+		return -EINVAL;
+
 	return device->query_port(device, port_num, port_attr);
 }
 EXPORT_SYMBOL(ib_query_port);
@@ -583,6 +589,12 @@ int ib_modify_port(struct ib_device *device,
 		   u8 port_num, int port_modify_mask,
 		   struct ib_port_modify *port_modify)
 {
+	if (device->node_type == IB_NODE_SWITCH) {
+		if (port_num)
+			return -EINVAL;
+	} else if (port_num < 1 || port_num > device->phys_port_cnt)
+		return -EINVAL;
+
 	return device->modify_port(device, port_num, port_modify_mask,
 				   port_modify);
 }
......
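Both additions encode the same verbs-layer numbering rule: a switch is managed through port 0, while CAs and routers number their physical ports 1 through phys_port_cnt. A sketch of that rule as a standalone predicate (the helper name is illustrative, not a kernel API):

/* Illustrative helper mirroring the validation added inline above. */
static int port_num_valid(struct ib_device *device, u8 port_num)
{
	if (device->node_type == IB_NODE_SWITCH)
		return port_num == 0;	/* switches: management port only */
	/* CAs/routers: physical ports are numbered starting at 1 */
	return port_num >= 1 && port_num <= device->phys_port_cnt;
}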
(This diff is collapsed.)
@@ -118,9 +118,10 @@ struct ib_mad_send_wr_private {
 	struct ib_mad_list_head mad_list;
 	struct list_head agent_list;
 	struct ib_mad_agent_private *mad_agent_priv;
+	struct ib_mad_send_buf send_buf;
+	DECLARE_PCI_UNMAP_ADDR(mapping)
 	struct ib_send_wr send_wr;
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
-	u64 wr_id;			/* client WR ID */
 	__be64 tid;
 	unsigned long timeout;
 	int retries;
@@ -141,10 +142,7 @@ struct ib_mad_local_private {
 	struct list_head completion_list;
 	struct ib_mad_private *mad_priv;
 	struct ib_mad_agent_private *recv_mad_agent;
-	struct ib_send_wr send_wr;
-	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
-	u64 wr_id;			/* client WR ID */
-	__be64 tid;
+	struct ib_mad_send_wr_private *mad_send_wr;
 };
 
 struct ib_mad_mgmt_method_table {
......
@@ -103,12 +103,12 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
 static int data_offset(u8 mgmt_class)
 {
 	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return offsetof(struct ib_sa_mad, data);
+		return IB_MGMT_SA_HDR;
 	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
 		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return offsetof(struct ib_vendor_mad, data);
+		return IB_MGMT_VENDOR_HDR;
 	else
-		return offsetof(struct ib_rmpp_mad, data);
+		return IB_MGMT_RMPP_HDR;
 }
 
 static void format_ack(struct ib_rmpp_mad *ack,
@@ -135,55 +135,52 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
 		     struct ib_mad_recv_wc *recv_wc)
 {
 	struct ib_mad_send_buf *msg;
-	struct ib_send_wr *bad_send_wr;
-	int hdr_len, ret;
+	int ret;
 
-	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
 	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
-				 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
-				 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
-				 GFP_KERNEL);
+				 recv_wc->wc->pkey_index, 1, IB_MGMT_RMPP_HDR,
+				 IB_MGMT_RMPP_DATA, GFP_KERNEL);
 	if (!msg)
 		return;
 
-	format_ack((struct ib_rmpp_mad *) msg->mad,
-		   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
-	ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
-			       &bad_send_wr);
+	format_ack(msg->mad, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad,
+		   rmpp_recv);
+	msg->ah = rmpp_recv->ah;
+	ret = ib_post_send_mad(msg, NULL);
 	if (ret)
 		ib_free_send_mad(msg);
 }
 
-static int alloc_response_msg(struct ib_mad_agent *agent,
-			      struct ib_mad_recv_wc *recv_wc,
-			      struct ib_mad_send_buf **msg)
+static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
+						  struct ib_mad_recv_wc *recv_wc)
 {
-	struct ib_mad_send_buf *m;
+	struct ib_mad_send_buf *msg;
 	struct ib_ah *ah;
-	int hdr_len;
 
 	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
 				  recv_wc->recv_buf.grh, agent->port_num);
 	if (IS_ERR(ah))
-		return PTR_ERR(ah);
+		return (void *) ah;
 
-	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
-	m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
-			       recv_wc->wc->pkey_index, ah, 1, hdr_len,
-			       sizeof(struct ib_rmpp_mad) - hdr_len,
-			       GFP_KERNEL);
-	if (IS_ERR(m)) {
+	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
+				 recv_wc->wc->pkey_index, 1,
+				 IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA,
+				 GFP_KERNEL);
+	if (IS_ERR(msg))
 		ib_destroy_ah(ah);
-		return PTR_ERR(m);
-	}
-	*msg = m;
-	return 0;
+	else
+		msg->ah = ah;
+	return msg;
 }
 
-static void free_msg(struct ib_mad_send_buf *msg)
+void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
 {
-	ib_destroy_ah(msg->send_wr.wr.ud.ah);
-	ib_free_send_mad(msg);
+	struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;
+
+	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_ACK)
+		ib_destroy_ah(mad_send_wc->send_buf->ah);
+	ib_free_send_mad(mad_send_wc->send_buf);
 }
 
 static void nack_recv(struct ib_mad_agent_private *agent,
@@ -191,14 +188,13 @@ static void nack_recv(struct ib_mad_agent_private *agent,
 {
 	struct ib_mad_send_buf *msg;
 	struct ib_rmpp_mad *rmpp_mad;
-	struct ib_send_wr *bad_send_wr;
 	int ret;
 
-	ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
-	if (ret)
+	msg = alloc_response_msg(&agent->agent, recv_wc);
+	if (IS_ERR(msg))
 		return;
 
-	rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
+	rmpp_mad = msg->mad;
 	memcpy(rmpp_mad, recv_wc->recv_buf.mad,
 	       data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));
@@ -210,9 +206,11 @@ static void nack_recv(struct ib_mad_agent_private *agent,
 	rmpp_mad->rmpp_hdr.seg_num = 0;
 	rmpp_mad->rmpp_hdr.paylen_newwin = 0;
 
-	ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
-	if (ret)
-		free_msg(msg);
+	ret = ib_post_send_mad(msg, NULL);
+	if (ret) {
+		ib_destroy_ah(msg->ah);
+		ib_free_send_mad(msg);
+	}
 }
 
 static void recv_timeout_handler(void *data)
@@ -585,7 +583,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 	int timeout;
 	u32 paylen;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
 	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);
@@ -612,7 +610,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 	}
 
 	/* 2 seconds for an ACK until we can find the packet lifetime */
-	timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
+	timeout = mad_send_wr->send_buf.timeout_ms;
 	if (!timeout || timeout > 2000)
 		mad_send_wr->timeout = msecs_to_jiffies(2000);
 	mad_send_wr->seg_num++;
@@ -640,7 +638,7 @@ static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
 
 	wc.status = IB_WC_REM_ABORT_ERR;
 	wc.vendor_err = rmpp_status;
-	wc.wr_id = mad_send_wr->wr_id;
+	wc.send_buf = &mad_send_wr->send_buf;
 	ib_mad_complete_send_wr(mad_send_wr, &wc);
 	return;
 out:
@@ -694,12 +692,12 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 
 	if (seg_num > mad_send_wr->last_ack) {
 		mad_send_wr->last_ack = seg_num;
-		mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
+		mad_send_wr->retries = mad_send_wr->send_buf.retries;
 	}
 	mad_send_wr->newwin = newwin;
 	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
 		/* If no response is expected, the ACK completes the send */
-		if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
+		if (!mad_send_wr->send_buf.timeout_ms) {
 			struct ib_mad_send_wc wc;
 
 			ib_mark_mad_done(mad_send_wr);
@@ -707,13 +705,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 
 			wc.status = IB_WC_SUCCESS;
 			wc.vendor_err = 0;
-			wc.wr_id = mad_send_wr->wr_id;
+			wc.send_buf = &mad_send_wr->send_buf;
 			ib_mad_complete_send_wr(mad_send_wr, &wc);
 			return;
 		}
 		if (mad_send_wr->refcount == 1)
-			ib_reset_mad_timeout(mad_send_wr, mad_send_wr->
-					     send_wr.wr.ud.timeout_ms);
+			ib_reset_mad_timeout(mad_send_wr,
+					     mad_send_wr->send_buf.timeout_ms);
 	} else if (mad_send_wr->refcount == 1 &&
 		   mad_send_wr->seg_num < mad_send_wr->newwin &&
 		   mad_send_wr->seg_num <= mad_send_wr->total_seg) {
@@ -842,7 +840,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 	struct ib_rmpp_mad *rmpp_mad;
 	int i, total_len, ret;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 	      IB_MGMT_RMPP_FLAG_ACTIVE))
 		return IB_RMPP_RESULT_UNHANDLED;
@@ -863,7 +861,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 	mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
 			(sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
-	mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
+	mad_send_wr->pad = total_len - IB_MGMT_RMPP_HDR -
 			   be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
 
 	/* We need to wait for the final ACK even if there isn't a response */
@@ -878,23 +876,15 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
 			    struct ib_mad_send_wc *mad_send_wc)
 {
 	struct ib_rmpp_mad *rmpp_mad;
-	struct ib_mad_send_buf *msg;
 	int ret;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 	      IB_MGMT_RMPP_FLAG_ACTIVE))
 		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
 
-	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
-		msg = (struct ib_mad_send_buf *) (unsigned long)
-		      mad_send_wc->wr_id;
-		if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
-			ib_free_send_mad(msg);
-		else
-			free_msg(msg);
+	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
 		return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
-	}
 
 	if (mad_send_wc->status != IB_WC_SUCCESS ||
 	    mad_send_wr->status != IB_WC_SUCCESS)
@@ -905,7 +895,7 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
 	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
 		mad_send_wr->timeout =
-			msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
+			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
 		return IB_RMPP_RESULT_PROCESSED; /* Send done */
 	}
@@ -926,7 +916,7 @@ int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
 	struct ib_rmpp_mad *rmpp_mad;
 	int ret;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 	      IB_MGMT_RMPP_FLAG_ACTIVE))
 		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
......
@@ -51,6 +51,8 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
 int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
 			    struct ib_mad_send_wc *mad_send_wc);
 
+void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc);
+
 void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent);
 
 int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr);
......
@@ -73,11 +73,10 @@ struct ib_sa_device {
 struct ib_sa_query {
 	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
 	void (*release)(struct ib_sa_query *);
 	struct ib_sa_port      *port;
-	struct ib_sa_mad       *mad;
+	struct ib_mad_send_buf *mad_buf;
 	struct ib_sa_sm_ah     *sm_ah;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
-	int                    id;
+	int                     id;
 };
 
 struct ib_sa_service_query {
@@ -426,6 +425,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
 {
 	unsigned long flags;
 	struct ib_mad_agent *agent;
+	struct ib_mad_send_buf *mad_buf;
 
 	spin_lock_irqsave(&idr_lock, flags);
 	if (idr_find(&query_idr, id) != query) {
@@ -433,9 +433,10 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
 		return;
 	}
 	agent = query->port->agent;
+	mad_buf = query->mad_buf;
 	spin_unlock_irqrestore(&idr_lock, flags);
 
-	ib_cancel_mad(agent, id);
+	ib_cancel_mad(agent, mad_buf);
 }
 EXPORT_SYMBOL(ib_sa_cancel_query);
@@ -457,71 +458,46 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
 static int send_mad(struct ib_sa_query *query, int timeout_ms)
 {
-	struct ib_sa_port *port = query->port;
 	unsigned long flags;
-	int ret;
-	struct ib_sge      gather_list;
-	struct ib_send_wr *bad_wr, wr = {
-		.opcode     = IB_WR_SEND,
-		.sg_list    = &gather_list,
-		.num_sge    = 1,
-		.send_flags = IB_SEND_SIGNALED,
-		.wr	    = {
-			 .ud = {
-				 .mad_hdr     = &query->mad->mad_hdr,
-				 .remote_qpn  = 1,
-				 .remote_qkey = IB_QP1_QKEY,
-				 .timeout_ms  = timeout_ms,
-			 }
-		 }
-	};
+	int ret, id;
 
 retry:
 	if (!idr_pre_get(&query_idr, GFP_ATOMIC))
 		return -ENOMEM;
 	spin_lock_irqsave(&idr_lock, flags);
-	ret = idr_get_new(&query_idr, query, &query->id);
+	ret = idr_get_new(&query_idr, query, &id);
 	spin_unlock_irqrestore(&idr_lock, flags);
 	if (ret == -EAGAIN)
 		goto retry;
 	if (ret)
 		return ret;
 
-	wr.wr_id = query->id;
+	query->mad_buf->timeout_ms = timeout_ms;
+	query->mad_buf->context[0] = query;
+	query->id = id;
 
-	spin_lock_irqsave(&port->ah_lock, flags);
-	kref_get(&port->sm_ah->ref);
-	query->sm_ah = port->sm_ah;
-	wr.wr.ud.ah  = port->sm_ah->ah;
-	spin_unlock_irqrestore(&port->ah_lock, flags);
+	spin_lock_irqsave(&query->port->ah_lock, flags);
+	kref_get(&query->port->sm_ah->ref);
+	query->sm_ah = query->port->sm_ah;
+	spin_unlock_irqrestore(&query->port->ah_lock, flags);
 
-	gather_list.addr   = dma_map_single(port->agent->device->dma_device,
-					    query->mad,
-					    sizeof (struct ib_sa_mad),
-					    DMA_TO_DEVICE);
-	gather_list.length = sizeof (struct ib_sa_mad);
-	gather_list.lkey   = port->agent->mr->lkey;
-	pci_unmap_addr_set(query, mapping, gather_list.addr);
+	query->mad_buf->ah = query->sm_ah->ah;
 
-	ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
+	ret = ib_post_send_mad(query->mad_buf, NULL);
 	if (ret) {
-		dma_unmap_single(port->agent->device->dma_device,
-				 pci_unmap_addr(query, mapping),
-				 sizeof (struct ib_sa_mad),
-				 DMA_TO_DEVICE);
-		kref_put(&query->sm_ah->ref, free_sm_ah);
 		spin_lock_irqsave(&idr_lock, flags);
-		idr_remove(&query_idr, query->id);
+		idr_remove(&query_idr, id);
 		spin_unlock_irqrestore(&idr_lock, flags);
+		kref_put(&query->sm_ah->ref, free_sm_ah);
 	}
 
 	/*
 	 * It's not safe to dereference query any more, because the
 	 * send may already have completed and freed the query in
-	 * another context.  So use wr.wr_id, which has a copy of the
-	 * query's id.
+	 * another context.
 	 */
-	return ret ? ret : wr.wr_id;
+	return ret ? ret : id;
 }
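send_mad() now threads the query through the send buffer instead of a wr_id: context[0] carries the query pointer to the completion handlers, while the idr id survives only for ib_sa_cancel_query(). A condensed sketch of the two sides of that handshake (assuming the mad_buf was built with ib_create_send_mad(), as in the query functions below; the function names here are illustrative):

/* Post side: attach the query to its pre-built send buffer. */
static int post_side_sketch(struct ib_sa_query *query, int timeout_ms)
{
	query->mad_buf->timeout_ms = timeout_ms;
	query->mad_buf->context[0] = query;	/* recovered on completion */
	return ib_post_send_mad(query->mad_buf, NULL);
}

/* Completion side: no idr lookup is needed any more. */
static void completion_side_sketch(struct ib_mad_send_wc *wc)
{
	struct ib_sa_query *query = wc->send_buf->context[0];

	/* ...status/callback handling as in send_handler() below... */
	ib_free_send_mad(wc->send_buf);
	query->release(query);
}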
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
...@@ -543,7 +519,6 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, ...@@ -543,7 +519,6 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{ {
kfree(sa_query->mad);
kfree(container_of(sa_query, struct ib_sa_path_query, sa_query)); kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
} }
@@ -583,43 +558,58 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
 {
 	struct ib_sa_path_query *query;
 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
-	struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
-	struct ib_mad_agent *agent  = port->agent;
+	struct ib_sa_port   *port;
+	struct ib_mad_agent *agent;
+	struct ib_sa_mad *mad;
 	int ret;
 
+	if (!sa_dev)
+		return -ENODEV;
+
+	port  = &sa_dev->port[port_num - sa_dev->start_port];
+	agent = port->agent;
+
 	query = kmalloc(sizeof *query, gfp_mask);
 	if (!query)
 		return -ENOMEM;
-	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
-	if (!query->sa_query.mad) {
-		kfree(query);
-		return -ENOMEM;
+
+	query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
+						     0, IB_MGMT_SA_HDR,
+						     IB_MGMT_SA_DATA, gfp_mask);
+	if (!query->sa_query.mad_buf) {
+		ret = -ENOMEM;
+		goto err1;
 	}
 
 	query->callback = callback;
 	query->context  = context;
 
-	init_mad(query->sa_query.mad, agent);
+	mad = query->sa_query.mad_buf->mad;
+	init_mad(mad, agent);
 
 	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
 	query->sa_query.release  = ib_sa_path_rec_release;
 	query->sa_query.port     = port;
-	query->sa_query.mad->mad_hdr.method   = IB_MGMT_METHOD_GET;
-	query->sa_query.mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_PATH_REC);
-	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+	mad->mad_hdr.method   = IB_MGMT_METHOD_GET;
+	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_PATH_REC);
+	mad->sa_hdr.comp_mask = comp_mask;
 
-	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
-		rec, query->sa_query.mad->data);
+	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);
 
 	*sa_query = &query->sa_query;
 
 	ret = send_mad(&query->sa_query, timeout_ms);
-	if (ret < 0) {
-		*sa_query = NULL;
-		kfree(query->sa_query.mad);
-		kfree(query);
-	}
+	if (ret < 0)
+		goto err2;
+
+	return ret;
+
+err2:
+	*sa_query = NULL;
+	ib_free_send_mad(query->sa_query.mad_buf);
+err1:
+	kfree(query);
 	return ret;
 }
 EXPORT_SYMBOL(ib_sa_path_rec_get);
@@ -643,7 +633,6 @@ static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
 
 static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
 {
-	kfree(sa_query->mad);
 	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
 }
@@ -685,10 +674,17 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
 {
 	struct ib_sa_service_query *query;
 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
-	struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
-	struct ib_mad_agent *agent  = port->agent;
+	struct ib_sa_port   *port;
+	struct ib_mad_agent *agent;
+	struct ib_sa_mad *mad;
 	int ret;
 
+	if (!sa_dev)
+		return -ENODEV;
+
+	port  = &sa_dev->port[port_num - sa_dev->start_port];
+	agent = port->agent;
+
 	if (method != IB_MGMT_METHOD_GET &&
 	    method != IB_MGMT_METHOD_SET &&
 	    method != IB_SA_METHOD_DELETE)
@@ -697,37 +693,45 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
 	query = kmalloc(sizeof *query, gfp_mask);
 	if (!query)
 		return -ENOMEM;
-	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
-	if (!query->sa_query.mad) {
-		kfree(query);
-		return -ENOMEM;
+
+	query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
						     0, IB_MGMT_SA_HDR,
						     IB_MGMT_SA_DATA, gfp_mask);
+	if (!query->sa_query.mad_buf) {
+		ret = -ENOMEM;
+		goto err1;
 	}
 
 	query->callback = callback;
 	query->context  = context;
 
-	init_mad(query->sa_query.mad, agent);
+	mad = query->sa_query.mad_buf->mad;
+	init_mad(mad, agent);
 
 	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
 	query->sa_query.release  = ib_sa_service_rec_release;
 	query->sa_query.port     = port;
-	query->sa_query.mad->mad_hdr.method   = method;
-	query->sa_query.mad->mad_hdr.attr_id =
-		cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
-	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+	mad->mad_hdr.method   = method;
+	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
+	mad->sa_hdr.comp_mask = comp_mask;
 
 	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
-		rec, query->sa_query.mad->data);
+		rec, mad->data);
 
 	*sa_query = &query->sa_query;
 
 	ret = send_mad(&query->sa_query, timeout_ms);
-	if (ret < 0) {
-		*sa_query = NULL;
-		kfree(query->sa_query.mad);
-		kfree(query);
-	}
+	if (ret < 0)
+		goto err2;
+
+	return ret;
+
+err2:
+	*sa_query = NULL;
+	ib_free_send_mad(query->sa_query.mad_buf);
+err1:
+	kfree(query);
 	return ret;
 }
 EXPORT_SYMBOL(ib_sa_service_rec_query);
@@ -751,7 +755,6 @@ static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
 
 static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
 {
-	kfree(sa_query->mad);
 	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
 }
@@ -768,60 +771,69 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
 {
 	struct ib_sa_mcmember_query *query;
 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
-	struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
-	struct ib_mad_agent *agent  = port->agent;
+	struct ib_sa_port   *port;
+	struct ib_mad_agent *agent;
+	struct ib_sa_mad *mad;
 	int ret;
 
+	if (!sa_dev)
+		return -ENODEV;
+
+	port  = &sa_dev->port[port_num - sa_dev->start_port];
+	agent = port->agent;
+
 	query = kmalloc(sizeof *query, gfp_mask);
 	if (!query)
 		return -ENOMEM;
-	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
-	if (!query->sa_query.mad) {
-		kfree(query);
-		return -ENOMEM;
+
+	query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
						     0, IB_MGMT_SA_HDR,
						     IB_MGMT_SA_DATA, gfp_mask);
+	if (!query->sa_query.mad_buf) {
+		ret = -ENOMEM;
+		goto err1;
 	}
 
 	query->callback = callback;
 	query->context  = context;
 
-	init_mad(query->sa_query.mad, agent);
+	mad = query->sa_query.mad_buf->mad;
+	init_mad(mad, agent);
 
 	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
 	query->sa_query.release  = ib_sa_mcmember_rec_release;
 	query->sa_query.port     = port;
-	query->sa_query.mad->mad_hdr.method   = method;
-	query->sa_query.mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
-	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+	mad->mad_hdr.method   = method;
+	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
+	mad->sa_hdr.comp_mask = comp_mask;
 
 	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
-		rec, query->sa_query.mad->data);
+		rec, mad->data);
 
 	*sa_query = &query->sa_query;
 
 	ret = send_mad(&query->sa_query, timeout_ms);
-	if (ret < 0) {
-		*sa_query = NULL;
-		kfree(query->sa_query.mad);
-		kfree(query);
-	}
+	if (ret < 0)
+		goto err2;
+
+	return ret;
 
+err2:
+	*sa_query = NULL;
+	ib_free_send_mad(query->sa_query.mad_buf);
+err1:
+	kfree(query);
 	return ret;
 }
 EXPORT_SYMBOL(ib_sa_mcmember_rec_query);
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *mad_send_wc)
 {
-	struct ib_sa_query *query;
+	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
 	unsigned long flags;
 
-	spin_lock_irqsave(&idr_lock, flags);
-	query = idr_find(&query_idr, mad_send_wc->wr_id);
-	spin_unlock_irqrestore(&idr_lock, flags);
-
-	if (!query)
-		return;
-
 	if (query->callback)
 		switch (mad_send_wc->status) {
 		case IB_WC_SUCCESS:
@@ -838,30 +850,25 @@ static void send_handler(struct ib_mad_agent *agent,
 			break;
 		}
 
-	dma_unmap_single(agent->device->dma_device,
-			 pci_unmap_addr(query, mapping),
-			 sizeof (struct ib_sa_mad),
-			 DMA_TO_DEVICE);
-	kref_put(&query->sm_ah->ref, free_sm_ah);
-
-	query->release(query);
-
 	spin_lock_irqsave(&idr_lock, flags);
-	idr_remove(&query_idr, mad_send_wc->wr_id);
+	idr_remove(&query_idr, query->id);
 	spin_unlock_irqrestore(&idr_lock, flags);
+
+	ib_free_send_mad(mad_send_wc->send_buf);
+	kref_put(&query->sm_ah->ref, free_sm_ah);
+	query->release(query);
 }
 static void recv_handler(struct ib_mad_agent *mad_agent,
 			 struct ib_mad_recv_wc *mad_recv_wc)
 {
 	struct ib_sa_query *query;
-	unsigned long flags;
+	struct ib_mad_send_buf *mad_buf;
 
-	spin_lock_irqsave(&idr_lock, flags);
-	query = idr_find(&query_idr, mad_recv_wc->wc->wr_id);
-	spin_unlock_irqrestore(&idr_lock, flags);
+	mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
+	query = mad_buf->context[0];
 
-	if (query && query->callback) {
+	if (query->callback) {
 		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
 			query->callback(query,
 					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
@@ -975,6 +982,7 @@ static int __init ib_sa_init(void)
 static void __exit ib_sa_cleanup(void)
 {
 	ib_unregister_client(&sa_client);
+	idr_destroy(&query_idr);
 }
 
 module_init(ib_sa_init);
......
@@ -39,6 +39,8 @@
 #ifndef __SMI_H_
 #define __SMI_H_
 
+#include <rdma/ib_smi.h>
+
 int smi_handle_dr_smp_recv(struct ib_smp *smp,
 			   u8 node_type,
 			   int port_num,
......
@@ -65,6 +65,11 @@ struct port_table_attribute {
 	int			index;
 };
 
+static inline int ibdev_is_alive(const struct ib_device *dev)
+{
+	return dev->reg_state == IB_DEV_REGISTERED;
+}
+
 static ssize_t port_attr_show(struct kobject *kobj,
 			      struct attribute *attr, char *buf)
 {
@@ -74,6 +79,8 @@ static ssize_t port_attr_show(struct kobject *kobj,
 
 	if (!port_attr->show)
 		return -EIO;
+	if (!ibdev_is_alive(p->ibdev))
+		return -ENODEV;
 
 	return port_attr->show(p, port_attr, buf);
 }
@@ -581,6 +588,9 @@ static ssize_t show_node_type(struct class_device *cdev, char *buf)
 {
 	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
 
+	if (!ibdev_is_alive(dev))
+		return -ENODEV;
+
 	switch (dev->node_type) {
 	case IB_NODE_CA:     return sprintf(buf, "%d: CA\n", dev->node_type);
 	case IB_NODE_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
@@ -595,6 +605,9 @@ static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
 	struct ib_device_attr attr;
 	ssize_t ret;
 
+	if (!ibdev_is_alive(dev))
+		return -ENODEV;
+
 	ret = ib_query_device(dev, &attr);
 	if (ret)
 		return ret;
@@ -612,6 +625,9 @@ static ssize_t show_node_guid(struct class_device *cdev, char *buf)
 	struct ib_device_attr attr;
 	ssize_t ret;
 
+	if (!ibdev_is_alive(dev))
+		return -ENODEV;
+
 	ret = ib_query_device(dev, &attr);
 	if (ret)
 		return ret;
......
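The sysfs changes gate every attribute read on ibdev_is_alive(), so reads against a device that is mid-unregistration fail fast with -ENODEV instead of touching driver state. A new attribute would follow the same pattern (the attribute below is hypothetical, shown only to illustrate the guard):

static ssize_t show_phys_port_cnt(struct class_device *cdev, char *buf)
{
	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);

	if (!ibdev_is_alive(dev))
		return -ENODEV;	/* device is no longer registered */

	return sprintf(buf, "%d\n", dev->phys_port_cnt);
}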
@@ -41,37 +41,81 @@
 #include <linux/file.h>
 #include <linux/mount.h>
 #include <linux/cdev.h>
+#include <linux/idr.h>
 
 #include <asm/uaccess.h>
 
-#include "ucm.h"
+#include <rdma/ib_cm.h>
+#include <rdma/ib_user_cm.h>
 
 MODULE_AUTHOR("Libor Michalek");
 MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
 MODULE_LICENSE("Dual BSD/GPL");
 
-static int ucm_debug_level;
+struct ib_ucm_device {
+	int			devnum;
+	struct cdev		dev;
+	struct class_device	class_dev;
+	struct ib_device	*ib_dev;
+};
+
+struct ib_ucm_file {
+	struct semaphore mutex;
+	struct file *filp;
+	struct ib_ucm_device *device;
+
+	struct list_head  ctxs;
+	struct list_head  events;
+	wait_queue_head_t poll_wait;
+};
+
+struct ib_ucm_context {
+	int                 id;
+	wait_queue_head_t   wait;
+	atomic_t            ref;
+	int		    events_reported;
+
+	struct ib_ucm_file *file;
+	struct ib_cm_id    *cm_id;
+	__u64		    uid;
+
+	struct list_head    events;    /* list of pending events. */
+	struct list_head    file_list; /* member in file ctx list */
+};
+
+struct ib_ucm_event {
+	struct ib_ucm_context	*ctx;
+	struct list_head	file_list; /* member in file event list */
+	struct list_head	ctx_list;  /* member in ctx event list */
 
-module_param_named(debug_level, ucm_debug_level, int, 0644);
-MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
+	struct ib_cm_id		*cm_id;
+	struct ib_ucm_event_resp resp;
+	void			*data;
+	void			*info;
+	int			data_len;
+	int			info_len;
+};
 
 enum {
 	IB_UCM_MAJOR = 231,
-	IB_UCM_MINOR = 255
+	IB_UCM_BASE_MINOR = 224,
+	IB_UCM_MAX_DEVICES = 32
 };
 
-#define IB_UCM_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_MINOR)
+#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
 
-#define PFX "UCM: "
-
-#define ucm_dbg(format, arg...)			\
-	do {					\
-		if (ucm_debug_level > 0)	\
-			printk(KERN_DEBUG PFX format, ## arg); \
-	} while (0)
+static void ib_ucm_add_one(struct ib_device *device);
+static void ib_ucm_remove_one(struct ib_device *device);
 
-static struct semaphore ctx_id_mutex;
-static struct idr       ctx_id_table;
+static struct ib_client ucm_client = {
+	.name   = "ucm",
+	.add    = ib_ucm_add_one,
+	.remove = ib_ucm_remove_one
+};
+
+static DECLARE_MUTEX(ctx_id_mutex);
+static DEFINE_IDR(ctx_id_table);
+static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
 
 static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
 {
@@ -152,17 +196,13 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
 		goto error;
 
 	list_add_tail(&ctx->file_list, &file->ctxs);
-	ucm_dbg("Allocated CM ID <%d>\n", ctx->id);
 	return ctx;
 
 error:
 	kfree(ctx);
 	return NULL;
 }
 
-/*
- * Event portion of the API, handle CM events
- * and allow event polling.
- */
 static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
 				  struct ib_sa_path_rec	 *kpath)
 {
@@ -209,6 +249,7 @@ static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
 	ureq->retry_count                = kreq->retry_count;
 	ureq->rnr_retry_count            = kreq->rnr_retry_count;
 	ureq->srq                        = kreq->srq;
+	ureq->port                       = kreq->port;
 
 	ib_ucm_event_path_get(&ureq->primary_path, kreq->primary_path);
 	ib_ucm_event_path_get(&ureq->alternate_path, kreq->alternate_path);
@@ -295,6 +336,8 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
 	case IB_CM_SIDR_REQ_RECEIVED:
 		uvt->resp.u.sidr_req_resp.pkey =
 					evt->param.sidr_req_rcvd.pkey;
+		uvt->resp.u.sidr_req_resp.port =
+					evt->param.sidr_req_rcvd.port;
 		uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
 		break;
 	case IB_CM_SIDR_REP_RECEIVED:
...@@ -387,9 +430,7 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file, ...@@ -387,9 +430,7 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd))) if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT; return -EFAULT;
/*
* wait
*/
down(&file->mutex); down(&file->mutex);
while (list_empty(&file->events)) { while (list_empty(&file->events)) {
...@@ -471,7 +512,6 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file, ...@@ -471,7 +512,6 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
return result; return result;
} }
static ssize_t ib_ucm_create_id(struct ib_ucm_file *file, static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
const char __user *inbuf, const char __user *inbuf,
int in_len, int out_len) int in_len, int out_len)
...@@ -494,29 +534,27 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file, ...@@ -494,29 +534,27 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
return -ENOMEM; return -ENOMEM;
ctx->uid = cmd.uid; ctx->uid = cmd.uid;
ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx); ctx->cm_id = ib_create_cm_id(file->device->ib_dev,
ib_ucm_event_handler, ctx);
if (IS_ERR(ctx->cm_id)) { if (IS_ERR(ctx->cm_id)) {
result = PTR_ERR(ctx->cm_id); result = PTR_ERR(ctx->cm_id);
goto err; goto err1;
} }
resp.id = ctx->id; resp.id = ctx->id;
if (copy_to_user((void __user *)(unsigned long)cmd.response, if (copy_to_user((void __user *)(unsigned long)cmd.response,
&resp, sizeof(resp))) { &resp, sizeof(resp))) {
result = -EFAULT; result = -EFAULT;
goto err; goto err2;
} }
return 0; return 0;
err: err2:
ib_destroy_cm_id(ctx->cm_id);
err1:
down(&ctx_id_mutex); down(&ctx_id_mutex);
idr_remove(&ctx_id_table, ctx->id); idr_remove(&ctx_id_table, ctx->id);
up(&ctx_id_mutex); up(&ctx_id_mutex);
if (!IS_ERR(ctx->cm_id))
ib_destroy_cm_id(ctx->cm_id);
kfree(ctx); kfree(ctx);
return result; return result;
} }
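
The reworked error path in ib_ucm_create_id() above replaces the old single-label cleanup and its `if (!IS_ERR(ctx->cm_id))` test with ordered unwind labels: err2 destroys the cm_id and falls through to err1, which removes only the idr entry. A minimal sketch of this kernel unwind idiom, using hypothetical acquire/release helpers rather than the actual ucm calls:

/* Sketch of the ordered-label unwind idiom (hypothetical resources
 * a/b/c, not the real ucm code). Each failure site jumps to the label
 * that undoes exactly what has succeeded so far, so no cleanup step
 * needs a runtime NULL/IS_ERR check. */
static int setup_example(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto err;	/* nothing to undo yet */

	ret = acquire_b();
	if (ret)
		goto err_a;	/* undo a only */

	ret = acquire_c();
	if (ret)
		goto err_b;	/* undo b, then a */

	return 0;

err_b:
	release_b();
err_a:
	release_a();
err:
	return ret;
}
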
...@@ -1184,9 +1222,6 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf, ...@@ -1184,9 +1222,6 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
if (copy_from_user(&hdr, buf, sizeof(hdr))) if (copy_from_user(&hdr, buf, sizeof(hdr)))
return -EFAULT; return -EFAULT;
ucm_dbg("Write. cmd <%d> in <%d> out <%d> len <%Zu>\n",
hdr.cmd, hdr.in, hdr.out, len);
if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucm_cmd_table)) if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
return -EINVAL; return -EINVAL;
...@@ -1231,8 +1266,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp) ...@@ -1231,8 +1266,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
filp->private_data = file; filp->private_data = file;
file->filp = filp; file->filp = filp;
file->device = container_of(inode->i_cdev, struct ib_ucm_device, dev);
ucm_dbg("Created struct\n");
return 0; return 0;
} }
...@@ -1263,7 +1297,17 @@ static int ib_ucm_close(struct inode *inode, struct file *filp) ...@@ -1263,7 +1297,17 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
return 0; return 0;
} }
static struct file_operations ib_ucm_fops = { static void ib_ucm_release_class_dev(struct class_device *class_dev)
{
struct ib_ucm_device *dev;
dev = container_of(class_dev, struct ib_ucm_device, class_dev);
cdev_del(&dev->dev);
clear_bit(dev->devnum, dev_map);
kfree(dev);
}
static struct file_operations ucm_fops = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.open = ib_ucm_open, .open = ib_ucm_open,
.release = ib_ucm_close, .release = ib_ucm_close,
...@@ -1271,55 +1315,142 @@ static struct file_operations ib_ucm_fops = { ...@@ -1271,55 +1315,142 @@ static struct file_operations ib_ucm_fops = {
.poll = ib_ucm_poll, .poll = ib_ucm_poll,
}; };
static struct class ucm_class = {
.name = "infiniband_cm",
.release = ib_ucm_release_class_dev
};
static struct class *ib_ucm_class; static ssize_t show_dev(struct class_device *class_dev, char *buf)
static struct cdev ib_ucm_cdev; {
struct ib_ucm_device *dev;
dev = container_of(class_dev, struct ib_ucm_device, class_dev);
return print_dev_t(buf, dev->dev.dev);
}
static CLASS_DEVICE_ATTR(dev, S_IRUGO, show_dev, NULL);
static int __init ib_ucm_init(void) static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{ {
int result; struct ib_ucm_device *dev;
dev = container_of(class_dev, struct ib_ucm_device, class_dev);
return sprintf(buf, "%s\n", dev->ib_dev->name);
}
static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
result = register_chrdev_region(IB_UCM_DEV, 1, "infiniband_cm"); static void ib_ucm_add_one(struct ib_device *device)
if (result) { {
ucm_dbg("Error <%d> registering dev\n", result); struct ib_ucm_device *ucm_dev;
goto err_chr;
} if (!device->alloc_ucontext)
return;
ucm_dev = kmalloc(sizeof *ucm_dev, GFP_KERNEL);
if (!ucm_dev)
return;
cdev_init(&ib_ucm_cdev, &ib_ucm_fops); memset(ucm_dev, 0, sizeof *ucm_dev);
ucm_dev->ib_dev = device;
ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
if (ucm_dev->devnum >= IB_UCM_MAX_DEVICES)
goto err;
set_bit(ucm_dev->devnum, dev_map);
cdev_init(&ucm_dev->dev, &ucm_fops);
ucm_dev->dev.owner = THIS_MODULE;
kobject_set_name(&ucm_dev->dev.kobj, "ucm%d", ucm_dev->devnum);
if (cdev_add(&ucm_dev->dev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1))
goto err;
result = cdev_add(&ib_ucm_cdev, IB_UCM_DEV, 1); ucm_dev->class_dev.class = &ucm_class;
if (result) { ucm_dev->class_dev.dev = device->dma_device;
ucm_dbg("Error <%d> adding cdev\n", result); snprintf(ucm_dev->class_dev.class_id, BUS_ID_SIZE, "ucm%d",
ucm_dev->devnum);
if (class_device_register(&ucm_dev->class_dev))
goto err_cdev; goto err_cdev;
}
ib_ucm_class = class_create(THIS_MODULE, "infiniband_cm"); if (class_device_create_file(&ucm_dev->class_dev,
if (IS_ERR(ib_ucm_class)) { &class_device_attr_dev))
result = PTR_ERR(ib_ucm_class); goto err_class;
ucm_dbg("Error <%d> creating class\n", result); if (class_device_create_file(&ucm_dev->class_dev,
&class_device_attr_ibdev))
goto err_class; goto err_class;
ib_set_client_data(device, &ucm_client, ucm_dev);
return;
err_class:
class_device_unregister(&ucm_dev->class_dev);
err_cdev:
cdev_del(&ucm_dev->dev);
clear_bit(ucm_dev->devnum, dev_map);
err:
kfree(ucm_dev);
return;
}
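
ib_ucm_add_one() hands out per-device minor numbers from the static dev_map bitmap: find_first_zero_bit() picks the lowest free slot, set_bit() claims it, and the release path returns it with clear_bit(). A stripped-down sketch of the pattern; the EX_ names and the limit are illustrative, and like the driver it assumes the add/remove callbacks are serialized (racing callers would want a lock or test_and_set_bit()):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EX_MAX_DEVICES 32			/* illustrative limit */
static DECLARE_BITMAP(ex_dev_map, EX_MAX_DEVICES);

/* Return the lowest free minor number, or -1 if all are taken. */
static int ex_alloc_devnum(void)
{
	int n = find_first_zero_bit(ex_dev_map, EX_MAX_DEVICES);

	if (n >= EX_MAX_DEVICES)
		return -1;
	set_bit(n, ex_dev_map);
	return n;
}

static void ex_free_devnum(int n)
{
	clear_bit(n, ex_dev_map);
}
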
static void ib_ucm_remove_one(struct ib_device *device)
{
struct ib_ucm_device *ucm_dev = ib_get_client_data(device, &ucm_client);
if (!ucm_dev)
return;
class_device_unregister(&ucm_dev->class_dev);
}
static ssize_t show_abi_version(struct class *class, char *buf)
{
return sprintf(buf, "%d\n", IB_USER_CM_ABI_VERSION);
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
static int __init ib_ucm_init(void)
{
int ret;
ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
"infiniband_cm");
if (ret) {
printk(KERN_ERR "ucm: couldn't register device number\n");
goto err;
} }
class_device_create(ib_ucm_class, NULL, IB_UCM_DEV, NULL, "ucm"); ret = class_register(&ucm_class);
if (ret) {
printk(KERN_ERR "ucm: couldn't create class infiniband_cm\n");
goto err_chrdev;
}
idr_init(&ctx_id_table); ret = class_create_file(&ucm_class, &class_attr_abi_version);
init_MUTEX(&ctx_id_mutex); if (ret) {
printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
goto err_class;
}
ret = ib_register_client(&ucm_client);
if (ret) {
printk(KERN_ERR "ucm: couldn't register client\n");
goto err_class;
}
return 0; return 0;
err_class: err_class:
cdev_del(&ib_ucm_cdev); class_unregister(&ucm_class);
err_cdev: err_chrdev:
unregister_chrdev_region(IB_UCM_DEV, 1); unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
err_chr: err:
return result; return ret;
} }
static void __exit ib_ucm_cleanup(void) static void __exit ib_ucm_cleanup(void)
{ {
class_device_destroy(ib_ucm_class, IB_UCM_DEV); ib_unregister_client(&ucm_client);
class_destroy(ib_ucm_class); class_unregister(&ucm_class);
cdev_del(&ib_ucm_cdev); unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
unregister_chrdev_region(IB_UCM_DEV, 1); idr_destroy(&ctx_id_table);
} }
module_init(ib_ucm_init); module_init(ib_ucm_init);
......
(This file's diff has been collapsed.)
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
* Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 PathScale, Inc. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU * licenses. You may choose to be licensed under the terms of the GNU
...@@ -38,29 +39,47 @@ ...@@ -38,29 +39,47 @@
#ifndef UVERBS_H #ifndef UVERBS_H
#define UVERBS_H #define UVERBS_H
/* Include device.h and fs.h until cdev.h is self-sufficient */
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/kref.h> #include <linux/kref.h>
#include <linux/idr.h> #include <linux/idr.h>
#include <rdma/ib_verbs.h> #include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h> #include <rdma/ib_user_verbs.h>
/*
* Our lifetime rules for these structs are the following:
*
* struct ib_uverbs_device: One reference is held by the module and
* released in ib_uverbs_remove_one(). Another reference is taken by
* ib_uverbs_open() each time the character special file is opened,
* and released in ib_uverbs_release_file() when the file is released.
*
* struct ib_uverbs_file: One reference is held by the VFS and
* released when the file is closed. Another reference is taken when
* an asynchronous event queue file is created and released when the
* event file is closed.
*
* struct ib_uverbs_event_file: One reference is held by the VFS and
* released when the file is closed. For asynchronous event files,
* another reference is held by the corresponding main context file
* and released when that file is closed. For completion event files,
* a reference is taken when a CQ is created that uses the file, and
* released when the CQ is destroyed.
*/
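
The new `struct kref ref` member below carries these rules. As a generic sketch of the pattern (illustrative names, not the actual uverbs code): kref_init() establishes the long-lived reference, each additional user takes kref_get(), and the last kref_put() invokes the release callback that frees the object:

#include <linux/kref.h>
#include <linux/slab.h>

struct ex_obj {
	struct kref ref;
	/* ... payload ... */
};

static void ex_release(struct kref *ref)
{
	kfree(container_of(ref, struct ex_obj, ref));
}

/* Creation holds the first reference... */
static struct ex_obj *ex_create(void)
{
	struct ex_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->ref);	/* refcount = 1 */
	return obj;
}

/* ...each open()/attach takes another... */
static void ex_get(struct ex_obj *obj)
{
	kref_get(&obj->ref);
}

/* ...and the last put frees the object via ex_release(). */
static void ex_put(struct ex_obj *obj)
{
	kref_put(&obj->ref, ex_release);
}
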
struct ib_uverbs_device { struct ib_uverbs_device {
struct kref ref;
int devnum; int devnum;
struct cdev dev; struct cdev *dev;
struct class_device class_dev; struct class_device *class_dev;
struct ib_device *ib_dev; struct ib_device *ib_dev;
int num_comp; int num_comp_vectors;
}; };
struct ib_uverbs_event_file { struct ib_uverbs_event_file {
struct kref ref; struct kref ref;
struct file *file;
struct ib_uverbs_file *uverbs_file; struct ib_uverbs_file *uverbs_file;
spinlock_t lock; spinlock_t lock;
int fd;
int is_async; int is_async;
wait_queue_head_t poll_wait; wait_queue_head_t poll_wait;
struct fasync_struct *async_queue; struct fasync_struct *async_queue;
...@@ -73,8 +92,7 @@ struct ib_uverbs_file { ...@@ -73,8 +92,7 @@ struct ib_uverbs_file {
struct ib_uverbs_device *device; struct ib_uverbs_device *device;
struct ib_ucontext *ucontext; struct ib_ucontext *ucontext;
struct ib_event_handler event_handler; struct ib_event_handler event_handler;
struct ib_uverbs_event_file async_file; struct ib_uverbs_event_file *async_file;
struct ib_uverbs_event_file comp_file[1];
}; };
struct ib_uverbs_event { struct ib_uverbs_event {
...@@ -110,10 +128,23 @@ extern struct idr ib_uverbs_cq_idr; ...@@ -110,10 +128,23 @@ extern struct idr ib_uverbs_cq_idr;
extern struct idr ib_uverbs_qp_idr; extern struct idr ib_uverbs_qp_idr;
extern struct idr ib_uverbs_srq_idr; extern struct idr ib_uverbs_srq_idr;
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
int is_async, int *fd);
void ib_uverbs_release_event_file(struct kref *ref);
struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
struct ib_uverbs_event_file *ev_file,
struct ib_ucq_object *uobj);
void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
struct ib_uevent_object *uobj);
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context); void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr); void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr); void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr); void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_event_handler(struct ib_event_handler *handler,
struct ib_event *event);
int ib_umem_get(struct ib_device *dev, struct ib_umem *mem, int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
void *addr, size_t size, int write); void *addr, size_t size, int write);
...@@ -125,21 +156,26 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem); ...@@ -125,21 +156,26 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem);
const char __user *buf, int in_len, \ const char __user *buf, int in_len, \
int out_len) int out_len)
IB_UVERBS_DECLARE_CMD(query_params);
IB_UVERBS_DECLARE_CMD(get_context); IB_UVERBS_DECLARE_CMD(get_context);
IB_UVERBS_DECLARE_CMD(query_device); IB_UVERBS_DECLARE_CMD(query_device);
IB_UVERBS_DECLARE_CMD(query_port); IB_UVERBS_DECLARE_CMD(query_port);
IB_UVERBS_DECLARE_CMD(query_gid);
IB_UVERBS_DECLARE_CMD(query_pkey);
IB_UVERBS_DECLARE_CMD(alloc_pd); IB_UVERBS_DECLARE_CMD(alloc_pd);
IB_UVERBS_DECLARE_CMD(dealloc_pd); IB_UVERBS_DECLARE_CMD(dealloc_pd);
IB_UVERBS_DECLARE_CMD(reg_mr); IB_UVERBS_DECLARE_CMD(reg_mr);
IB_UVERBS_DECLARE_CMD(dereg_mr); IB_UVERBS_DECLARE_CMD(dereg_mr);
IB_UVERBS_DECLARE_CMD(create_comp_channel);
IB_UVERBS_DECLARE_CMD(create_cq); IB_UVERBS_DECLARE_CMD(create_cq);
IB_UVERBS_DECLARE_CMD(poll_cq);
IB_UVERBS_DECLARE_CMD(req_notify_cq);
IB_UVERBS_DECLARE_CMD(destroy_cq); IB_UVERBS_DECLARE_CMD(destroy_cq);
IB_UVERBS_DECLARE_CMD(create_qp); IB_UVERBS_DECLARE_CMD(create_qp);
IB_UVERBS_DECLARE_CMD(modify_qp); IB_UVERBS_DECLARE_CMD(modify_qp);
IB_UVERBS_DECLARE_CMD(destroy_qp); IB_UVERBS_DECLARE_CMD(destroy_qp);
IB_UVERBS_DECLARE_CMD(post_send);
IB_UVERBS_DECLARE_CMD(post_recv);
IB_UVERBS_DECLARE_CMD(post_srq_recv);
IB_UVERBS_DECLARE_CMD(create_ah);
IB_UVERBS_DECLARE_CMD(destroy_ah);
IB_UVERBS_DECLARE_CMD(attach_mcast); IB_UVERBS_DECLARE_CMD(attach_mcast);
IB_UVERBS_DECLARE_CMD(detach_mcast); IB_UVERBS_DECLARE_CMD(detach_mcast);
IB_UVERBS_DECLARE_CMD(create_srq); IB_UVERBS_DECLARE_CMD(create_srq);
......
...@@ -523,16 +523,22 @@ EXPORT_SYMBOL(ib_dealloc_fmr); ...@@ -523,16 +523,22 @@ EXPORT_SYMBOL(ib_dealloc_fmr);
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{ {
return qp->device->attach_mcast ? if (!qp->device->attach_mcast)
qp->device->attach_mcast(qp, gid, lid) : return -ENOSYS;
-ENOSYS; if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
return -EINVAL;
return qp->device->attach_mcast(qp, gid, lid);
} }
EXPORT_SYMBOL(ib_attach_mcast); EXPORT_SYMBOL(ib_attach_mcast);
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{ {
return qp->device->detach_mcast ? if (!qp->device->detach_mcast)
qp->device->detach_mcast(qp, gid, lid) : return -ENOSYS;
-ENOSYS; if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
return -EINVAL;
return qp->device->detach_mcast(qp, gid, lid);
} }
EXPORT_SYMBOL(ib_detach_mcast); EXPORT_SYMBOL(ib_detach_mcast);
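
The added -EINVAL checks encode two InfiniBand rules at the core-layer boundary: only UD QPs may join multicast groups, and every IB multicast GID begins with the 0xFF prefix byte, which is what the gid->raw[0] test verifies. A hedged caller-side sketch; the GID value is a made-up example and `qp` is assumed to be an existing IB_QPT_UD queue pair:

#include <rdma/ib_verbs.h>

/* Attach a UD QP to a multicast group (illustrative GID value). */
static int ex_join_mcast(struct ib_qp *qp, u16 mlid)
{
	union ib_gid mgid = {
		.raw = { 0xff, 0x12, 0x40, 0x1b,	/* 0xFF = multicast */
			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 },
	};

	/* Fails with -EINVAL for non-UD QPs or non-multicast GIDs. */
	return ib_attach_mcast(qp, &mgid, mlid);
}
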
...@@ -7,4 +7,5 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o ...@@ -7,4 +7,5 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o
ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \ ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \ mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \
mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \ mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \
mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o \
mthca_catas.o
/* /*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* *
* This software is available to you under a choice of one of two * This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU * licenses. You may choose to be licensed under the terms of the GNU
...@@ -706,9 +707,13 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) ...@@ -706,9 +707,13 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
dev->cmd.max_cmds = 1 << lg; dev->cmd.max_cmds = 1 << lg;
MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
mthca_dbg(dev, "FW version %012llx, max commands %d\n", mthca_dbg(dev, "FW version %012llx, max commands %d\n",
(unsigned long long) dev->fw_ver, dev->cmd.max_cmds); (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
mthca_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x\n",
(unsigned long long) dev->catas_err.addr, dev->catas_err.size);
if (mthca_is_memfree(dev)) { if (mthca_is_memfree(dev)) {
MTHCA_GET(dev->fw.arbel.fw_pages, outbox, QUERY_FW_SIZE_OFFSET); MTHCA_GET(dev->fw.arbel.fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
...@@ -933,9 +938,9 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, ...@@ -933,9 +938,9 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
goto out; goto out;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
dev_lim->max_srq_sz = 1 << field; dev_lim->max_srq_sz = (1 << field) - 1;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
dev_lim->max_qp_sz = 1 << field; dev_lim->max_qp_sz = (1 << field) - 1;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET); MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
dev_lim->reserved_qps = 1 << (field & 0xf); dev_lim->reserved_qps = 1 << (field & 0xf);
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET); MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
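
The max_srq_sz/max_qp_sz change above fixes an off-by-one in decoding the log2-encoded firmware limits: the largest usable work-queue size is one less than the reported power of two, so for field = 0x10 the correct maximum is 65535, not 65536. A trivial sketch of the corrected decoding, assuming this reading of the field:

/* Decode a log2-encoded limit whose usable maximum is 2^field - 1
 * (assumption based on the fix above). For field = 0x10 this yields
 * 65535 rather than the old, off-by-one 65536. */
static inline int decode_max_sz(unsigned int field)
{
	return (1 << field) - 1;
}
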
...@@ -1045,6 +1050,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, ...@@ -1045,6 +1050,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars); dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars);
mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
dev_lim->max_pds, dev_lim->reserved_mgms); dev_lim->max_pds, dev_lim->reserved_mgms);
mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz);
mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags); mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
......
...@@ -83,7 +83,8 @@ enum { ...@@ -83,7 +83,8 @@ enum {
MTHCA_EVENT_TYPE_PATH_MIG = 0x01, MTHCA_EVENT_TYPE_PATH_MIG = 0x01,
MTHCA_EVENT_TYPE_COMM_EST = 0x02, MTHCA_EVENT_TYPE_COMM_EST = 0x02,
MTHCA_EVENT_TYPE_SQ_DRAINED = 0x03, MTHCA_EVENT_TYPE_SQ_DRAINED = 0x03,
MTHCA_EVENT_TYPE_SRQ_LAST_WQE = 0x13, MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13,
MTHCA_EVENT_TYPE_SRQ_LIMIT = 0x14,
MTHCA_EVENT_TYPE_CQ_ERROR = 0x04, MTHCA_EVENT_TYPE_CQ_ERROR = 0x04,
MTHCA_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, MTHCA_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
MTHCA_EVENT_TYPE_EEC_CATAS_ERROR = 0x06, MTHCA_EVENT_TYPE_EEC_CATAS_ERROR = 0x06,
...@@ -110,8 +111,9 @@ enum { ...@@ -110,8 +111,9 @@ enum {
(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \ (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE) | \ (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE) | \
(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT)) (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \ #define MTHCA_SRQ_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_SRQ_LAST_WQE) (1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
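
Besides folding in the new SRQ limit event, this hunk fixes a macro-hygiene bug: the old MTHCA_SRQ_EVENT_MASK body was not wrapped in parentheses, so an expression such as `x & MTHCA_SRQ_EVENT_MASK` parsed as `(x & bit1) | bit2`, because `&` binds tighter than `|`. A standalone demonstration with hypothetical bit positions:

#include <assert.h>
#include <stdint.h>

#define BAD_MASK  (UINT64_C(1) << 3) | (UINT64_C(1) << 4)	/* unparenthesized */
#define GOOD_MASK ((UINT64_C(1) << 3) | (UINT64_C(1) << 4))

int main(void)
{
	uint64_t x = 0;

	/* '&' binds tighter than '|', so the bad macro always ORs in
	 * bit 4 after the AND instead of being part of the mask. */
	assert((x & BAD_MASK) == (UINT64_C(1) << 4));
	assert((x & GOOD_MASK) == 0);
	return 0;
}
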
#define MTHCA_CMD_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_CMD) #define MTHCA_CMD_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_CMD)
#define MTHCA_EQ_DB_INC_CI (1 << 24) #define MTHCA_EQ_DB_INC_CI (1 << 24)
...@@ -141,6 +143,9 @@ struct mthca_eqe { ...@@ -141,6 +143,9 @@ struct mthca_eqe {
struct { struct {
__be32 qpn; __be32 qpn;
} __attribute__((packed)) qp; } __attribute__((packed)) qp;
struct {
__be32 srqn;
} __attribute__((packed)) srq;
struct { struct {
__be32 cqn; __be32 cqn;
u32 reserved1; u32 reserved1;
...@@ -305,6 +310,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq) ...@@ -305,6 +310,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
IB_EVENT_SQ_DRAINED); IB_EVENT_SQ_DRAINED);
break; break;
case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
IB_EVENT_QP_LAST_WQE_REACHED);
break;
case MTHCA_EVENT_TYPE_SRQ_LIMIT:
mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
IB_EVENT_SRQ_LIMIT_REACHED);
break;
case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR: case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
IB_EVENT_QP_FATAL); IB_EVENT_QP_FATAL);
......
...@@ -487,7 +487,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, ...@@ -487,7 +487,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
} }
} }
int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db) int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
u32 qn, __be32 **db)
{ {
int group; int group;
int start, end, dir; int start, end, dir;
......
...@@ -173,7 +173,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, ...@@ -173,7 +173,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
int mthca_init_db_tab(struct mthca_dev *dev); int mthca_init_db_tab(struct mthca_dev *dev);
void mthca_cleanup_db_tab(struct mthca_dev *dev); void mthca_cleanup_db_tab(struct mthca_dev *dev);
int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db); int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
u32 qn, __be32 **db);
void mthca_free_db(struct mthca_dev *dev, int type, int db_index); void mthca_free_db(struct mthca_dev *dev, int type, int db_index);
#endif /* MTHCA_MEMFREE_H */ #endif /* MTHCA_MEMFREE_H */
(Five more file diffs have been collapsed.)