Commit 45c448a1 authored by Roland Dreier

Merge branches 'cxgb3', 'ehca', 'ipath', 'ipoib', 'misc', 'mlx4', 'mthca' and 'nes' into for-linus

@@ -439,7 +439,7 @@ F: drivers/hwmon/ams/
 AMSO1100 RNIC DRIVER
 M:	Tom Tucker <tom@opengridcomputing.com>
 M:	Steve Wise <swise@opengridcomputing.com>
-L:	general@lists.openfabrics.org
+L:	linux-rdma@vger.kernel.org
 S:	Maintained
 F:	drivers/infiniband/hw/amso1100/
@@ -1494,7 +1494,7 @@ F: drivers/net/cxgb3/
 CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
 M:	Steve Wise <swise@chelsio.com>
-L:	general@lists.openfabrics.org
+L:	linux-rdma@vger.kernel.org
 W:	http://www.openfabrics.org
 S:	Supported
 F:	drivers/infiniband/hw/cxgb3/
@@ -1868,7 +1868,7 @@ F: fs/efs/
 EHCA (IBM GX bus InfiniBand adapter) DRIVER
 M:	Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 M:	Christoph Raisch <raisch@de.ibm.com>
-L:	general@lists.openfabrics.org
+L:	linux-rdma@vger.kernel.org
 S:	Supported
 F:	drivers/infiniband/hw/ehca/
@@ -2552,7 +2552,7 @@ INFINIBAND SUBSYSTEM
 M:	Roland Dreier <rolandd@cisco.com>
 M:	Sean Hefty <sean.hefty@intel.com>
 M:	Hal Rosenstock <hal.rosenstock@gmail.com>
-L:	general@lists.openfabrics.org (moderated for non-subscribers)
+L:	linux-rdma@vger.kernel.org
 W:	http://www.openib.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
 S:	Supported
@@ -2729,7 +2729,7 @@ F: drivers/net/ipg.c
 IPATH DRIVER
 M:	Ralph Campbell <infinipath@qlogic.com>
-L:	general@lists.openfabrics.org
+L:	linux-rdma@vger.kernel.org
 T:	git git://git.qlogic.com/ipath-linux-2.6
 S:	Supported
 F:	drivers/infiniband/hw/ipath/
@@ -3485,7 +3485,7 @@ F: drivers/scsi/NCR_D700.*
 NETEFFECT IWARP RNIC DRIVER (IW_NES)
 M:	Faisal Latif <faisal.latif@intel.com>
 M:	Chien Tung <chien.tin.tung@intel.com>
-L:	general@lists.openfabrics.org
+L:	linux-rdma@vger.kernel.org
 W:	http://www.neteffect.com
 S:	Supported
 F:	drivers/infiniband/hw/nes/
...
@@ -362,6 +362,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
 		 * In either case, must tell the provider to reject.
 		 */
 		cm_id_priv->state = IW_CM_STATE_DESTROYING;
+		cm_id->device->iwcm->reject(cm_id, NULL, 0);
 		break;
 	case IW_CM_STATE_CONN_SENT:
 	case IW_CM_STATE_DESTROYING:
...
@@ -51,8 +51,7 @@ static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;

 /* Port list lock */
-static spinlock_t ib_mad_port_list_lock;
-
+static DEFINE_SPINLOCK(ib_mad_port_list_lock);

 /* Forward declarations */
 static int method_in_use(struct ib_mad_mgmt_method_table **method,
@@ -2984,8 +2983,6 @@ static int __init ib_mad_init_module(void)
 {
 	int ret;

-	spin_lock_init(&ib_mad_port_list_lock);
-
 	ib_mad_cache = kmem_cache_create("ib_mad",
 					 sizeof(struct ib_mad_private),
 					 0,
@@ -3021,4 +3018,3 @@ static void __exit ib_mad_cleanup_module(void)

 module_init(ib_mad_init_module);
 module_exit(ib_mad_cleanup_module);
-
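The conversion above (repeated in sa_query.c and uverbs_main.c further down) swaps a runtime spin_lock_init() call for a compile-time initializer, so the lock is valid before any init code runs. A minimal sketch of the idiom, with hypothetical names:

#include <linux/spinlock.h>

/* Fully initialized at compile time; no init call needed in module_init. */
static DEFINE_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch data protected by example_lock ... */
	spin_unlock_irqrestore(&example_lock, flags);
}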
...
@@ -106,6 +106,8 @@ struct mcast_group {
 	struct ib_sa_query	*query;
 	int			query_id;
 	u16			pkey_index;
+	u8			leave_state;
+	int			retries;
 };

 struct mcast_member {
@@ -350,6 +352,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)

 	rec = group->rec;
 	rec.join_state = leave_state;
+	group->leave_state = leave_state;

 	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
 				       port->port_num, IB_SA_METHOD_DELETE, &rec,
@@ -542,7 +545,11 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
 {
 	struct mcast_group *group = context;

-	mcast_work_handler(&group->work);
+	if (status && group->retries > 0 &&
+	    !send_leave(group, group->leave_state))
+		group->retries--;
+	else
+		mcast_work_handler(&group->work);
 }

 static struct mcast_group *acquire_group(struct mcast_port *port,
@@ -565,6 +572,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
 	if (!group)
 		return NULL;

+	group->retries = 3;
 	group->port = port;
 	group->rec.mgid = *mgid;
 	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
...
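The retry logic above gives a failed SA leave request a small, bounded number of re-sends from its own completion callback before the normal completion path runs. A stripped-down sketch of that shape, with hypothetical helpers:

struct query_ctx {
	int retries;	/* set to a small budget (3 above) at allocation */
};

static int resend_query(struct query_ctx *ctx);	/* hypothetical re-issue */
static void complete_query(struct query_ctx *ctx);	/* hypothetical finish */

static void query_done(int status, struct query_ctx *ctx)
{
	/* Retry only on error, only while budget remains, and only if the
	 * resend itself was accepted; otherwise fall through and complete. */
	if (status && ctx->retries > 0 && !resend_query(ctx))
		ctx->retries--;
	else
		complete_query(ctx);
}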
@@ -109,10 +109,10 @@ static struct ib_client sa_client = {
 	.remove = ib_sa_remove_one
 };

-static spinlock_t idr_lock;
+static DEFINE_SPINLOCK(idr_lock);
 static DEFINE_IDR(query_idr);

-static spinlock_t tid_lock;
+static DEFINE_SPINLOCK(tid_lock);
 static u32 tid;

 #define PATH_REC_FIELD(field) \
@@ -1077,9 +1077,6 @@ static int __init ib_sa_init(void)
 {
 	int ret;

-	spin_lock_init(&idr_lock);
-	spin_lock_init(&tid_lock);
-
 	get_random_bytes(&tid, sizeof tid);

 	ret = ib_register_client(&sa_client);
...
@@ -73,7 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr);
 DEFINE_IDR(ib_uverbs_qp_idr);
 DEFINE_IDR(ib_uverbs_srq_idr);

-static spinlock_t map_lock;
+static DEFINE_SPINLOCK(map_lock);
 static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
 static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);

@@ -584,14 +584,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,

 	if (hdr.command < 0 ||
 	    hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
-	    !uverbs_cmd_table[hdr.command] ||
-	    !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+	    !uverbs_cmd_table[hdr.command])
 		return -EINVAL;

 	if (!file->ucontext &&
 	    hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
 		return -EINVAL;

+	if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+		return -ENOSYS;
+
 	return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
 					     hdr.in_words * 4, hdr.out_words * 4);
 }
@@ -836,8 +838,6 @@ static int __init ib_uverbs_init(void)
 {
 	int ret;

-	spin_lock_init(&map_lock);
-
 	ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
 				     "infiniband_verbs");
 	if (ret) {
...
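The reordering above also changes what userspace sees: a request that is malformed or names an unknown command still fails with EINVAL, while a well-formed command that this particular device simply does not implement now fails with ENOSYS. Sketched out (ib_dev abbreviating the device pointer):

if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
    !uverbs_cmd_table[hdr.command])
	return -EINVAL;		/* request itself is malformed/unknown */

if (!(ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
	return -ENOSYS;		/* valid command, unsupported by this device */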
@@ -86,11 +86,7 @@ MODULE_DEVICE_TABLE(pci, c2_pci_table);

 static void c2_print_macaddr(struct net_device *netdev)
 {
-	pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
-		 "IRQ %u\n", netdev->name,
-		 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
-		 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
-		 netdev->irq);
+	pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
 }

 static void c2_set_rxbufsize(struct c2_port *c2_port)
...
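The %pM format extension prints a 6-byte MAC address in colon-separated form, which is what replaces the six explicit %02X conversions above. For example:

u8 addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

pr_debug("MAC %pM\n", addr);	/* emits "MAC 00:11:22:33:44:55" */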
@@ -780,11 +780,11 @@ int c2_register_device(struct c2_dev *dev)
 	/* Register pseudo network device */
 	dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
 	if (!dev->pseudo_netdev)
-		goto out3;
+		goto out;

 	ret = register_netdev(dev->pseudo_netdev);
 	if (ret)
-		goto out2;
+		goto out_free_netdev;

 	pr_debug("%s:%u\n", __func__, __LINE__);
 	strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
@@ -851,6 +851,10 @@ int c2_register_device(struct c2_dev *dev)
 	dev->ibdev.post_recv = c2_post_receive;

 	dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
+	if (dev->ibdev.iwcm == NULL) {
+		ret = -ENOMEM;
+		goto out_unregister_netdev;
+	}
 	dev->ibdev.iwcm->add_ref = c2_add_ref;
 	dev->ibdev.iwcm->rem_ref = c2_rem_ref;
 	dev->ibdev.iwcm->get_qp = c2_get_qp;
@@ -862,23 +866,25 @@ int c2_register_device(struct c2_dev *dev)

 	ret = ib_register_device(&dev->ibdev);
 	if (ret)
-		goto out1;
+		goto out_free_iwcm;

 	for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
 		ret = device_create_file(&dev->ibdev.dev,
					 c2_dev_attributes[i]);
 		if (ret)
-			goto out0;
+			goto out_unregister_ibdev;
 	}
-	goto out3;
+	goto out;

-out0:
+out_unregister_ibdev:
 	ib_unregister_device(&dev->ibdev);
-out1:
+out_free_iwcm:
+	kfree(dev->ibdev.iwcm);
+out_unregister_netdev:
 	unregister_netdev(dev->pseudo_netdev);
-out2:
+out_free_netdev:
 	free_netdev(dev->pseudo_netdev);
-out3:
+out:
 	pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
 	return ret;
 }
...
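The relabeled gotos follow the usual kernel unwind convention: each acquisition step gets a label that releases everything acquired before it, and the labels run in reverse order of acquisition, so a new step (here the iwcm allocation) slots in without disturbing the others. Schematically, with hypothetical steps:

static int example_register(void)
{
	int ret;

	ret = step_a();			/* hypothetical step 1 */
	if (ret)
		goto out;
	ret = step_b();			/* hypothetical step 2 */
	if (ret)
		goto out_undo_a;
	ret = step_c();			/* hypothetical step 3 */
	if (ret)
		goto out_undo_b;
	return 0;

out_undo_b:
	undo_b();
out_undo_a:
	undo_a();
out:
	return ret;
}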
@@ -105,11 +105,9 @@ static void rnic_init(struct iwch_dev *rnicp)
 static void open_rnic_dev(struct t3cdev *tdev)
 {
 	struct iwch_dev *rnicp;
-	static int vers_printed;

 	PDBG("%s t3cdev %p\n", __func__, tdev);
-	if (!vers_printed++)
-		printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
-		       DRV_VERSION);
+	printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
+		    DRV_VERSION);
 	rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
 	if (!rnicp) {
...
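printk_once() wraps printk() with a local static guard, which is exactly what the removed vers_printed counter did by hand; the same substitution appears in the mlx4 and mthca hunks below. Sketch:

static void probe(void)	/* hypothetical caller */
{
	/* Printed on the first call only, however many devices probe. */
	printk_once(KERN_INFO "driver version %s\n", "1.0");
}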
@@ -52,7 +52,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"

-#define HCAD_VERSION "0028"
+#define HCAD_VERSION "0029"

 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -64,7 +64,7 @@ static int ehca_hw_level = 0;
 static int ehca_poll_all_eqs = 1;

 int ehca_debug_level   = 0;
-int ehca_nr_ports      = 2;
+int ehca_nr_ports      = -1;
 int ehca_use_hp_mr     = 0;
 int ehca_port_act_time = 30;
 int ehca_static_rate   = -1;
@@ -95,8 +95,8 @@ MODULE_PARM_DESC(hw_level,
		 "Hardware level (0: autosensing (default), "
		 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
 MODULE_PARM_DESC(nr_ports,
-		 "number of connected ports (-1: autodetect, 1: port one only, "
-		 "2: two ports (default)");
+		 "number of connected ports (-1: autodetect (default), "
+		 "1: port one only, 2: two ports)");
 MODULE_PARM_DESC(use_hp_mr,
		 "Use high performance MRs (default: no)");
 MODULE_PARM_DESC(port_act_time,
...
@@ -786,7 +786,11 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
 	wc->slid = cqe->rlid;
 	wc->dlid_path_bits = cqe->dlid;
 	wc->src_qp = cqe->remote_qp_number;
-	wc->wc_flags = cqe->w_completion_flags;
+	/*
+	 * HW has "Immed data present" and "GRH present" in bits 6 and 5.
+	 * SW defines those in bits 1 and 0, so we can just shift and mask.
+	 */
+	wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
 	wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
 	wc->sl = cqe->service_level;
...
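A quick check of the shift-and-mask above: with both hardware flags set (bits 6 and 5), the expression yields both software flags set (bits 1 and 0).

u32 hw_flags = 0x60;			/* 0110 0000: immed data + GRH present */
u32 wc_flags = (hw_flags >> 5) & 3;	/* 0000 0011: both wc flags set */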
@@ -125,14 +125,30 @@ struct ib_perf {
 	u8 data[192];
 } __attribute__ ((packed));

+/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
+struct tcslfl {
+	u32 tc:8;
+	u32 sl:4;
+	u32 fl:20;
+} __attribute__ ((packed));
+
+/* IP Version/TC/FL packed into 32 bits, as in GRH */
+struct vertcfl {
+	u32 ver:4;
+	u32 tc:8;
+	u32 fl:20;
+} __attribute__ ((packed));
+
 static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
+			     struct ib_wc *in_wc, struct ib_grh *in_grh,
			     struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
 	struct ib_perf *in_perf = (struct ib_perf *)in_mad;
 	struct ib_perf *out_perf = (struct ib_perf *)out_mad;
 	struct ib_class_port_info *poi =
		(struct ib_class_port_info *)out_perf->data;
+	struct tcslfl *tcslfl =
+		(struct tcslfl *)&poi->redirect_tcslfl;
 	struct ehca_shca *shca =
		container_of(ibdev, struct ehca_shca, ib_device);
 	struct ehca_sport *sport = &shca->sport[port_num - 1];
@@ -158,10 +174,29 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
 		poi->base_version = 1;
 		poi->class_version = 1;
 		poi->resp_time_value = 18;
-		poi->redirect_lid = sport->saved_attr.lid;
-		poi->redirect_qp = sport->pma_qp_nr;
+
+		/* copy local routing information from WC where applicable */
+		tcslfl->sl = in_wc->sl;
+		poi->redirect_lid =
+			sport->saved_attr.lid | in_wc->dlid_path_bits;
+		poi->redirect_qp = sport->pma_qp_nr;
 		poi->redirect_qkey = IB_QP1_QKEY;
-		poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
+
+		ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
+				&poi->redirect_pkey);
+
+		/* if request was globally routed, copy route info */
+		if (in_grh) {
+			struct vertcfl *vertcfl =
+				(struct vertcfl *)&in_grh->version_tclass_flow;
+			memcpy(poi->redirect_gid, in_grh->dgid.raw,
+			       sizeof(poi->redirect_gid));
+			tcslfl->tc = vertcfl->tc;
+			tcslfl->fl = vertcfl->fl;
+		} else
+			/* else only fill in default GID */
+			ehca_query_gid(ibdev, port_num, 0,
+				       (union ib_gid *)&poi->redirect_gid);

 		ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
			 sport->saved_attr.lid, sport->pma_qp_nr);
@@ -183,8 +218,7 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
 int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		     struct ib_wc *in_wc, struct ib_grh *in_grh,
-		     struct ib_mad *in_mad,
-		     struct ib_mad *out_mad)
+		     struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
 	int ret;
@@ -196,7 +230,8 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		return IB_MAD_RESULT_SUCCESS;

 	ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);

-	ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
+	ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
+				in_mad, out_mad);

 	return ret;
 }
...
@@ -1616,7 +1616,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
 		pd->port_cnt = 1;
 		port_fp(fp) = pd;
 		pd->port_pid = get_pid(task_pid(current));
-		strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
+		strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
 		ipath_stats.sps_ports++;
 		ret = 0;
 	} else
...
@@ -60,7 +60,7 @@ static int recv_subn_get_nodedescription(struct ib_smp *smp,
 	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

-	strncpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

 	return reply(smp);
 }
...
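The two ipath replacements fix different termination bugs. strncpy() leaves the destination unterminated when the source fills it, so a bounded copy that must yield a C string becomes strlcpy(); smp->data, by contrast, is a fixed-width field (the node description), so a full-width memcpy() with no NUL is the correct copy. A sketch with hypothetical buffers and sources:

char comm[16];
u8 desc_field[64];

strlcpy(comm, src_name, sizeof(comm));		/* always NUL-terminated */
memcpy(desc_field, node_desc, sizeof(desc_field));	/* fixed-width, no NUL */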
@@ -342,6 +342,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
 	struct mlx4_ib_alloc_ucontext_resp resp;
 	int err;

+	if (!dev->ib_active)
+		return ERR_PTR(-EAGAIN);
+
 	resp.qp_tab_size      = dev->dev->caps.num_qps;
 	resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
 	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
@@ -540,15 +543,11 @@ static struct device_attribute *mlx4_class_attributes[] = {

 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
-	static int mlx4_ib_version_printed;
 	struct mlx4_ib_dev *ibdev;
 	int num_ports = 0;
 	int i;

-	if (!mlx4_ib_version_printed) {
-		printk(KERN_INFO "%s", mlx4_ib_version);
-		++mlx4_ib_version_printed;
-	}
+	printk_once(KERN_INFO "%s", mlx4_ib_version);

 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		num_ports++;
@@ -673,6 +672,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
		goto err_reg;
 	}

+	ibdev->ib_active = true;
+
 	return ibdev;

 err_reg:
@@ -729,6 +730,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
		break;

 	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;
...
@@ -175,6 +175,7 @@ struct mlx4_ib_dev {
 	spinlock_t		sm_lock;

 	struct mutex		cap_mask_mutex;
+	bool			ib_active;
 };

 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
...
@@ -615,10 +615,12 @@ static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
 }

 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-	if (send_cq == recv_cq)
+	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
-	else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+		__acquire(&recv_cq->lock);
+	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
 	} else {
@@ -628,10 +630,12 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
 }

 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+	__releases(&send_cq->lock) __releases(&recv_cq->lock)
 {
-	if (send_cq == recv_cq)
+	if (send_cq == recv_cq) {
+		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
-	else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
 	} else {
...
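Both helpers (and their mthca twins below) encode the classic deadlock-avoidance rule: when two locks must be held at once, always take them in a global order, here by CQ number. The __acquire()/__release() calls are no-ops at runtime; they only keep sparse's context tracking balanced in the send_cq == recv_cq case, where a single lock stands in for both annotations. The core of the ordering rule:

/* Take two cq locks in a fixed global order (smaller cqn first). */
if (a->cqn < b->cqn) {
	spin_lock_irq(&a->lock);
	spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
} else {
	spin_lock_irq(&b->lock);
	spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
}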
@@ -88,6 +88,7 @@ static void handle_catas(struct mthca_dev *dev)
 	event.device = &dev->ib_dev;
 	event.event  = IB_EVENT_DEVICE_FATAL;
 	event.element.port_num = 0;
+	dev->active = false;

 	ib_dispatch_event(&event);
...
@@ -34,8 +34,6 @@
 #ifndef MTHCA_CONFIG_REG_H
 #define MTHCA_CONFIG_REG_H

-#include <asm/page.h>
-
 #define MTHCA_HCR_BASE         0x80680
 #define MTHCA_HCR_SIZE         0x0001c
 #define MTHCA_ECR_BASE         0x80700
...
@@ -357,6 +357,7 @@ struct mthca_dev {
 	struct ib_ah       *sm_ah[MTHCA_MAX_PORTS];
 	spinlock_t          sm_lock;
 	u8                  rate[MTHCA_MAX_PORTS];
+	bool		    active;
 };

 #ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
...
@@ -829,27 +829,34 @@ int mthca_init_eq_table(struct mthca_dev *dev)

 	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		static const char *eq_name[] = {
-			[MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
-			[MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
-			[MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
+			[MTHCA_EQ_COMP]  = DRV_NAME "-comp",
+			[MTHCA_EQ_ASYNC] = DRV_NAME "-async",
+			[MTHCA_EQ_CMD]   = DRV_NAME "-cmd"
		};

		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+			snprintf(dev->eq_table.eq[i].irq_name,
+				 IB_DEVICE_NAME_MAX,
+				 "%s@pci:%s", eq_name[i],
+				 pci_name(dev->pdev));
			err = request_irq(dev->eq_table.eq[i].msi_x_vector,
					  mthca_is_memfree(dev) ?
					  mthca_arbel_msi_x_interrupt :
					  mthca_tavor_msi_x_interrupt,
-					  0, eq_name[i], dev->eq_table.eq + i);
+					  0, dev->eq_table.eq[i].irq_name,
+					  dev->eq_table.eq + i);
			if (err)
				goto err_out_cmd;
			dev->eq_table.eq[i].have_irq = 1;
		}
 	} else {
+		snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
+			 DRV_NAME "@pci:%s", pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq,
				  mthca_is_memfree(dev) ?
				  mthca_arbel_interrupt :
				  mthca_tavor_interrupt,
-				  IRQF_SHARED, DRV_NAME, dev);
+				  IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
		if (err)
			goto err_out_cmd;
		dev->eq_table.have_irq = 1;
...
@@ -1116,6 +1116,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
 	pci_set_drvdata(pdev, mdev);
 	mdev->hca_type = hca_type;

+	mdev->active = true;
+
 	return 0;

 err_unregister:
@@ -1215,15 +1217,11 @@ int __mthca_restart_one(struct pci_dev *pdev)
 static int __devinit mthca_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *id)
 {
-	static int mthca_version_printed = 0;
 	int ret;

 	mutex_lock(&mthca_device_mutex);

-	if (!mthca_version_printed) {
-		printk(KERN_INFO "%s", mthca_version);
-		++mthca_version_printed;
-	}
+	printk_once(KERN_INFO "%s", mthca_version);

 	if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
		printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
...
@@ -334,6 +334,9 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
 	struct mthca_ucontext        *context;
 	int                           err;

+	if (!(to_mdev(ibdev)->active))
+		return ERR_PTR(-EAGAIN);
+
 	memset(&uresp, 0, sizeof uresp);

 	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
...
@@ -113,6 +113,7 @@ struct mthca_eq {
 	int                    nent;
 	struct mthca_buf_list *page_list;
 	struct mthca_mr        mr;
+	char		       irq_name[IB_DEVICE_NAME_MAX];
 };

 struct mthca_av;
...
@@ -1319,10 +1319,12 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 }

 static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-	if (send_cq == recv_cq)
+	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
-	else if (send_cq->cqn < recv_cq->cqn) {
+		__acquire(&recv_cq->lock);
+	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
 	} else {
@@ -1332,10 +1334,12 @@ static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
 }

 static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+	__releases(&send_cq->lock) __releases(&recv_cq->lock)
 {
-	if (send_cq == recv_cq)
+	if (send_cq == recv_cq) {
+		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
-	else if (send_cq->cqn < recv_cq->cqn) {
+	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
 	} else {
...
@@ -30,7 +30,6 @@
  * SOFTWARE.
  */

-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
...
@@ -523,7 +523,7 @@ int nes_cm_disconn(struct nes_qp *);
 void nes_cm_disconn_worker(void *);

 /* nes_verbs.c */
-int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32);
+int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
 int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
 struct nes_ib_device *nes_init_ofa_device(struct net_device *);
 void nes_destroy_ofa_device(struct nes_ib_device *);
...
@@ -2450,19 +2450,16 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod
  */
 int nes_cm_disconn(struct nes_qp *nesqp)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&nesqp->lock, flags);
-	if (nesqp->disconn_pending == 0) {
-		nesqp->disconn_pending++;
-		spin_unlock_irqrestore(&nesqp->lock, flags);
-		/* init our disconnect work element, to */
-		INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
-
-		queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work);
-	} else
-		spin_unlock_irqrestore(&nesqp->lock, flags);
+	struct disconn_work *work;

+	work = kzalloc(sizeof *work, GFP_ATOMIC);
+	if (!work)
+		return -ENOMEM; /* Timer will clean up */
+
+	nes_add_ref(&nesqp->ibqp);
+	work->nesqp = nesqp;
+	INIT_WORK(&work->work, nes_disconnect_worker);
+	queue_work(g_cm_core->disconn_wq, &work->work);
 	return 0;
 }
@@ -2472,11 +2469,14 @@ int nes_cm_disconn(struct nes_qp *nesqp)
  */
 static void nes_disconnect_worker(struct work_struct *work)
 {
-	struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
+	struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+	struct nes_qp *nesqp = dwork->nesqp;

+	kfree(dwork);
 	nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
			nesqp->last_aeq, nesqp->hwqp.qp_id);
 	nes_cm_disconn_true(nesqp);
+	nes_rem_ref(&nesqp->ibqp);
 }
@@ -2493,7 +2493,12 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
 	u16 last_ae;
 	u8 original_hw_tcp_state;
 	u8 original_ibqp_state;
-	u8 issued_disconnect_reset = 0;
+	enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK;
+	int issue_disconn = 0;
+	int issue_close = 0;
+	int issue_flush = 0;
+	u32 flush_q = NES_CQP_FLUSH_RQ;
+	struct ib_event ibevent;

 	if (!nesqp) {
		nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
@@ -2517,24 +2522,55 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
 	original_ibqp_state = nesqp->ibqp_state;
 	last_ae = nesqp->last_aeq;

+	if (nesqp->term_flags) {
+		issue_disconn = 1;
+		issue_close = 1;
+		nesqp->cm_id = NULL;
+		if (nesqp->flush_issued == 0) {
+			nesqp->flush_issued = 1;
+			issue_flush = 1;
+		}
+	} else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+			((original_ibqp_state == IB_QPS_RTS) &&
+			(last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+		issue_disconn = 1;
+		if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
+			disconn_status = IW_CM_EVENT_STATUS_RESET;
+	}
+
+	if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
+		 (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
+		 (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
+		 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+		issue_close = 1;
+		nesqp->cm_id = NULL;
+		if (nesqp->flush_issued == 0) {
+			nesqp->flush_issued = 1;
+			issue_flush = 1;
+		}
+	}
+
+	spin_unlock_irqrestore(&nesqp->lock, flags);

-	nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state);
+	if ((issue_flush) && (nesqp->destroyed == 0)) {
+		/* Flush the queue(s) */
+		if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE)
+			flush_q |= NES_CQP_FLUSH_SQ;
+		flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1);

-	if ((nesqp->cm_id) && (cm_id->event_handler)) {
-		if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
-				((original_ibqp_state == IB_QPS_RTS) &&
-				(last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+		if (nesqp->term_flags) {
+			ibevent.device = nesqp->ibqp.device;
+			ibevent.event = nesqp->terminate_eventtype;
+			ibevent.element.qp = &nesqp->ibqp;
+			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+		}
+	}
+
+	if ((cm_id) && (cm_id->event_handler)) {
+		if (issue_disconn) {
			atomic_inc(&cm_disconnects);
			cm_event.event = IW_CM_EVENT_DISCONNECT;
-			if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
-				cm_event.status = IW_CM_EVENT_STATUS_RESET;
-				nes_debug(NES_DBG_CM, "Generating a CM "
-					"Disconnect Event (status reset) for "
-					"QP%u, cm_id = %p. \n",
-					nesqp->hwqp.qp_id, cm_id);
-			} else
-				cm_event.status = IW_CM_EVENT_STATUS_OK;
+			cm_event.status = disconn_status;
			cm_event.local_addr = cm_id->local_addr;
			cm_event.remote_addr = cm_id->remote_addr;
			cm_event.private_data = NULL;
@@ -2547,29 +2583,14 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
				nesqp->hwqp.sq_tail, cm_id,
				atomic_read(&nesqp->refcount));

-			spin_unlock_irqrestore(&nesqp->lock, flags);
			ret = cm_id->event_handler(cm_id, &cm_event);
			if (ret)
				nes_debug(NES_DBG_CM, "OFA CM event_handler "
					"returned, ret=%d\n", ret);
-			spin_lock_irqsave(&nesqp->lock, flags);
		}

-		nesqp->disconn_pending = 0;
-		/* There might have been another AE while the lock was released */
-		original_hw_tcp_state = nesqp->hw_tcp_state;
-		original_ibqp_state = nesqp->ibqp_state;
-		last_ae = nesqp->last_aeq;
-
-		if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
-		    ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
-		     (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
-		     (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
-		     (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+		if (issue_close) {
			atomic_inc(&cm_closes);
-			nesqp->cm_id = NULL;
-			nesqp->in_disconnect = 0;
-			spin_unlock_irqrestore(&nesqp->lock, flags);
			nes_disconnect(nesqp, 1);

			cm_id->provider_data = nesqp;
@@ -2588,28 +2609,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
			}

			cm_id->rem_ref(cm_id);
-
-			spin_lock_irqsave(&nesqp->lock, flags);
-			if (nesqp->flush_issued == 0) {
-				nesqp->flush_issued = 1;
-				spin_unlock_irqrestore(&nesqp->lock, flags);
-				flush_wqes(nesvnic->nesdev, nesqp,
-					   NES_CQP_FLUSH_RQ, 1);
-			} else
-				spin_unlock_irqrestore(&nesqp->lock, flags);
-		} else {
-			cm_id = nesqp->cm_id;
-			spin_unlock_irqrestore(&nesqp->lock, flags);
-			/* check to see if the inbound reset beat the outbound reset */
-			if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
-				nes_debug(NES_DBG_CM, "QP%u: Decing refcount "
-					"due to inbound reset beating the "
-					"outbound reset.\n", nesqp->hwqp.qp_id);
-			}
		}
-	} else {
-		nesqp->disconn_pending = 0;
-		spin_unlock_irqrestore(&nesqp->lock, flags);
 	}

 	return 0;
...
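The rework above changes the ownership model for disconnect processing: instead of a single work item embedded in the QP (guarded by disconn_pending), each event allocates its own small work struct with GFP_ATOMIC (the caller may hold a spinlock or run in interrupt context) and takes a QP reference so the QP cannot vanish while the work is queued. The shape of the pattern on the worker side:

struct disconn_work {
	struct work_struct work;
	struct nes_qp *nesqp;
};

/* worker: recover the context, release the item, process, drop the ref */
static void disconn_worker(struct work_struct *work)
{
	struct disconn_work *dwork =
		container_of(work, struct disconn_work, work);
	struct nes_qp *nesqp = dwork->nesqp;

	kfree(dwork);			/* context saved locally; item done */
	/* ... run the disconnect state machine on nesqp ... */
	nes_rem_ref(&nesqp->ibqp);	/* pairs with nes_add_ref at queue time */
}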
@@ -410,8 +410,6 @@ struct nes_cm_ops {
 int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
		enum nes_timer_type, int, int);

-int nes_cm_disconn(struct nes_qp *);
-
 int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
 int nes_reject(struct iw_cm_id *, const void *, u8);
 int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
...
This diff is collapsed.
@@ -241,6 +241,7 @@ enum nes_cqp_stag_wqeword_idx {
 };

 #define NES_CQP_OP_IWARP_STATE_SHIFT	28
+#define NES_CQP_OP_TERMLEN_SHIFT	28

 enum nes_cqp_qp_bits {
 	NES_CQP_QP_ARP_VALID = (1<<8),
@@ -265,12 +266,16 @@ enum nes_cqp_qp_bits {
 	NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
 	NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
 	NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
+	NES_CQP_QP_TERM_DONT_SEND_FIN = (1<<24),
+	NES_CQP_QP_TERM_DONT_SEND_TERM_MSG = (1<<25),
 	NES_CQP_QP_RESET = (1<<31),
 };

 enum nes_cqp_qp_wqe_word_idx {
 	NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
 	NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
+	NES_CQP_QP_WQE_FLUSH_SQ_CODE = 8,
+	NES_CQP_QP_WQE_FLUSH_RQ_CODE = 9,
 	NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
 };

@@ -361,6 +366,7 @@ enum nes_cqp_arp_bits {
 enum nes_cqp_flush_bits {
 	NES_CQP_FLUSH_SQ = (1<<30),
 	NES_CQP_FLUSH_RQ = (1<<31),
+	NES_CQP_FLUSH_MAJ_MIN = (1<<28),
 };

 enum nes_cqe_opcode_bits {
@@ -633,11 +639,14 @@ enum nes_aeqe_bits {
 	NES_AEQE_INBOUND_RDMA = (1<<19),
 	NES_AEQE_IWARP_STATE_MASK = (7<<20),
 	NES_AEQE_TCP_STATE_MASK = (0xf<<24),
+	NES_AEQE_Q2_DATA_WRITTEN = (0x3<<28),
 	NES_AEQE_VALID = (1<<31),
 };

 #define NES_AEQE_IWARP_STATE_SHIFT	20
 #define NES_AEQE_TCP_STATE_SHIFT	24
+#define NES_AEQE_Q2_DATA_ETHERNET	(1<<28)
+#define NES_AEQE_Q2_DATA_MPA		(1<<29)

 enum nes_aeqe_iwarp_state {
 	NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
@@ -751,6 +760,15 @@ enum nes_iwarp_sq_wqe_bits {
 	NES_IWARP_SQ_OP_NOP = 12,
 };

+enum nes_iwarp_cqe_major_code {
+	NES_IWARP_CQE_MAJOR_FLUSH = 1,
+	NES_IWARP_CQE_MAJOR_DRV = 0x8000
+};
+
+enum nes_iwarp_cqe_minor_code {
+	NES_IWARP_CQE_MINOR_FLUSH = 1
+};
+
 #define NES_EEPROM_READ_REQUEST (1<<16)
 #define NES_MAC_ADDR_VALID      (1<<20)

@@ -1119,6 +1137,7 @@ struct nes_adapter {
 	u8 netdev_max;	/* from host nic address count in EEPROM */
 	u8 port_count;
 	u8 virtwq;
+	u8 send_term_ok;
 	u8 et_use_adaptive_rx_coalesce;
 	u8 adapter_fcn_count;
 	u8 pft_mcast_map[NES_PFT_SIZE];
@@ -1217,6 +1236,90 @@ struct nes_ib_device {
 	u32 num_pd;
 };

+enum nes_hdrct_flags {
+	DDP_LEN_FLAG = 0x80,
+	DDP_HDR_FLAG = 0x40,
+	RDMA_HDR_FLAG = 0x20
+};
+
+enum nes_term_layers {
+	LAYER_RDMA = 0,
+	LAYER_DDP = 1,
+	LAYER_MPA = 2
+};
+
+enum nes_term_error_types {
+	RDMAP_CATASTROPHIC = 0,
+	RDMAP_REMOTE_PROT = 1,
+	RDMAP_REMOTE_OP = 2,
+	DDP_CATASTROPHIC = 0,
+	DDP_TAGGED_BUFFER = 1,
+	DDP_UNTAGGED_BUFFER = 2,
+	DDP_LLP = 3
+};
+
+enum nes_term_rdma_errors {
+	RDMAP_INV_STAG = 0x00,
+	RDMAP_INV_BOUNDS = 0x01,
+	RDMAP_ACCESS = 0x02,
+	RDMAP_UNASSOC_STAG = 0x03,
+	RDMAP_TO_WRAP = 0x04,
+	RDMAP_INV_RDMAP_VER = 0x05,
+	RDMAP_UNEXPECTED_OP = 0x06,
+	RDMAP_CATASTROPHIC_LOCAL = 0x07,
+	RDMAP_CATASTROPHIC_GLOBAL = 0x08,
+	RDMAP_CANT_INV_STAG = 0x09,
+	RDMAP_UNSPECIFIED = 0xff
+};
+
+enum nes_term_ddp_errors {
+	DDP_CATASTROPHIC_LOCAL = 0x00,
+	DDP_TAGGED_INV_STAG = 0x00,
+	DDP_TAGGED_BOUNDS = 0x01,
+	DDP_TAGGED_UNASSOC_STAG = 0x02,
+	DDP_TAGGED_TO_WRAP = 0x03,
+	DDP_TAGGED_INV_DDP_VER = 0x04,
+	DDP_UNTAGGED_INV_QN = 0x01,
+	DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
+	DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
+	DDP_UNTAGGED_INV_MO = 0x04,
+	DDP_UNTAGGED_INV_TOO_LONG = 0x05,
+	DDP_UNTAGGED_INV_DDP_VER = 0x06
+};
+
+enum nes_term_mpa_errors {
+	MPA_CLOSED = 0x01,
+	MPA_CRC = 0x02,
+	MPA_MARKER = 0x03,
+	MPA_REQ_RSP = 0x04,
+};
+
+struct nes_terminate_hdr {
+	u8 layer_etype;
+	u8 error_code;
+	u8 hdrct;
+	u8 rsvd;
+};
+
+/* Used to determine how to fill in terminate error codes */
+#define IWARP_OPCODE_WRITE		0
+#define IWARP_OPCODE_READREQ		1
+#define IWARP_OPCODE_READRSP		2
+#define IWARP_OPCODE_SEND		3
+#define IWARP_OPCODE_SEND_INV		4
+#define IWARP_OPCODE_SEND_SE		5
+#define IWARP_OPCODE_SEND_SE_INV	6
+#define IWARP_OPCODE_TERM		7
+
+/* These values are used only during terminate processing */
+#define TERM_DDP_LEN_TAGGED	14
+#define TERM_DDP_LEN_UNTAGGED	18
+#define TERM_RDMA_LEN		28
+#define RDMA_OPCODE_MASK	0x0f
+#define RDMA_READ_REQ_OPCODE	1
+#define BAD_FRAME_OFFSET	64
+#define CQE_MAJOR_DRV		0x8000
+
 #define nes_vlan_rx vlan_hwaccel_receive_skb
 #define nes_netif_rx netif_receive_skb
...
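A plausible use of the terminate definitions above (a hedged sketch, not lifted from the driver): iWARP terminate control information packs the originating layer into the high nibble of layer_etype, the per-layer error type into the low nibble, and the error code into the following byte.

struct nes_terminate_hdr hdr = {
	/* assumption: layer in high nibble, error type in low nibble */
	.layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER,
	.error_code  = DDP_UNTAGGED_INV_MSN_NO_BUF,
	.hdrct       = DDP_LEN_FLAG | DDP_HDR_FLAG,
};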
@@ -183,6 +183,9 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada
 	} else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
		nesadapter->virtwq = 1;
 	}
+	if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3))
+		nesadapter->send_term_ok = 1;
+
 	nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
			(u32)((u8)eeprom_data);

@@ -548,7 +551,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
		spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
 	}
 	if (cqp_request == NULL) {
-		cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL);
+		cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_ATOMIC);
		if (cqp_request) {
			cqp_request->dynamic = 1;
			INIT_LIST_HEAD(&cqp_request->list);
...
@@ -667,15 +667,32 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
  */
 static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
 {
+	struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+	struct net_device *netdev = nesvnic->netdev;
+
 	memset(props, 0, sizeof(*props));

-	props->max_mtu = IB_MTU_2048;
-	props->active_mtu = IB_MTU_2048;
+	props->max_mtu = IB_MTU_4096;
+
+	if (netdev->mtu >= 4096)
+		props->active_mtu = IB_MTU_4096;
+	else if (netdev->mtu >= 2048)
+		props->active_mtu = IB_MTU_2048;
+	else if (netdev->mtu >= 1024)
+		props->active_mtu = IB_MTU_1024;
+	else if (netdev->mtu >= 512)
+		props->active_mtu = IB_MTU_512;
+	else
+		props->active_mtu = IB_MTU_256;
+
 	props->lid = 1;
 	props->lmc = 0;
 	props->sm_lid = 0;
 	props->sm_sl = 0;
-	props->state = IB_PORT_ACTIVE;
+	if (nesvnic->linkup)
+		props->state = IB_PORT_ACTIVE;
+	else
+		props->state = IB_PORT_DOWN;
 	props->phys_state = 0;
 	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
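The active-MTU ladder above simply rounds the netdev MTU down to the nearest value the IB enum can express. The same mapping as a hypothetical standalone helper:

static enum ib_mtu netdev_mtu_to_ib_mtu(unsigned int mtu)	/* hypothetical */
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	if (mtu >= 2048)
		return IB_MTU_2048;
	if (mtu >= 1024)
		return IB_MTU_1024;
	if (mtu >= 512)
		return IB_MTU_512;
	return IB_MTU_256;
}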
...@@ -1505,13 +1522,46 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, ...@@ -1505,13 +1522,46 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
} }
/**
* nes_clean_cq
*/
static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq)
{
u32 cq_head;
u32 lo;
u32 hi;
u64 u64temp;
unsigned long flags = 0;
spin_lock_irqsave(&nescq->lock, flags);
cq_head = nescq->hw_cq.cq_head;
while (le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
rmb();
lo = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
hi = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]);
u64temp = (((u64)hi) << 32) | ((u64)lo);
u64temp &= ~(NES_SW_CONTEXT_ALIGN-1);
if (u64temp == (u64)(unsigned long)nesqp) {
/* Zero the context value so cqe will be ignored */
nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 0;
nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX] = 0;
}
if (++cq_head >= nescq->hw_cq.cq_size)
cq_head = 0;
}
spin_unlock_irqrestore(&nescq->lock, flags);
}
/** /**
* nes_destroy_qp * nes_destroy_qp
*/ */
static int nes_destroy_qp(struct ib_qp *ibqp) static int nes_destroy_qp(struct ib_qp *ibqp)
{ {
struct nes_qp *nesqp = to_nesqp(ibqp); struct nes_qp *nesqp = to_nesqp(ibqp);
/* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
struct nes_ucontext *nes_ucontext; struct nes_ucontext *nes_ucontext;
struct ib_qp_attr attr; struct ib_qp_attr attr;
struct iw_cm_id *cm_id; struct iw_cm_id *cm_id;
...@@ -1548,7 +1598,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp) ...@@ -1548,7 +1598,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret); nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret);
} }
if (nesqp->user_mode) { if (nesqp->user_mode) {
if ((ibqp->uobject)&&(ibqp->uobject->context)) { if ((ibqp->uobject)&&(ibqp->uobject->context)) {
nes_ucontext = to_nesucontext(ibqp->uobject->context); nes_ucontext = to_nesucontext(ibqp->uobject->context);
...@@ -1560,6 +1609,13 @@ static int nes_destroy_qp(struct ib_qp *ibqp) ...@@ -1560,6 +1609,13 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
} }
if (nesqp->pbl_pbase) if (nesqp->pbl_pbase)
kunmap(nesqp->page); kunmap(nesqp->page);
} else {
/* Clean any pending completions from the cq(s) */
if (nesqp->nesscq)
nes_clean_cq(nesqp, nesqp->nesscq);
if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq))
nes_clean_cq(nesqp, nesqp->nesrcq);
} }
nes_rem_ref(&nesqp->ibqp); nes_rem_ref(&nesqp->ibqp);
...@@ -2884,7 +2940,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -2884,7 +2940,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
* nes_hw_modify_qp * nes_hw_modify_qp
*/ */
int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
u32 next_iwarp_state, u32 wait_completion) u32 next_iwarp_state, u32 termlen, u32 wait_completion)
{ {
struct nes_hw_cqp_wqe *cqp_wqe; struct nes_hw_cqp_wqe *cqp_wqe;
/* struct iw_cm_id *cm_id = nesqp->cm_id; */ /* struct iw_cm_id *cm_id = nesqp->cm_id; */
...@@ -2916,6 +2972,13 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, ...@@ -2916,6 +2972,13 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase); set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);
/* If sending a terminate message, fill in the length (in words) */
if (((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) == NES_CQP_QP_IWARP_STATE_TERMINATE) &&
!(next_iwarp_state & NES_CQP_QP_TERM_DONT_SEND_TERM_MSG)) {
termlen = ((termlen + 3) >> 2) << NES_CQP_OP_TERMLEN_SHIFT;
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_NEW_MSS_IDX, termlen);
}
atomic_set(&cqp_request->refcount, 2); atomic_set(&cqp_request->refcount, 2);
nes_post_cqp_request(nesdev, cqp_request); nes_post_cqp_request(nesdev, cqp_request);
...@@ -3086,6 +3149,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -3086,6 +3149,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
} }
nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n", nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
nesqp->hwqp.qp_id); nesqp->hwqp.qp_id);
if (nesqp->term_flags)
del_timer(&nesqp->terminate_timer);
next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR; next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
/* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */ /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
if (nesqp->hte_added) { if (nesqp->hte_added) {
...@@ -3163,7 +3229,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -3163,7 +3229,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (issue_modify_qp) { if (issue_modify_qp) {
nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n"); nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n");
ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1); ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 1);
if (ret) if (ret)
nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)" nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)"
" failed for QP%u.\n", " failed for QP%u.\n",
...@@ -3328,6 +3394,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, ...@@ -3328,6 +3394,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
head = nesqp->hwqp.sq_head; head = nesqp->hwqp.sq_head;
while (ib_wr) { while (ib_wr) {
/* Check for QP error */
if (nesqp->term_flags) {
err = -EINVAL;
break;
}
/* Check for SQ overflow */ /* Check for SQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
err = -EINVAL; err = -EINVAL;
...@@ -3484,6 +3556,12 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr, ...@@ -3484,6 +3556,12 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
head = nesqp->hwqp.rq_head; head = nesqp->hwqp.rq_head;
while (ib_wr) { while (ib_wr) {
/* Check for QP error */
if (nesqp->term_flags) {
err = -EINVAL;
break;
}
if (ib_wr->num_sge > nesdev->nesadapter->max_sge) { if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
err = -EINVAL; err = -EINVAL;
break; break;
...@@ -3547,7 +3625,6 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) ...@@ -3547,7 +3625,6 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{ {
u64 u64temp; u64 u64temp;
u64 wrid; u64 wrid;
/* u64 u64temp; */
unsigned long flags = 0; unsigned long flags = 0;
struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
struct nes_device *nesdev = nesvnic->nesdev; struct nes_device *nesdev = nesvnic->nesdev;
...@@ -3555,12 +3632,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) ...@@ -3555,12 +3632,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
struct nes_qp *nesqp; struct nes_qp *nesqp;
struct nes_hw_cqe cqe; struct nes_hw_cqe cqe;
u32 head; u32 head;
u32 wq_tail; u32 wq_tail = 0;
u32 cq_size; u32 cq_size;
u32 cqe_count = 0; u32 cqe_count = 0;
u32 wqe_index; u32 wqe_index;
u32 u32temp; u32 u32temp;
/* u32 counter; */ u32 move_cq_head = 1;
u32 err_code;
nes_debug(NES_DBG_CQ, "\n"); nes_debug(NES_DBG_CQ, "\n");
@@ -3570,29 +3648,40 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 	cq_size = nescq->hw_cq.cq_size;
 
 	while (cqe_count < num_entries) {
-		if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
-				NES_CQE_VALID) {
-			/*
-			 * Make sure we read CQ entry contents *after*
-			 * we've checked the valid bit.
-			 */
-			rmb();
-
-			cqe = nescq->hw_cq.cq_vbase[head];
-			nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
-			u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
-			wqe_index = u32temp &
-					(nesdev->nesadapter->max_qp_wr - 1);
-			u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
-			/* parse CQE, get completion context from WQE (either rq or sq */
-			u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
-					((u64)u32temp);
-			nesqp = *((struct nes_qp **)&u64temp);
+		if ((le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
+				NES_CQE_VALID) == 0)
+			break;
+
+		/*
+		 * Make sure we read CQ entry contents *after*
+		 * we've checked the valid bit.
+		 */
+		rmb();
+
+		cqe = nescq->hw_cq.cq_vbase[head];
+		u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+		wqe_index = u32temp & (nesdev->nesadapter->max_qp_wr - 1);
+		u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
+		/* parse CQE, get completion context from WQE (either rq or sq) */
+		u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
+				((u64)u32temp);
+
+		if (u64temp) {
+			nesqp = (struct nes_qp *)(unsigned long)u64temp;
 			memset(entry, 0, sizeof *entry);
 			if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
 				entry->status = IB_WC_SUCCESS;
 			} else {
-				entry->status = IB_WC_WR_FLUSH_ERR;
+				err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]);
+				if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16)) {
+					entry->status = err_code & 0x0000ffff;
+
+					/* The rest of the cqe's will be marked as flushed */
+					nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX] =
+						cpu_to_le32((NES_IWARP_CQE_MAJOR_FLUSH << 16) |
+							    NES_IWARP_CQE_MINOR_FLUSH);
+				} else
+					entry->status = IB_WC_WR_FLUSH_ERR;
 			}
 
 			entry->qp = &nesqp->ibqp;
@@ -3601,20 +3690,18 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 			if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
 				if (nesqp->skip_lsmm) {
 					nesqp->skip_lsmm = 0;
-					wq_tail = nesqp->hwqp.sq_tail++;
+					nesqp->hwqp.sq_tail++;
 				}
 
 				/* Working on a SQ Completion*/
-				wq_tail = wqe_index;
-				nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
-				wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+				wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
 						wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
-						((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+						((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
 						wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
-				entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+				entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
 						wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
 
-				switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+				switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
 						wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
 					case NES_IWARP_SQ_OP_RDMAW:
 						nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n");
@@ -3623,7 +3710,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 					case NES_IWARP_SQ_OP_RDMAR:
 						nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n");
 						entry->opcode = IB_WC_RDMA_READ;
-						entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+						entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
 								wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
 						break;
 					case NES_IWARP_SQ_OP_SENDINV:
@@ -3634,33 +3721,54 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 						entry->opcode = IB_WC_SEND;
 						break;
 				}
+
+				nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
+				if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)) {
+					move_cq_head = 0;
+					wq_tail = nesqp->hwqp.sq_tail;
+				}
 			} else {
 				/* Working on a RQ Completion*/
-				wq_tail = wqe_index;
-				nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
 				entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
-				wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
-					((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
+				wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
+					((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
 				entry->opcode = IB_WC_RECV;
+
+				nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
+				if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) {
+					move_cq_head = 0;
+					wq_tail = nesqp->hwqp.rq_tail;
+				}
 			}
+
 			entry->wr_id = wrid;
+			entry++;
+			cqe_count++;
+		}
 
+		if (move_cq_head) {
+			nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
 			if (++head >= cq_size)
 				head = 0;
-			cqe_count++;
 			nescq->polled_completions++;
+
 			if ((nescq->polled_completions > (cq_size / 2)) ||
 					(nescq->polled_completions == 255)) {
 				nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
 						" are pending %u of %u.\n",
 						nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
 				nes_write32(nesdev->regs+NES_CQE_ALLOC,
 						nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
 				nescq->polled_completions = 0;
 			}
-			entry++;
-		} else
-			break;
+		} else {
+			/* Update the wqe index and set status to flush */
+			wqe_index = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+			wqe_index = (wqe_index & (~(nesdev->nesadapter->max_qp_wr - 1))) | wq_tail;
+			nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] =
+					cpu_to_le32(wqe_index);
+			move_cq_head = 1; /* ready for next pass */
+		}
 	}
 
 	if (nescq->polled_completions) {
......
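
The new move_cq_head flag changes how errored completions retire: when a completion carries an error and the work queue still has outstanding WQEs, the CQE is left in place and its WQE index is patched to the new tail, so the next poll synthesizes flush completions for the remaining WQEs from the same hardware CQE. A toy model of just that decision (hypothetical types, not the driver's):

```c
#include <stdint.h>
#include <stdio.h>

/* Toy model: should a polled CQE be consumed (advance the CQ head)
 * or kept so the next poll can flush the QP's remaining WQEs? */
struct toy_wq { uint32_t tail, head; };

static int keep_cqe_for_flush(const struct toy_wq *wq, int wc_is_success)
{
	/* mirrors: (entry->status != IB_WC_SUCCESS) && (tail != head) */
	return !wc_is_success && wq->tail != wq->head;
}

int main(void)
{
	struct toy_wq wq = { .tail = 2, .head = 5 };

	printf("%d\n", keep_cqe_for_flush(&wq, 0)); /* 1: more WQEs to flush */
	printf("%d\n", keep_cqe_for_flush(&wq, 1)); /* 0: move the CQ head */
	return 0;
}
```
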
@@ -40,6 +40,10 @@ struct nes_device;
 #define NES_MAX_USER_DB_REGIONS	4096
 #define NES_MAX_USER_WQ_REGIONS	4096
 
+#define NES_TERM_SENT	0x01
+#define NES_TERM_RCVD	0x02
+#define NES_TERM_DONE	0x04
+
 struct nes_ucontext {
 	struct ib_ucontext ibucontext;
 	struct nes_device  *nesdev;
@@ -119,6 +123,11 @@ struct nes_wq {
 	spinlock_t lock;
 };
 
+struct disconn_work {
+	struct work_struct work;
+	struct nes_qp *nesqp;
+};
+
 struct iw_cm_id;
 struct ietf_mpa_frame;
 
@@ -127,7 +136,6 @@ struct nes_qp {
 	void                  *allocated_buffer;
 	struct iw_cm_id       *cm_id;
 	struct workqueue_struct *wq;
-	struct work_struct    disconn_work;
 	struct nes_cq         *nesscq;
 	struct nes_cq         *nesrcq;
 	struct nes_pd         *nespd;
@@ -155,9 +163,13 @@ struct nes_qp {
 	void                  *pbl_vbase;
 	dma_addr_t            pbl_pbase;
 	struct page           *page;
+	struct timer_list     terminate_timer;
+	enum ib_event_type    terminate_eventtype;
 	wait_queue_head_t     kick_waitq;
 	u16                   in_disconnect;
 	u16                   private_data_len;
+	u16                   term_sq_flush_code;
+	u16                   term_rq_flush_code;
 	u8                    active_conn;
 	u8                    skip_lsmm;
 	u8                    user_mode;
@@ -165,7 +177,7 @@ struct nes_qp {
 	u8                    hw_iwarp_state;
 	u8                    flush_issued;
 	u8                    hw_tcp_state;
-	u8                    disconn_pending;
+	u8                    term_flags;
 	u8                    destroyed;
 };
 #endif			/* NES_VERBS_H */
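
The three NES_TERM_* values are independent bits, so terminate progress can be tracked in the single term_flags byte that replaces disconn_pending. A small userspace sketch of how such flags compose (same constants, illustrative flow):

```c
#include <stdio.h>

#define NES_TERM_SENT	0x01
#define NES_TERM_RCVD	0x02
#define NES_TERM_DONE	0x04

int main(void)
{
	unsigned char term_flags = 0;

	term_flags |= NES_TERM_SENT;	/* local TERMINATE queued */
	term_flags |= NES_TERM_RCVD;	/* remote TERMINATE seen */

	if ((term_flags & (NES_TERM_SENT | NES_TERM_RCVD)) &&
	    !(term_flags & NES_TERM_DONE))
		term_flags |= NES_TERM_DONE;	/* both directions settled */

	printf("term_flags = 0x%02x\n", term_flags);	/* prints 0x07 */
	return 0;
}
```
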
@@ -31,7 +31,6 @@
  */
 
 #include <rdma/ib_cm.h>
-#include <rdma/ib_cache.h>
 #include <net/dst.h>
 #include <net/icmp.h>
 #include <linux/icmpv6.h>
......
@@ -36,7 +36,6 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 
-#include <rdma/ib_cache.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
......
@@ -604,8 +604,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 					   skb_queue_len(&neigh->queue));
 				goto err_drop;
 			}
-		} else
+		} else {
+			spin_unlock_irqrestore(&priv->lock, flags);
 			ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
+			return;
+		}
 	} else {
 		neigh->ah  = NULL;
@@ -688,7 +691,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 			ipoib_dbg(priv, "Send unicast ARP to %04x\n",
 				  be16_to_cpu(path->pathrec.dlid));
 
+			spin_unlock_irqrestore(&priv->lock, flags);
 			ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
+			return;
 		} else if ((path->query || !path_rec_start(dev, path)) &&
 			   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
 			/* put pseudoheader back on for next time */
......
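
Both hunks apply the same fix: ipoib_send() is now called after dropping priv->lock, and the path returns immediately so it cannot fall through to the shared unlock. A rough userspace model of the pattern, using a pthread mutex in place of the spinlock (illustrative only, not IPoIB code):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void send_packet(void) { puts("send outside lock"); }

static void tx_path(int have_address)
{
	pthread_mutex_lock(&lock);
	if (have_address) {
		pthread_mutex_unlock(&lock);	/* unlock first ... */
		send_packet();			/* ... then do the long send */
		return;				/* and skip the common unlock */
	}
	/* slow path: queue the packet, still under the lock */
	pthread_mutex_unlock(&lock);
}

int main(void) { tx_path(1); tx_path(0); return 0; }
```
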
@@ -720,7 +720,9 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 			}
 		}
 
+		spin_unlock_irqrestore(&priv->lock, flags);
 		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
+		return;
 	}
 
 unlock:
@@ -758,6 +760,20 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 	}
 }
 
+static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
+				     const u8 *broadcast)
+{
+	if (addrlen != INFINIBAND_ALEN)
+		return 0;
+	/* reserved QPN, prefix, scope */
+	if (memcmp(addr, broadcast, 6))
+		return 0;
+	/* signature lower, pkey */
+	if (memcmp(addr + 7, broadcast + 7, 3))
+		return 0;
+	return 1;
+}
+
 void ipoib_mcast_restart_task(struct work_struct *work)
 {
 	struct ipoib_dev_priv *priv =
@@ -791,6 +807,11 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
 		union ib_gid mgid;
 
+		if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
+					       mclist->dmi_addrlen,
+					       dev->broadcast))
+			continue;
+
 		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
 
 		mcast = __ipoib_mcast_find(dev, &mgid);
......
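
ipoib_mcast_addr_is_valid() accepts a 20-byte IPoIB hardware address only if it matches the broadcast address in the reserved-QPN/prefix/scope bytes (0-5) and the signature/P_Key bytes (7-9); byte 6 and the group ID may differ. A self-contained check with the same byte ranges (the all-zero broadcast address here is illustrative, not a real IPoIB broadcast):

```c
#include <stdio.h>
#include <string.h>

#define INFINIBAND_ALEN 20	/* matches the kernel's definition */

static int addr_is_valid(const unsigned char *addr, unsigned int addrlen,
			 const unsigned char *broadcast)
{
	if (addrlen != INFINIBAND_ALEN)
		return 0;
	if (memcmp(addr, broadcast, 6))		/* reserved QPN, prefix, scope */
		return 0;
	if (memcmp(addr + 7, broadcast + 7, 3))	/* signature lower, pkey */
		return 0;
	return 1;
}

int main(void)
{
	unsigned char bcast[INFINIBAND_ALEN] = {0}, mc[INFINIBAND_ALEN] = {0};

	mc[12] = 0xab;	/* different group ID byte: still valid */
	printf("%d\n", addr_is_valid(mc, INFINIBAND_ALEN, bcast));	/* 1 */

	mc[8] = 0x55;	/* different P_Key byte: rejected */
	printf("%d\n", addr_is_valid(mc, INFINIBAND_ALEN, bcast));	/* 0 */
	return 0;
}
```
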
@@ -34,7 +34,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/hardirq.h>
 
 #include <linux/mlx4/cmd.h>
......
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
@@ -41,6 +40,10 @@
 #include "mlx4.h"
 #include "fw.h"
 
+enum {
+	MLX4_IRQNAME_SIZE	= 64
+};
+
 enum {
 	MLX4_NUM_ASYNC_EQE	= 0x100,
 	MLX4_NUM_SPARE_EQE	= 0x80,
@@ -526,48 +529,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
 	iounmap(priv->clr_base);
 }
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
-{
-	struct mlx4_priv *priv = mlx4_priv(dev);
-	int ret;
-
-	/*
-	 * We assume that mapping one page is enough for the whole EQ
-	 * context table.  This is fine with all current HCAs, because
-	 * we only use 32 EQs and each EQ uses 64 bytes of context
-	 * memory, or 1 KB total.
-	 */
-	priv->eq_table.icm_virt = icm_virt;
-	priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
-	if (!priv->eq_table.icm_page)
-		return -ENOMEM;
-	priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
-					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
-		__free_page(priv->eq_table.icm_page);
-		return -ENOMEM;
-	}
-
-	ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
-	if (ret) {
-		pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-			       PCI_DMA_BIDIRECTIONAL);
-		__free_page(priv->eq_table.icm_page);
-	}
-
-	return ret;
-}
-
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
-{
-	struct mlx4_priv *priv = mlx4_priv(dev);
-
-	mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
-	pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-		       PCI_DMA_BIDIRECTIONAL);
-	__free_page(priv->eq_table.icm_page);
-}
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -615,7 +576,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		priv->eq_table.clr_int  = priv->clr_base +
 			(priv->eq_table.inta_pin < 32 ? 4 : 0);
 
-	priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
+	priv->eq_table.irq_names =
+		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
+			GFP_KERNEL);
 	if (!priv->eq_table.irq_names) {
 		err = -ENOMEM;
 		goto err_out_bitmap;
@@ -638,17 +601,25 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		goto err_out_comp;
 
 	if (dev->flags & MLX4_FLAG_MSI_X) {
-		static const char async_eq_name[] = "mlx4-async";
 		const char *eq_name;
 
 		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
 			if (i < dev->caps.num_comp_vectors) {
-				snprintf(priv->eq_table.irq_names + i * 16, 16,
-					 "mlx4-comp-%d", i);
-				eq_name = priv->eq_table.irq_names + i * 16;
-			} else
-				eq_name = async_eq_name;
+				snprintf(priv->eq_table.irq_names +
+					 i * MLX4_IRQNAME_SIZE,
+					 MLX4_IRQNAME_SIZE,
+					 "mlx4-comp-%d@pci:%s", i,
+					 pci_name(dev->pdev));
+			} else {
+				snprintf(priv->eq_table.irq_names +
+					 i * MLX4_IRQNAME_SIZE,
+					 MLX4_IRQNAME_SIZE,
+					 "mlx4-async@pci:%s",
+					 pci_name(dev->pdev));
+			}
 
+			eq_name = priv->eq_table.irq_names +
+				  i * MLX4_IRQNAME_SIZE;
 			err = request_irq(priv->eq_table.eq[i].irq,
 					  mlx4_msi_x_interrupt, 0, eq_name,
 					  priv->eq_table.eq + i);
@@ -658,8 +629,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 			priv->eq_table.eq[i].have_irq = 1;
 		}
 	} else {
+		snprintf(priv->eq_table.irq_names,
+			 MLX4_IRQNAME_SIZE,
+			 DRV_NAME "@pci:%s",
+			 pci_name(dev->pdev));
 		err = request_irq(dev->pdev->irq, mlx4_interrupt,
-				  IRQF_SHARED, DRV_NAME, dev);
+				  IRQF_SHARED, priv->eq_table.irq_names, dev);
 		if (err)
 			goto err_out_async;
......
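
The rename needs more room per IRQ name: the old 16-byte slots cannot hold "mlx4-comp-%d@pci:%s" once the PCI device name is appended, hence the MLX4_IRQNAME_SIZE slots and the extra slot for the async vector. A quick userspace check of the slot layout (the PCI name string is an assumed example):

```c
#include <stdio.h>

enum { MLX4_IRQNAME_SIZE = 64 };	/* from the hunk above */

int main(void)
{
	char names[MLX4_IRQNAME_SIZE * 3];
	const char *pci = "0000:0b:00.0";	/* example PCI name */

	/* one fixed-size slot per comp vector, plus one for async */
	for (int i = 0; i < 2; ++i)
		snprintf(names + i * MLX4_IRQNAME_SIZE, MLX4_IRQNAME_SIZE,
			 "mlx4-comp-%d@pci:%s", i, pci);
	snprintf(names + 2 * MLX4_IRQNAME_SIZE, MLX4_IRQNAME_SIZE,
		 "mlx4-async@pci:%s", pci);

	for (int i = 0; i < 3; ++i)
		puts(names + i * MLX4_IRQNAME_SIZE);
	return 0;
}
```
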
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
......
@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 		goto err_unmap_aux;
 	}
 
-	err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
+	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
+				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
+				  dev->caps.num_eqs, dev->caps.num_eqs,
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
 		goto err_unmap_cmpt;
@@ -668,7 +671,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
 
 err_unmap_eq:
-	mlx4_unmap_eq_icm(dev);
+	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
 
 err_unmap_cmpt:
 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
 	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
 	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
-	mlx4_unmap_eq_icm(dev);
 
 	mlx4_UNMAP_ICM_AUX(dev);
 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
@@ -786,7 +789,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 	return 0;
 
 err_close:
-	mlx4_close_hca(dev);
+	mlx4_CLOSE_HCA(dev, 0);
 
 err_free_icm:
 	mlx4_free_icms(dev);
@@ -1070,18 +1073,12 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_disable_pdev;
 	}
 
-	err = pci_request_region(pdev, 0, DRV_NAME);
+	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
-		dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
+		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
 		goto err_disable_pdev;
 	}
 
-	err = pci_request_region(pdev, 2, DRV_NAME);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
-		goto err_release_bar0;
-	}
-
 	pci_set_master(pdev);
 
 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1090,7 +1087,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
-			goto err_release_bar2;
+			goto err_release_regions;
 		}
 	}
 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1101,7 +1098,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		if (err) {
 			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
 				"aborting.\n");
-			goto err_release_bar2;
+			goto err_release_regions;
 		}
 	}
 
@@ -1110,7 +1107,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		dev_err(&pdev->dev, "Device struct alloc failed, "
 			"aborting.\n");
 		err = -ENOMEM;
-		goto err_release_bar2;
+		goto err_release_regions;
 	}
 
 	dev       = &priv->dev;
@@ -1205,11 +1202,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 err_free_dev:
 	kfree(priv);
 
-err_release_bar2:
-	pci_release_region(pdev, 2);
-
-err_release_bar0:
-	pci_release_region(pdev, 0);
+err_release_regions:
+	pci_release_regions(pdev);
 
 err_disable_pdev:
 	pci_disable_device(pdev);
@@ -1265,8 +1259,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 		pci_disable_msix(pdev);
 
 	kfree(priv);
-	pci_release_region(pdev, 2);
-	pci_release_region(pdev, 0);
+	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 }
......
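
Claiming every BAR with one pci_request_regions() call lets the two per-BAR error labels collapse into a single err_release_regions. A toy sketch of that unwinding shape (stand-in functions, not the PCI API):

```c
#include <stdio.h>

/* stands in for pci_request_regions(): claim everything at once */
static int claim_all(void) { return 0; }
static void release_all(void) { puts("released all regions"); }

static int probe(void)
{
	int err = claim_all();
	if (err)
		return err;

	err = -1;	/* pretend a later init step failed */
	if (err)
		goto err_release_regions;
	return 0;

err_release_regions:
	release_all();	/* one label unwinds every claimed resource */
	return err;
}

int main(void) { return probe() ? 1 : 0; }
```
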
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/string.h>
 #include <linux/slab.h>
......
@@ -205,9 +205,7 @@ struct mlx4_eq_table {
 	void __iomem	      **uar_map;
 	u32			clr_mask;
 	struct mlx4_eq	       *eq;
-	u64			icm_virt;
-	struct page	       *icm_page;
-	dma_addr_t		icm_dma;
+	struct mlx4_icm_table	table;
 	struct mlx4_icm_table	cmpt_table;
 	int			have_irq;
 	u8			inta_pin;
@@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 		      struct mlx4_dev_cap *dev_cap,
 		      struct mlx4_init_hca_param *init_hca);
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
-
 int mlx4_cmd_init(struct mlx4_dev *dev);
 void mlx4_cmd_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
......
@@ -32,7 +32,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 
 #include <linux/mlx4/cmd.h>
......
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <asm/page.h>
......
@@ -32,8 +32,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
-
 #include "mlx4.h"
 #include "fw.h"
......
@@ -33,8 +33,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
-
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/qp.h>
......
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
......
@@ -31,8 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
-
 #include <linux/mlx4/cmd.h>
 
 #include "mlx4.h"
......