Commit d172f5a4 authored by Roland Dreier

Merge branches 'cma', 'cxgb4', 'ipoib', 'mlx4', 'mlx4-sriov', 'nes', 'qib' and 'srp' into for-linus

......@@ -167,6 +167,7 @@ int ib_find_cached_pkey(struct ib_device *device,
unsigned long flags;
int i;
int ret = -ENOENT;
int partial_ix = -1;
if (port_num < start_port(device) || port_num > end_port(device))
return -EINVAL;
......@@ -179,6 +180,46 @@ int ib_find_cached_pkey(struct ib_device *device,
for (i = 0; i < cache->table_len; ++i)
if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
if (cache->table[i] & 0x8000) {
*index = i;
ret = 0;
break;
} else
partial_ix = i;
}
if (ret && partial_ix >= 0) {
*index = partial_ix;
ret = 0;
}
read_unlock_irqrestore(&device->cache.lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
int ib_find_exact_cached_pkey(struct ib_device *device,
u8 port_num,
u16 pkey,
u16 *index)
{
struct ib_pkey_cache *cache;
unsigned long flags;
int i;
int ret = -ENOENT;
if (port_num < start_port(device) || port_num > end_port(device))
return -EINVAL;
read_lock_irqsave(&device->cache.lock, flags);
cache = device->cache.pkey_cache[port_num - start_port(device)];
*index = -1;
for (i = 0; i < cache->table_len; ++i)
if (cache->table[i] == pkey) {
*index = i;
ret = 0;
break;
......@@ -188,7 +229,7 @@ int ib_find_cached_pkey(struct ib_device *device,
return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
int ib_get_cached_lmc(struct ib_device *device,
u8 port_num,
......
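The two hunks above teach ib_find_cached_pkey to prefer a full-membership PKey and fall back to a limited-membership match, and add ib_find_exact_cached_pkey for callers that need an exact 16-bit match. Below is a minimal standalone sketch (not part of this commit) of the membership rule the new code relies on: bit 15 of a PKey marks full membership and the low 15 bits are the base partition key. The names pkey_base, pkey_is_full_member, find_pkey_index and tbl are invented for the example.

#include <stdint.h>
#include <stdio.h>

static inline uint16_t pkey_base(uint16_t pkey)           { return pkey & 0x7fff; }
static inline int      pkey_is_full_member(uint16_t pkey) { return (pkey & 0x8000) != 0; }

/* Prefer a full-member entry; fall back to the first limited-member match. */
static int find_pkey_index(const uint16_t *tbl, int tbl_len, uint16_t pkey)
{
	int partial_ix = -1;
	int i;

	for (i = 0; i < tbl_len; ++i) {
		if (pkey_base(tbl[i]) != pkey_base(pkey))
			continue;
		if (pkey_is_full_member(tbl[i]))
			return i;               /* best match: full member */
		if (partial_ix < 0)
			partial_ix = i;         /* remember a limited-member match */
	}
	return partial_ix;                      /* -1 if the PKey is absent */
}

int main(void)
{
	uint16_t tbl[] = { 0x7fff, 0xffff, 0x0001 };

	printf("index = %d\n", find_pkey_index(tbl, 3, 0x7fff));  /* prints "index = 1" */
	return 0;
}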
......@@ -707,18 +707,28 @@ int ib_find_pkey(struct ib_device *device,
{
int ret, i;
u16 tmp_pkey;
int partial_ix = -1;
for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) {
ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
if (ret)
return ret;
if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
*index = i;
return 0;
/* if there is full-member pkey take it.*/
if (tmp_pkey & 0x8000) {
*index = i;
return 0;
}
if (partial_ix < 0)
partial_ix = i;
}
}
/*no full-member, if exists take the limited*/
if (partial_ix >= 0) {
*index = partial_ix;
return 0;
}
return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
......
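For context, a kernel-side consumer resolves a partition key to a table index roughly as in the sketch below (illustrative, not from this commit; device and port_num stand in for whatever handles the caller already holds). With this patch, passing the default full-membership PKey 0xffff also succeeds on a port that only exposes the limited-member 0x7fff.

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static int resolve_default_pkey(struct ib_device *device, u8 port_num,
				u16 *pkey_index)
{
	int err;

	/* 0xffff is the default full-membership PKey. */
	err = ib_find_pkey(device, port_num, 0xffff, pkey_index);
	if (err)
		pr_warn("default PKey not found on port %u (err %d)\n",
			port_num, err);
	return err;
}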
......@@ -137,19 +137,25 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
return -ENOMEM;
wq->rq.qid = c4iw_get_qpid(rdev, uctx);
if (!wq->rq.qid)
goto err1;
if (!wq->rq.qid) {
ret = -ENOMEM;
goto free_sq_qid;
}
if (!user) {
wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
GFP_KERNEL);
if (!wq->sq.sw_sq)
goto err2;
if (!wq->sq.sw_sq) {
ret = -ENOMEM;
goto free_rq_qid;
}
wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
GFP_KERNEL);
if (!wq->rq.sw_rq)
goto err3;
if (!wq->rq.sw_rq) {
ret = -ENOMEM;
goto free_sw_sq;
}
}
/*
......@@ -157,15 +163,23 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
*/
wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
if (!wq->rq.rqt_hwaddr)
goto err4;
if (!wq->rq.rqt_hwaddr) {
ret = -ENOMEM;
goto free_sw_rq;
}
if (user) {
if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
goto err5;
ret = alloc_oc_sq(rdev, &wq->sq);
if (ret)
goto free_hwaddr;
ret = alloc_host_sq(rdev, &wq->sq);
if (ret)
goto free_sq;
} else
if (alloc_host_sq(rdev, &wq->sq))
goto err5;
ret = alloc_host_sq(rdev, &wq->sq);
if (ret)
goto free_hwaddr;
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
......@@ -173,7 +187,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
wq->rq.memsize, &(wq->rq.dma_addr),
GFP_KERNEL);
if (!wq->rq.queue)
goto err6;
goto free_sq;
PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
__func__, wq->sq.queue,
(unsigned long long)virt_to_phys(wq->sq.queue),
......@@ -201,7 +215,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto err7;
goto free_dma;
}
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
......@@ -266,33 +280,33 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
ret = c4iw_ofld_send(rdev, skb);
if (ret)
goto err7;
goto free_dma;
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
if (ret)
goto err7;
goto free_dma;
PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
__func__, wq->sq.qid, wq->rq.qid, wq->db,
(unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
return 0;
err7:
free_dma:
dma_free_coherent(&(rdev->lldi.pdev->dev),
wq->rq.memsize, wq->rq.queue,
dma_unmap_addr(&wq->rq, mapping));
err6:
free_sq:
dealloc_sq(rdev, &wq->sq);
err5:
free_hwaddr:
c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
free_sw_rq:
kfree(wq->rq.sw_rq);
err3:
free_sw_sq:
kfree(wq->sq.sw_sq);
err2:
free_rq_qid:
c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
free_sq_qid:
c4iw_put_qpid(rdev, wq->sq.qid, uctx);
return -ENOMEM;
return ret;
}
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
......
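The create_qp changes above replace the numbered error labels (err1 ... err7) with names that say what gets undone, and return the real error code instead of a hard-coded -ENOMEM. A minimal standalone sketch of that unwind idiom follows (not cxgb4 code; get_id, put_id and struct res are invented for the example):

#include <errno.h>
#include <stdlib.h>

static int  get_id(void)   { return 42; }   /* stand-in allocator */
static void put_id(int id) { (void)id; }    /* stand-in release   */

struct res {
	int   id;
	char *buf_a;
	char *buf_b;
};

static int setup_resources(struct res *r)
{
	int ret;

	r->id = get_id();
	if (!r->id)
		return -ENOMEM;

	r->buf_a = calloc(1, 64);
	if (!r->buf_a) {
		ret = -ENOMEM;
		goto free_id;
	}

	r->buf_b = calloc(1, 64);
	if (!r->buf_b) {
		ret = -ENOMEM;
		goto free_buf_a;
	}
	return 0;		/* caller now owns everything in *r */

	/* Unwind in reverse order of acquisition; each label names what it frees. */
free_buf_a:
	free(r->buf_a);
free_id:
	put_id(r->id);
	return ret;
}

int main(void)
{
	struct res r;

	if (setup_resources(&r))
		return 1;
	free(r.buf_b);
	free(r.buf_a);
	put_id(r.id);
	return 0;
}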
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4_ib.o
mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o
mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o alias_GUID.o sysfs.o
This diff is collapsed.
/*
* Copyright (c) 2012 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_mad.h>
#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>
#include "mlx4_ib.h"
#define CM_CLEANUP_CACHE_TIMEOUT (5 * HZ)
struct id_map_entry {
struct rb_node node;
u32 sl_cm_id;
u32 pv_cm_id;
int slave_id;
int scheduled_delete;
struct mlx4_ib_dev *dev;
struct list_head list;
struct delayed_work timeout;
};
struct cm_generic_msg {
struct ib_mad_hdr hdr;
__be32 local_comm_id;
__be32 remote_comm_id;
};
struct cm_req_msg {
unsigned char unused[0x60];
union ib_gid primary_path_sgid;
};
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
msg->local_comm_id = cpu_to_be32(cm_id);
}
static u32 get_local_comm_id(struct ib_mad *mad)
{
struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
return be32_to_cpu(msg->local_comm_id);
}
static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
msg->remote_comm_id = cpu_to_be32(cm_id);
}
static u32 get_remote_comm_id(struct ib_mad *mad)
{
struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
return be32_to_cpu(msg->remote_comm_id);
}
static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
struct cm_req_msg *msg = (struct cm_req_msg *)mad;
return msg->primary_path_sgid;
}
/* Lock should be taken before called */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
struct rb_node *node = sl_id_map->rb_node;
while (node) {
struct id_map_entry *id_map_entry =
rb_entry(node, struct id_map_entry, node);
if (id_map_entry->sl_cm_id > sl_cm_id)
node = node->rb_left;
else if (id_map_entry->sl_cm_id < sl_cm_id)
node = node->rb_right;
else if (id_map_entry->slave_id > slave_id)
node = node->rb_left;
else if (id_map_entry->slave_id < slave_id)
node = node->rb_right;
else
return id_map_entry;
}
return NULL;
}
static void id_map_ent_timeout(struct work_struct *work)
{
struct delayed_work *delay = to_delayed_work(work);
struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
struct id_map_entry *db_ent, *found_ent;
struct mlx4_ib_dev *dev = ent->dev;
struct mlx4_ib_sriov *sriov = &dev->sriov;
struct rb_root *sl_id_map = &sriov->sl_id_map;
int pv_id = (int) ent->pv_cm_id;
spin_lock(&sriov->id_map_lock);
db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
if (!db_ent)
goto out;
found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
if (found_ent && found_ent == ent)
rb_erase(&found_ent->node, sl_id_map);
idr_remove(&sriov->pv_id_table, pv_id);
out:
list_del(&ent->list);
spin_unlock(&sriov->id_map_lock);
kfree(ent);
}
static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
{
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
struct rb_root *sl_id_map = &sriov->sl_id_map;
struct id_map_entry *ent, *found_ent;
spin_lock(&sriov->id_map_lock);
ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
if (!ent)
goto out;
found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
if (found_ent && found_ent == ent)
rb_erase(&found_ent->node, sl_id_map);
idr_remove(&sriov->pv_id_table, pv_cm_id);
out:
spin_unlock(&sriov->id_map_lock);
}
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
struct id_map_entry *ent;
int slave_id = new->slave_id;
int sl_cm_id = new->sl_cm_id;
ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
if (ent) {
pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
sl_cm_id);
rb_replace_node(&ent->node, &new->node, sl_id_map);
return;
}
/* Go to the bottom of the tree */
while (*link) {
parent = *link;
ent = rb_entry(parent, struct id_map_entry, node);
if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
link = &(*link)->rb_left;
else
link = &(*link)->rb_right;
}
rb_link_node(&new->node, parent, link);
rb_insert_color(&new->node, sl_id_map);
}
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
int ret, id;
static int next_id;
struct id_map_entry *ent;
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL);
if (!ent) {
mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
return ERR_PTR(-ENOMEM);
}
ent->sl_cm_id = sl_cm_id;
ent->slave_id = slave_id;
ent->scheduled_delete = 0;
ent->dev = to_mdev(ibdev);
INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
do {
spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
ret = idr_get_new_above(&sriov->pv_id_table, ent,
next_id, &id);
if (!ret) {
next_id = ((unsigned) id + 1) & MAX_ID_MASK;
ent->pv_cm_id = (u32)id;
sl_id_map_add(ibdev, ent);
}
spin_unlock(&sriov->id_map_lock);
} while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL));
/*the function idr_get_new_above can return -ENOSPC, so don't insert in that case.*/
if (!ret) {
spin_lock(&sriov->id_map_lock);
list_add_tail(&ent->list, &sriov->cm_list);
spin_unlock(&sriov->id_map_lock);
return ent;
}
/*error flow*/
kfree(ent);
mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
return ERR_PTR(-ENOMEM);
}
static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int sl_cm_id, int slave_id)
{
struct id_map_entry *ent;
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
spin_lock(&sriov->id_map_lock);
if (*pv_cm_id == -1) {
ent = id_map_find_by_sl_id(ibdev, sl_cm_id, slave_id);
if (ent)
*pv_cm_id = (int) ent->pv_cm_id;
} else
ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
spin_unlock(&sriov->id_map_lock);
return ent;
}
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
unsigned long flags;
spin_lock_irqsave(&sriov->going_down_lock, flags);
spin_lock(&sriov->id_map_lock);
/*make sure that there is no schedule inside the scheduled work.*/
if (!sriov->is_going_down) {
id->scheduled_delete = 1;
schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
}
spin_unlock(&sriov->id_map_lock);
spin_unlock_irqrestore(&sriov->going_down_lock, flags);
}
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
struct ib_mad *mad)
{
struct id_map_entry *id;
u32 sl_cm_id;
int pv_cm_id = -1;
sl_cm_id = get_local_comm_id(mad);
if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
mad->mad_hdr.attr_id == CM_REP_ATTR_ID) {
id = id_map_alloc(ibdev, slave_id, sl_cm_id);
if (IS_ERR(id)) {
mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
__func__, slave_id, sl_cm_id);
return PTR_ERR(id);
}
} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) {
return 0;
} else {
id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
}
if (!id) {
pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n",
slave_id, sl_cm_id);
return -EINVAL;
}
set_local_comm_id(mad, id->pv_cm_id);
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
schedule_delayed(ibdev, id);
else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
id_map_find_del(ibdev, pv_cm_id);
return 0;
}
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
struct ib_mad *mad)
{
u32 pv_cm_id;
struct id_map_entry *id;
if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID) {
union ib_gid gid;
gid = gid_from_req_msg(ibdev, mad);
*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
if (*slave < 0) {
mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
gid.global.interface_id);
return -ENOENT;
}
return 0;
}
pv_cm_id = get_remote_comm_id(mad);
id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);
if (!id) {
pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
return -ENOENT;
}
*slave = id->slave_id;
set_remote_comm_id(mad, id->sl_cm_id);
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
schedule_delayed(ibdev, id);
else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
id_map_find_del(ibdev, (int) pv_cm_id);
}
return 0;
}
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
spin_lock_init(&dev->sriov.id_map_lock);
INIT_LIST_HEAD(&dev->sriov.cm_list);
dev->sriov.sl_id_map = RB_ROOT;
idr_init(&dev->sriov.pv_id_table);
idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL);
}
/* slave = -1 ==> all slaves */
/* TBD -- call paravirt clean for single slave. Need for slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
struct mlx4_ib_sriov *sriov = &dev->sriov;
struct rb_root *sl_id_map = &sriov->sl_id_map;
struct list_head lh;
struct rb_node *nd;
int need_flush = 1;
struct id_map_entry *map, *tmp_map;
/* cancel all delayed work queue entries */
INIT_LIST_HEAD(&lh);
spin_lock(&sriov->id_map_lock);
list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
if (slave < 0 || slave == map->slave_id) {
if (map->scheduled_delete)
need_flush &= !!cancel_delayed_work(&map->timeout);
}
}
spin_unlock(&sriov->id_map_lock);
if (!need_flush)
flush_scheduled_work(); /* make sure all timers were flushed */
/* now, remove all leftover entries from databases*/
spin_lock(&sriov->id_map_lock);
if (slave < 0) {
while (rb_first(sl_id_map)) {
struct id_map_entry *ent =
rb_entry(rb_first(sl_id_map),
struct id_map_entry, node);
rb_erase(&ent->node, sl_id_map);
idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
}
list_splice_init(&dev->sriov.cm_list, &lh);
} else {
/* first, move nodes belonging to slave to db remove list */
nd = rb_first(sl_id_map);
while (nd) {
struct id_map_entry *ent =
rb_entry(nd, struct id_map_entry, node);
nd = rb_next(nd);
if (ent->slave_id == slave)
list_move_tail(&ent->list, &lh);
}
/* remove those nodes from databases */
list_for_each_entry_safe(map, tmp_map, &lh, list) {
rb_erase(&map->node, sl_id_map);
idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
}
/* add remaining nodes from cm_list */
list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
if (slave == map->slave_id)
list_move_tail(&map->list, &lh);
}
}
spin_unlock(&sriov->id_map_lock);
/* free any map entries left behind due to cancel_delayed_work above */
list_for_each_entry_safe(map, tmp_map, &lh, list) {
list_del(&map->list);
kfree(map);
}
}
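id_map_alloc() above follows the legacy IDR allocation protocol that predates idr_alloc(): preload the tree with idr_pre_get() outside the lock, call idr_get_new_above() under the lock, and retry while it reports -EAGAIN. A kernel-context sketch of just that loop is below (it assumes a pre-3.9 kernel where idr_pre_get/idr_get_new_above still exist and is not standalone userspace code; my_idr, my_lock and store_object are invented names):

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static DEFINE_IDR(my_idr);

static int store_object(void *obj, int start_id, int *out_id)
{
	int ret;

	do {
		spin_lock(&my_lock);
		ret = idr_get_new_above(&my_idr, obj, start_id, out_id);
		spin_unlock(&my_lock);
		/* -EAGAIN: the preallocated layer was consumed; refill and retry.
		 * idr_pre_get() may sleep, so it runs outside the spinlock. */
	} while (ret == -EAGAIN && idr_pre_get(&my_idr, GFP_KERNEL));

	return ret;	/* 0 on success, -EAGAIN or -ENOSPC on failure */
}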
......@@ -547,6 +547,26 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
checksum == cpu_to_be16(0xffff);
}
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
unsigned tail, struct mlx4_cqe *cqe)
{
struct mlx4_ib_proxy_sqp_hdr *hdr;
ib_dma_sync_single_for_cpu(qp->ibqp.device,
qp->sqp_proxy_rcv[tail].map,
sizeof (struct mlx4_ib_proxy_sqp_hdr),
DMA_FROM_DEVICE);
hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
wc->dlid_path_bits = 0;
return 0;
}
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
struct mlx4_ib_qp **cur_qp,
struct ib_wc *wc)
......@@ -559,6 +579,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
int is_error;
u32 g_mlpath_rqpn;
u16 wqe_ctr;
unsigned tail = 0;
repoll:
cqe = next_cqe_sw(cq);
......@@ -634,7 +655,8 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
mlx4_ib_free_srq_wqe(srq, wqe_ctr);
} else {
wq = &(*cur_qp)->rq;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
tail = wq->tail & (wq->wqe_cnt - 1);
wc->wr_id = wq->wrid[tail];
++wq->tail;
}
......@@ -717,6 +739,13 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
break;
}
if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
if ((*cur_qp)->mlx4_ib_qp_type &
(MLX4_IB_QPT_PROXY_SMI_OWNER |
MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
return use_tunnel_data(*cur_qp, cq, wc, tail, cqe);
}
wc->slid = be16_to_cpu(cqe->rlid);
g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
wc->src_qp = g_mlpath_rqpn & 0xffffff;
......
This diff is collapsed.
......@@ -59,6 +59,10 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
int mlx4_ib_sm_guid_assign = 1;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
static const char mlx4_ib_version[] =
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
......@@ -70,6 +74,8 @@ struct update_gid_work {
int port;
};
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static struct workqueue_struct *wq;
static void init_query_mad(struct ib_smp *mad)
......@@ -98,7 +104,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
......@@ -133,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
props->vendor_part_id = dev->dev->pdev->device;
props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
......@@ -182,11 +189,12 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
}
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
struct ib_port_attr *props, int netw_view)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int ext_active_speed;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
......@@ -198,7 +206,10 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL,
if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
in_mad, out_mad);
if (err)
goto out;
......@@ -211,7 +222,10 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
props->state = out_mad->data[32] & 0xf;
props->phys_state = out_mad->data[33] >> 4;
props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
if (netw_view)
props->gid_tbl_len = out_mad->data[50];
else
props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
......@@ -244,7 +258,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
NULL, NULL, in_mad, out_mad);
if (err)
goto out;
......@@ -270,7 +284,7 @@ static u8 state_to_phys_state(enum ib_port_state state)
}
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
struct ib_port_attr *props, int netw_view)
{
struct mlx4_ib_dev *mdev = to_mdev(ibdev);
......@@ -320,26 +334,36 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
return err;
}
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props, int netw_view)
{
int err;
memset(props, 0, sizeof *props);
err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
ib_link_query_port(ibdev, port, props) :
eth_link_query_port(ibdev, port, props);
ib_link_query_port(ibdev, port, props, netw_view) :
eth_link_query_port(ibdev, port, props, netw_view);
return err;
}
static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid)
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
/* returns host view */
return __mlx4_ib_query_port(ibdev, port, props, 0);
}
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid, int netw_view)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
int clear = 0;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
......@@ -350,23 +374,38 @@ static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
if (mlx4_is_mfunc(dev->dev) && netw_view)
mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
memcpy(gid->raw, out_mad->data + 8, 8);
if (mlx4_is_mfunc(dev->dev) && !netw_view) {
if (index) {
/* For any index > 0, return the null guid */
err = 0;
clear = 1;
goto out;
}
}
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
NULL, NULL, in_mad, out_mad);
if (err)
goto out;
memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
out:
if (clear)
memset(gid->raw + 8, 0, 8);
kfree(in_mad);
kfree(out_mad);
return err;
......@@ -386,16 +425,17 @@ static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid)
{
if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
return __mlx4_ib_query_gid(ibdev, port, index, gid);
return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
else
return iboe_query_gid(ibdev, port, index, gid);
}
static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey, int netw_view)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
......@@ -407,7 +447,11 @@ static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
in_mad, out_mad);
if (err)
goto out;
......@@ -419,6 +463,11 @@ static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
return err;
}
static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
struct ib_device_modify *props)
{
......@@ -431,6 +480,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
return 0;
if (mlx4_is_slave(to_mdev(ibdev)->dev))
return -EOPNOTSUPP;
spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
memcpy(ibdev->node_desc, props->node_desc, 64);
spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
......@@ -446,7 +498,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
memset(mailbox->buf, 0, 256);
memcpy(mailbox->buf, props->node_desc, 64);
mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
......@@ -849,6 +901,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
......@@ -858,8 +911,10 @@ static int init_node_data(struct mlx4_ib_dev *dev)
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
if (mlx4_is_master(dev->dev))
mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
......@@ -867,10 +922,11 @@ static int init_node_data(struct mlx4_ib_dev *dev)
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
......@@ -959,7 +1015,7 @@ static void update_gids_task(struct work_struct *work)
err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
MLX4_CMD_WRAPPED);
if (err)
pr_warn("set port command failed\n");
else {
......@@ -1121,6 +1177,38 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event
return NOTIFY_DONE;
}
static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
int port;
int slave;
int i;
if (mlx4_is_master(ibdev->dev)) {
for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
for (i = 0;
i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
++i) {
ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
/* master has the identity virt2phys pkey mapping */
(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
}
}
}
/* initialize pkey cache */
for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
for (i = 0;
i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
++i)
ibdev->pkeys.phys_pkey_cache[port-1][i] =
(i) ? 0 : 0xFFFF;
}
}
}
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
char name[32];
......@@ -1207,11 +1295,15 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
pr_info_once("%s", mlx4_ib_version);
if (mlx4_is_mfunc(dev)) {
pr_warn("IB not yet supported in SRIOV\n");
mlx4_foreach_non_ib_transport_port(i, dev)
num_ports++;
if (mlx4_is_mfunc(dev) && num_ports) {
dev_err(&dev->pdev->dev, "RoCE is not supported over SRIOV as yet\n");
return NULL;
}
num_ports = 0;
mlx4_foreach_ib_transport_port(i, dev)
num_ports++;
......@@ -1318,10 +1410,12 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
if (!mlx4_is_slave(ibdev->dev)) {
ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
}
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
......@@ -1357,11 +1451,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (mlx4_ib_mad_init(ibdev))
goto err_reg;
if (mlx4_ib_init_sriov(ibdev))
goto err_mad;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
iboe->nb.notifier_call = mlx4_ib_netdev_event;
err = register_netdevice_notifier(&iboe->nb);
if (err)
goto err_reg;
goto err_sriov;
}
for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
......@@ -1372,6 +1469,18 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_active = true;
if (mlx4_is_mfunc(ibdev->dev))
init_pkeys(ibdev);
/* create paravirt contexts for any VFs which are active */
if (mlx4_is_master(ibdev->dev)) {
for (j = 0; j < MLX4_MFUNC_MAX; j++) {
if (j == mlx4_master_func_num(ibdev->dev))
continue;
if (mlx4_is_slave_active(ibdev->dev, j))
do_slave_init(ibdev, j, 1);
}
}
return ibdev;
err_notif:
......@@ -1379,6 +1488,12 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
pr_warn("failure unregistering notifier\n");
flush_workqueue(wq);
err_sriov:
mlx4_ib_close_sriov(ibdev);
err_mad:
mlx4_ib_mad_cleanup(ibdev);
err_reg:
ib_unregister_device(&ibdev->ib_dev);
......@@ -1407,6 +1522,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
struct mlx4_ib_dev *ibdev = ibdev_ptr;
int p;
mlx4_ib_close_sriov(ibdev);
mlx4_ib_mad_cleanup(ibdev);
ib_unregister_device(&ibdev->ib_dev);
if (ibdev->iboe.nb.notifier_call) {
......@@ -1428,6 +1544,51 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ib_dealloc_device(&ibdev->ib_dev);
}
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
struct mlx4_ib_demux_work **dm = NULL;
struct mlx4_dev *dev = ibdev->dev;
int i;
unsigned long flags;
if (!mlx4_is_master(dev))
return;
dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
if (!dm) {
pr_err("failed to allocate memory for tunneling qp update\n");
goto out;
}
for (i = 0; i < dev->caps.num_ports; i++) {
dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
if (!dm[i]) {
pr_err("failed to allocate memory for tunneling qp update work struct\n");
for (i = 0; i < dev->caps.num_ports; i++) {
if (dm[i])
kfree(dm[i]);
}
goto out;
}
}
/* initialize or tear down tunnel QPs for the slave */
for (i = 0; i < dev->caps.num_ports; i++) {
INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
dm[i]->port = i + 1;
dm[i]->slave = slave;
dm[i]->do_init = do_init;
dm[i]->dev = ibdev;
spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
if (!ibdev->sriov.is_going_down)
queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
}
out:
if (dm)
kfree(dm);
return;
}
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
enum mlx4_dev_event event, unsigned long param)
{
......@@ -1435,22 +1596,28 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
struct mlx4_eqe *eqe = NULL;
struct ib_event_work *ew;
int port = 0;
int p = 0;
if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
eqe = (struct mlx4_eqe *)param;
else
port = (u8)param;
if (port > ibdev->num_ports)
return;
p = (int) param;
switch (event) {
case MLX4_DEV_EVENT_PORT_UP:
if (p > ibdev->num_ports)
return;
if (mlx4_is_master(dev) &&
rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
IB_LINK_LAYER_INFINIBAND) {
mlx4_ib_invalidate_all_guid_record(ibdev, p);
}
ibev.event = IB_EVENT_PORT_ACTIVE;
break;
case MLX4_DEV_EVENT_PORT_DOWN:
if (p > ibdev->num_ports)
return;
ibev.event = IB_EVENT_PORT_ERR;
break;
......@@ -1469,7 +1636,21 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
INIT_WORK(&ew->work, handle_port_mgmt_change_event);
memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
ew->ib_dev = ibdev;
handle_port_mgmt_change_event(&ew->work);
/* need to queue only for port owner, which uses GEN_EQE */
if (mlx4_is_master(dev))
queue_work(wq, &ew->work);
else
handle_port_mgmt_change_event(&ew->work);
return;
case MLX4_DEV_EVENT_SLAVE_INIT:
/* here, p is the slave id */
do_slave_init(ibdev, p, 1);
return;
case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
/* here, p is the slave id */
do_slave_init(ibdev, p, 0);
return;
default:
......@@ -1477,7 +1658,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
}
ibev.device = ibdev_ptr;
ibev.element.port_num = port;
ibev.element.port_num = (u8) p;
ib_dispatch_event(&ibev);
}
......@@ -1497,18 +1678,28 @@ static int __init mlx4_ib_init(void)
if (!wq)
return -ENOMEM;
err = mlx4_ib_mcg_init();
if (err)
goto clean_wq;
err = mlx4_register_interface(&mlx4_ib_interface);
if (err) {
destroy_workqueue(wq);
return err;
}
if (err)
goto clean_mcg;
return 0;
clean_mcg:
mlx4_ib_mcg_destroy();
clean_wq:
destroy_workqueue(wq);
return err;
}
static void __exit mlx4_ib_cleanup(void)
{
mlx4_unregister_interface(&mlx4_ib_interface);
mlx4_ib_mcg_destroy();
destroy_workqueue(wq);
}
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -399,11 +399,20 @@ static inline void nes_write8(void __iomem *addr, u8 val)
writeb(val, addr);
}
enum nes_resource {
NES_RESOURCE_MW = 1,
NES_RESOURCE_FAST_MR,
NES_RESOURCE_PHYS_MR,
NES_RESOURCE_USER_MR,
NES_RESOURCE_PD,
NES_RESOURCE_QP,
NES_RESOURCE_CQ,
NES_RESOURCE_ARP
};
static inline int nes_alloc_resource(struct nes_adapter *nesadapter,
unsigned long *resource_array, u32 max_resources,
u32 *req_resource_num, u32 *next)
u32 *req_resource_num, u32 *next, enum nes_resource resource_type)
{
unsigned long flags;
u32 resource_num;
......@@ -414,7 +423,7 @@ static inline int nes_alloc_resource(struct nes_adapter *nesadapter,
if (resource_num >= max_resources) {
resource_num = find_first_zero_bit(resource_array, max_resources);
if (resource_num >= max_resources) {
printk(KERN_ERR PFX "%s: No available resourcess.\n", __func__);
printk(KERN_ERR PFX "%s: No available resources [type=%u].\n", __func__, resource_type);
spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
return -EMFILE;
}
......
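The new enum nes_resource above exists only so nes_alloc_resource() can report which pool was exhausted; the driver itself prints the raw numeric value. If a human-readable message were wanted, a lookup table along these lines would do it (illustrative only; nes_resource_names is not part of the driver):

static const char *const nes_resource_names[] = {
	[NES_RESOURCE_MW]      = "MW",
	[NES_RESOURCE_FAST_MR] = "fast MR",
	[NES_RESOURCE_PHYS_MR] = "phys MR",
	[NES_RESOURCE_USER_MR] = "user MR",
	[NES_RESOURCE_PD]      = "PD",
	[NES_RESOURCE_QP]      = "QP",
	[NES_RESOURCE_CQ]      = "CQ",
	[NES_RESOURCE_ARP]     = "ARP",
};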
......@@ -430,6 +430,8 @@ static void form_cm_frame(struct sk_buff *skb,
buf += sizeof(*tcph);
skb->ip_summed = CHECKSUM_PARTIAL;
if (!(cm_node->netdev->features & NETIF_F_IP_CSUM))
skb->ip_summed = CHECKSUM_NONE;
skb->protocol = htons(0x800);
skb->data_len = 0;
skb->mac_len = ETH_HLEN;
......@@ -1356,7 +1358,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
else
netdev = nesvnic->netdev;
neigh = dst_neigh_lookup(&rt->dst, &dst_ip);
neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
rcu_read_lock();
if (neigh) {
......@@ -1465,12 +1467,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->loopbackpartner = NULL;
/* get the mac addr for the remote node */
if (ipv4_is_loopback(htonl(cm_node->rem_addr))) {
arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE);
} else {
oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
}
oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
if (arpindex < 0) {
kfree(cm_node);
return NULL;
......@@ -3153,11 +3151,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nesqp->nesqp_context->tcpPorts[1] =
cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
nesqp->nesqp_context->ip0 =
cpu_to_le32(ntohl(nesvnic->local_ipaddr));
else
nesqp->nesqp_context->ip0 =
nesqp->nesqp_context->ip0 =
cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
nesqp->nesqp_context->misc2 |= cpu_to_le32(
......@@ -3182,10 +3176,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
memset(&nes_quad, 0, sizeof(nes_quad));
nes_quad.DstIpAdrIndex =
cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
nes_quad.SrcIpadr = nesvnic->local_ipaddr;
else
nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
......@@ -3538,11 +3529,7 @@ static void cm_event_connected(struct nes_cm_event *event)
cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
nesqp->nesqp_context->tcpPorts[1] =
cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
nesqp->nesqp_context->ip0 =
cpu_to_le32(ntohl(nesvnic->local_ipaddr));
else
nesqp->nesqp_context->ip0 =
nesqp->nesqp_context->ip0 =
cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
nesqp->nesqp_context->misc2 |= cpu_to_le32(
......@@ -3571,10 +3558,7 @@ static void cm_event_connected(struct nes_cm_event *event)
nes_quad.DstIpAdrIndex =
cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
nes_quad.SrcIpadr = nesvnic->local_ipaddr;
else
nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
......
......@@ -3577,10 +3577,10 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p,"
" Tcp state = %s, iWARP state = %s\n",
" Tcp state = %d, iWARP state = %d\n",
async_event_id,
le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
tcp_state, iwarp_state);
aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
if (aeq_info & NES_AEQE_QP) {
......
This diff is collapsed.
......@@ -699,7 +699,7 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti
arp_index = 0;
err = nes_alloc_resource(nesadapter, nesadapter->allocated_arps,
nesadapter->arp_table_size, (u32 *)&arp_index, &nesadapter->next_arp_index);
nesadapter->arp_table_size, (u32 *)&arp_index, &nesadapter->next_arp_index, NES_RESOURCE_ARP);
if (err) {
nes_debug(NES_DBG_NETDEV, "nes_alloc_resource returned error = %u\n", err);
return err;
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.