提交 a5f66725 编写于 作者: D Doug Ledford

Merge branch 'misc' into k.o/for-next

...@@ -72,6 +72,7 @@ MODULE_LICENSE("Dual BSD/GPL"); ...@@ -72,6 +72,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_MAX_CM_RETRIES 15 #define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18 #define CMA_IBOE_PACKET_LIFETIME 18
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
static const char * const cma_events[] = { static const char * const cma_events[] = {
[RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved", [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
...@@ -4281,8 +4282,12 @@ static void cma_add_one(struct ib_device *device) ...@@ -4281,8 +4282,12 @@ static void cma_add_one(struct ib_device *device)
for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
supported_gids = roce_gid_type_mask_support(device, i); supported_gids = roce_gid_type_mask_support(device, i);
WARN_ON(!supported_gids); WARN_ON(!supported_gids);
cma_dev->default_gid_type[i - rdma_start_port(device)] = if (supported_gids & CMA_PREFERRED_ROCE_GID_TYPE)
find_first_bit(&supported_gids, BITS_PER_LONG); cma_dev->default_gid_type[i - rdma_start_port(device)] =
CMA_PREFERRED_ROCE_GID_TYPE;
else
cma_dev->default_gid_type[i - rdma_start_port(device)] =
find_first_bit(&supported_gids, BITS_PER_LONG);
cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0; cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
} }
......
...@@ -44,6 +44,8 @@ ...@@ -44,6 +44,8 @@
static struct workqueue_struct *gid_cache_wq; static struct workqueue_struct *gid_cache_wq;
static struct workqueue_struct *gid_cache_wq;
enum gid_op_type { enum gid_op_type {
GID_DEL = 0, GID_DEL = 0,
GID_ADD GID_ADD
......
...@@ -1314,6 +1314,61 @@ int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr, ...@@ -1314,6 +1314,61 @@ int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
} }
EXPORT_SYMBOL(ib_modify_qp_with_udata); EXPORT_SYMBOL(ib_modify_qp_with_udata);
/**
 * ib_get_eth_speed - Derive IB speed/width attributes from the netdev link.
 * @dev:      IB device whose underlying netdev is queried
 * @port_num: port number on @dev (must be an Ethernet/RoCE port)
 * @speed:    out: IB_SPEED_* value approximating the Ethernet link speed
 * @width:    out: IB_WIDTH_* value approximating the Ethernet link speed
 *
 * Returns 0 on success, -EINVAL if the port is not Ethernet, -EOPNOTSUPP if
 * the driver provides no get_netdev callback, -ENODEV if no netdev is bound.
 * If ethtool cannot report a speed, a 1Gb link is assumed (with a warning).
 */
int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
{
	int rc;
	u32 netdev_speed;
	struct net_device *netdev;
	struct ethtool_link_ksettings lksettings;

	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
		return -EINVAL;

	if (!dev->get_netdev)
		return -EOPNOTSUPP;

	netdev = dev->get_netdev(dev, port_num);
	if (!netdev)
		return -ENODEV;

	/* __ethtool_get_link_ksettings() must be called under RTNL. */
	rtnl_lock();
	rc = __ethtool_get_link_ksettings(netdev, &lksettings);
	rtnl_unlock();

	if (!rc) {
		netdev_speed = lksettings.base.speed;
	} else {
		netdev_speed = SPEED_1000;
		pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
			netdev_speed);
	}

	/*
	 * Drop the reference only after netdev->name has been consumed by the
	 * warning above; calling dev_put() first could let the netdev (and
	 * its name) be freed while we still dereference it.
	 */
	dev_put(netdev);

	/* Map the raw Ethernet speed to the closest IB speed/width pair. */
	if (netdev_speed <= SPEED_1000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_SDR;
	} else if (netdev_speed <= SPEED_10000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_FDR10;
	} else if (netdev_speed <= SPEED_20000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_DDR;
	} else if (netdev_speed <= SPEED_25000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_EDR;
	} else if (netdev_speed <= SPEED_40000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_FDR10;
	} else {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_EDR;
	}

	return 0;
}
EXPORT_SYMBOL(ib_get_eth_speed);
int ib_modify_qp(struct ib_qp *qp, int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr, struct ib_qp_attr *qp_attr,
int qp_attr_mask) int qp_attr_mask)
......
...@@ -223,50 +223,6 @@ int bnxt_re_modify_device(struct ib_device *ibdev, ...@@ -223,50 +223,6 @@ int bnxt_re_modify_device(struct ib_device *ibdev,
return 0; return 0;
} }
/*
 * Translate the netdev's ethtool link speed into IB speed/width out-params.
 * Queries ethtool under RTNL; if the netdev exposes no get_link_ksettings
 * callback the speed is treated as unknown.
 */
static void __to_ib_speed_width(struct net_device *netdev, u8 *speed, u8 *width)
{
	struct ethtool_link_ksettings lksettings;
	u32 espeed;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_link_ksettings) {
		memset(&lksettings, 0, sizeof(lksettings));
		/* ethtool ksettings accessors must run under RTNL */
		rtnl_lock();
		netdev->ethtool_ops->get_link_ksettings(netdev, &lksettings);
		rtnl_unlock();
		espeed = lksettings.base.speed;
	} else {
		espeed = SPEED_UNKNOWN;
	}
	switch (espeed) {
	case SPEED_1000:
		*speed = IB_SPEED_SDR;
		*width = IB_WIDTH_1X;
		break;
	case SPEED_10000:
		*speed = IB_SPEED_QDR;
		*width = IB_WIDTH_1X;
		break;
	case SPEED_20000:
		*speed = IB_SPEED_DDR;
		*width = IB_WIDTH_4X;
		break;
	case SPEED_25000:
		*speed = IB_SPEED_EDR;
		*width = IB_WIDTH_1X;
		break;
	case SPEED_40000:
		*speed = IB_SPEED_QDR;
		*width = IB_WIDTH_4X;
		break;
	case SPEED_50000:
		/*
		 * NOTE(review): leaves *speed and *width unmodified, so the
		 * caller's previous values are silently kept for 50Gb links —
		 * confirm this is intentional rather than a missing mapping.
		 */
		break;
	default:
		/* Unknown/unlisted speeds fall back to the slowest mapping. */
		*speed = IB_SPEED_SDR;
		*width = IB_WIDTH_1X;
		break;
	}
}
/* Port */ /* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num, int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr) struct ib_port_attr *port_attr)
...@@ -308,25 +264,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num, ...@@ -308,25 +264,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
* IB stack to avoid race in the NETDEV_UNREG path * IB stack to avoid race in the NETDEV_UNREG path
*/ */
if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
__to_ib_speed_width(rdev->netdev, &port_attr->active_speed, if (!ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
&port_attr->active_width); &port_attr->active_width))
return 0; return -EINVAL;
}
int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
int port_modify_mask,
struct ib_port_modify *port_modify)
{
switch (port_modify_mask) {
case IB_PORT_SHUTDOWN:
break;
case IB_PORT_INIT_TYPE:
break;
case IB_PORT_RESET_QKEY_CNTR:
break;
default:
break;
}
return 0; return 0;
} }
...@@ -846,6 +786,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp) ...@@ -846,6 +786,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
struct bnxt_re_dev *rdev = qp->rdev; struct bnxt_re_dev *rdev = qp->rdev;
int rc; int rc;
bnxt_qplib_del_flush_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) { if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP"); dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
...@@ -860,6 +801,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp) ...@@ -860,6 +801,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
return rc; return rc;
} }
bnxt_qplib_del_flush_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
&rdev->qp1_sqp->qplib_qp); &rdev->qp1_sqp->qplib_qp);
if (rc) { if (rc) {
...@@ -1404,6 +1346,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, ...@@ -1404,6 +1346,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
} }
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state); qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
if (!qp->sumem &&
qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
dev_dbg(rdev_to_dev(rdev),
"Move QP = %p to flush list\n",
qp);
bnxt_qplib_add_flush_qp(&qp->qplib_qp);
}
if (!qp->sumem &&
qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
dev_dbg(rdev_to_dev(rdev),
"Move QP = %p out of flush list\n",
qp);
bnxt_qplib_del_flush_qp(&qp->qplib_qp);
}
} }
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
qp->qplib_qp.modify_flags |= qp->qplib_qp.modify_flags |=
...@@ -2414,6 +2371,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, ...@@ -2414,6 +2371,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
} }
cq->qplib_cq.max_wqe = entries; cq->qplib_cq.max_wqe = entries;
cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id; cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id;
cq->qplib_cq.nq = &rdev->nq;
rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq); rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) { if (rc) {
...@@ -2921,6 +2879,10 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) ...@@ -2921,6 +2879,10 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
sq->send_phantom = false; sq->send_phantom = false;
} }
} }
if (ncqe < budget)
ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
cqe + ncqe,
budget - ncqe);
if (!ncqe) if (!ncqe)
break; break;
......
...@@ -141,9 +141,6 @@ int bnxt_re_modify_device(struct ib_device *ibdev, ...@@ -141,9 +141,6 @@ int bnxt_re_modify_device(struct ib_device *ibdev,
struct ib_device_modify *device_modify); struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num, int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr); struct ib_port_attr *port_attr);
int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
int port_modify_mask,
struct ib_port_modify *port_modify);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num, int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable); struct ib_port_immutable *immutable);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num, int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
......
...@@ -70,7 +70,6 @@ static char version[] = ...@@ -70,7 +70,6 @@ static char version[] =
MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>"); MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver"); MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ROCE_DRV_MODULE_VERSION);
/* globals */ /* globals */
static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list); static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
...@@ -474,7 +473,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) ...@@ -474,7 +473,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->modify_device = bnxt_re_modify_device; ibdev->modify_device = bnxt_re_modify_device;
ibdev->query_port = bnxt_re_query_port; ibdev->query_port = bnxt_re_query_port;
ibdev->modify_port = bnxt_re_modify_port;
ibdev->get_port_immutable = bnxt_re_get_port_immutable; ibdev->get_port_immutable = bnxt_re_get_port_immutable;
ibdev->query_pkey = bnxt_re_query_pkey; ibdev->query_pkey = bnxt_re_query_pkey;
ibdev->query_gid = bnxt_re_query_gid; ibdev->query_gid = bnxt_re_query_gid;
...@@ -835,6 +833,42 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev) ...@@ -835,6 +833,42 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
mutex_unlock(&rdev->qp_lock); mutex_unlock(&rdev->qp_lock);
} }
static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
struct bnxt_qplib_gid gid;
u16 gid_idx, index;
int rc = 0;
if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
return 0;
if (!sgid_tbl) {
dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
return -EINVAL;
}
for (index = 0; index < sgid_tbl->active; index++) {
gid_idx = sgid_tbl->hw_id[index];
if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero)))
continue;
/* need to modify the VLAN enable setting of non VLAN GID only
* as setting is done for VLAN GID while adding GID
*/
if (sgid_tbl->vlan[index])
continue;
memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));
rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
rdev->qplib_res.netdev->dev_addr);
}
return rc;
}
static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev) static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{ {
u32 prio_map = 0, tmp_map = 0; u32 prio_map = 0, tmp_map = 0;
...@@ -854,8 +888,6 @@ static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev) ...@@ -854,8 +888,6 @@ static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
tmp_map = dcb_ieee_getapp_mask(netdev, &app); tmp_map = dcb_ieee_getapp_mask(netdev, &app);
prio_map |= tmp_map; prio_map |= tmp_map;
if (!prio_map)
prio_map = -EFAULT;
return prio_map; return prio_map;
} }
...@@ -881,10 +913,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev) ...@@ -881,10 +913,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
int rc; int rc;
/* Get priority for roce */ /* Get priority for roce */
rc = bnxt_re_get_priority_mask(rdev); prio_map = bnxt_re_get_priority_mask(rdev);
if (rc < 0)
return rc;
prio_map = (u8)rc;
if (prio_map == rdev->cur_prio_map) if (prio_map == rdev->cur_prio_map)
return 0; return 0;
...@@ -906,6 +935,16 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev) ...@@ -906,6 +935,16 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return rc; return rc;
} }
/* Actual priorities are not programmed as they are already
* done by L2 driver; just enable or disable priority vlan tagging
*/
if ((prio_map == 0 && rdev->qplib_res.prio) ||
(prio_map != 0 && !rdev->qplib_res.prio)) {
rdev->qplib_res.prio = prio_map ? true : false;
bnxt_re_update_gid(rdev);
}
return 0; return 0;
} }
...@@ -998,7 +1037,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) ...@@ -998,7 +1037,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
/* Establish RCFW Communication Channel to initialize the context /* Establish RCFW Communication Channel to initialize the context
* memory for the function and all child VFs * memory for the function and all child VFs
*/ */
rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw); rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
BNXT_RE_MAX_QPC_COUNT);
if (rc) if (rc)
goto fail; goto fail;
......
...@@ -220,19 +220,20 @@ struct bnxt_qplib_q { ...@@ -220,19 +220,20 @@ struct bnxt_qplib_q {
u16 q_full_delta; u16 q_full_delta;
u16 max_sge; u16 max_sge;
u32 psn; u32 psn;
bool flush_in_progress;
bool condition; bool condition;
bool single; bool single;
bool send_phantom; bool send_phantom;
u32 phantom_wqe_cnt; u32 phantom_wqe_cnt;
u32 phantom_cqe_cnt; u32 phantom_cqe_cnt;
u32 next_cq_cons; u32 next_cq_cons;
bool flushed;
}; };
struct bnxt_qplib_qp { struct bnxt_qplib_qp {
struct bnxt_qplib_pd *pd; struct bnxt_qplib_pd *pd;
struct bnxt_qplib_dpi *dpi; struct bnxt_qplib_dpi *dpi;
u64 qp_handle; u64 qp_handle;
#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
u32 id; u32 id;
u8 type; u8 type;
u8 sig_type; u8 sig_type;
...@@ -296,6 +297,8 @@ struct bnxt_qplib_qp { ...@@ -296,6 +297,8 @@ struct bnxt_qplib_qp {
dma_addr_t sq_hdr_buf_map; dma_addr_t sq_hdr_buf_map;
void *rq_hdr_buf; void *rq_hdr_buf;
dma_addr_t rq_hdr_buf_map; dma_addr_t rq_hdr_buf_map;
struct list_head sq_flush;
struct list_head rq_flush;
}; };
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base) #define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
...@@ -351,6 +354,7 @@ struct bnxt_qplib_cq { ...@@ -351,6 +354,7 @@ struct bnxt_qplib_cq {
u16 period; u16 period;
struct bnxt_qplib_hwq hwq; struct bnxt_qplib_hwq hwq;
u32 cnq_hw_ring_id; u32 cnq_hw_ring_id;
struct bnxt_qplib_nq *nq;
bool resize_in_progress; bool resize_in_progress;
struct scatterlist *sghead; struct scatterlist *sghead;
u32 nmap; u32 nmap;
...@@ -360,6 +364,9 @@ struct bnxt_qplib_cq { ...@@ -360,6 +364,9 @@ struct bnxt_qplib_cq {
unsigned long flags; unsigned long flags;
#define CQ_FLAGS_RESIZE_IN_PROG 1 #define CQ_FLAGS_RESIZE_IN_PROG 1
wait_queue_head_t waitq; wait_queue_head_t waitq;
struct list_head sqf_head, rqf_head;
atomic_t arm_state;
spinlock_t compl_lock; /* synch CQ handlers */
}; };
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq) #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
...@@ -417,6 +424,13 @@ struct bnxt_qplib_nq { ...@@ -417,6 +424,13 @@ struct bnxt_qplib_nq {
(struct bnxt_qplib_nq *nq, (struct bnxt_qplib_nq *nq,
void *srq, void *srq,
u8 event); u8 event);
struct workqueue_struct *cqn_wq;
};
struct bnxt_qplib_nq_work {
struct work_struct work;
struct bnxt_qplib_nq *nq;
struct bnxt_qplib_cq *cq;
}; };
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq); void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
...@@ -453,4 +467,13 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq); ...@@ -453,4 +467,13 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
unsigned long *flags);
void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
unsigned long *flags);
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe *cqe,
int num_cqes);
#endif /* __BNXT_QPLIB_FP_H__ */ #endif /* __BNXT_QPLIB_FP_H__ */
...@@ -44,6 +44,9 @@ ...@@ -44,6 +44,9 @@
#include "roce_hsi.h" #include "roce_hsi.h"
#include "qplib_res.h" #include "qplib_res.h"
#include "qplib_rcfw.h" #include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
static void bnxt_qplib_service_creq(unsigned long data); static void bnxt_qplib_service_creq(unsigned long data);
/* Hardware communication channel */ /* Hardware communication channel */
...@@ -279,16 +282,29 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, ...@@ -279,16 +282,29 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_qp_event *qp_event) struct creq_qp_event *qp_event)
{ {
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
struct creq_qp_error_notification *err_event;
struct bnxt_qplib_crsq *crsqe; struct bnxt_qplib_crsq *crsqe;
unsigned long flags; unsigned long flags;
struct bnxt_qplib_qp *qp;
u16 cbit, blocked = 0; u16 cbit, blocked = 0;
u16 cookie; u16 cookie;
__le16 mcookie; __le16 mcookie;
u32 qp_id;
switch (qp_event->event) { switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
err_event = (struct creq_qp_error_notification *)qp_event;
qp_id = le32_to_cpu(err_event->xid);
qp = rcfw->qp_tbl[qp_id].qp_handle;
dev_dbg(&rcfw->pdev->dev, dev_dbg(&rcfw->pdev->dev,
"QPLIB: Received QP error notification"); "QPLIB: Received QP error notification");
dev_dbg(&rcfw->pdev->dev,
"QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
qp_id, err_event->req_err_state_reason,
err_event->res_err_state_reason);
bnxt_qplib_acquire_cq_locks(qp, &flags);
bnxt_qplib_mark_qp_error(qp);
bnxt_qplib_release_cq_locks(qp, &flags);
break; break;
default: default:
/* Command Response */ /* Command Response */
...@@ -507,6 +523,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, ...@@ -507,6 +523,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{ {
kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl); kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
...@@ -514,7 +531,8 @@ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) ...@@ -514,7 +531,8 @@ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
} }
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw) struct bnxt_qplib_rcfw *rcfw,
int qp_tbl_sz)
{ {
rcfw->pdev = pdev; rcfw->pdev = pdev;
rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT; rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
...@@ -541,6 +559,12 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, ...@@ -541,6 +559,12 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
if (!rcfw->crsqe_tbl) if (!rcfw->crsqe_tbl)
goto fail; goto fail;
rcfw->qp_tbl_size = qp_tbl_sz;
rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
GFP_KERNEL);
if (!rcfw->qp_tbl)
goto fail;
return 0; return 0;
fail: fail:
......
...@@ -148,6 +148,11 @@ struct bnxt_qplib_rcfw_sbuf { ...@@ -148,6 +148,11 @@ struct bnxt_qplib_rcfw_sbuf {
u32 size; u32 size;
}; };
struct bnxt_qplib_qp_node {
u32 qp_id; /* QP id */
void *qp_handle; /* ptr to qplib_qp */
};
/* RCFW Communication Channels */ /* RCFW Communication Channels */
struct bnxt_qplib_rcfw { struct bnxt_qplib_rcfw {
struct pci_dev *pdev; struct pci_dev *pdev;
...@@ -181,11 +186,13 @@ struct bnxt_qplib_rcfw { ...@@ -181,11 +186,13 @@ struct bnxt_qplib_rcfw {
/* Actual Cmd and Resp Queues */ /* Actual Cmd and Resp Queues */
struct bnxt_qplib_hwq cmdq; struct bnxt_qplib_hwq cmdq;
struct bnxt_qplib_crsq *crsqe_tbl; struct bnxt_qplib_crsq *crsqe_tbl;
int qp_tbl_size;
struct bnxt_qplib_qp_node *qp_tbl;
}; };
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw); struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_rcfw *rcfw,
...@@ -207,4 +214,5 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, ...@@ -207,4 +214,5 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn); struct bnxt_qplib_ctx *ctx, int is_virtfn);
void bnxt_qplib_mark_qp_error(void *qp_handle);
#endif /* __BNXT_QPLIB_RCFW_H__ */ #endif /* __BNXT_QPLIB_RCFW_H__ */
...@@ -468,9 +468,11 @@ static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res, ...@@ -468,9 +468,11 @@ static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
kfree(sgid_tbl->tbl); kfree(sgid_tbl->tbl);
kfree(sgid_tbl->hw_id); kfree(sgid_tbl->hw_id);
kfree(sgid_tbl->ctx); kfree(sgid_tbl->ctx);
kfree(sgid_tbl->vlan);
sgid_tbl->tbl = NULL; sgid_tbl->tbl = NULL;
sgid_tbl->hw_id = NULL; sgid_tbl->hw_id = NULL;
sgid_tbl->ctx = NULL; sgid_tbl->ctx = NULL;
sgid_tbl->vlan = NULL;
sgid_tbl->max = 0; sgid_tbl->max = 0;
sgid_tbl->active = 0; sgid_tbl->active = 0;
} }
...@@ -491,8 +493,15 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res, ...@@ -491,8 +493,15 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
if (!sgid_tbl->ctx) if (!sgid_tbl->ctx)
goto out_free2; goto out_free2;
sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
if (!sgid_tbl->vlan)
goto out_free3;
sgid_tbl->max = max; sgid_tbl->max = max;
return 0; return 0;
out_free3:
kfree(sgid_tbl->ctx);
sgid_tbl->ctx = NULL;
out_free2: out_free2:
kfree(sgid_tbl->hw_id); kfree(sgid_tbl->hw_id);
sgid_tbl->hw_id = NULL; sgid_tbl->hw_id = NULL;
...@@ -514,6 +523,7 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res, ...@@ -514,6 +523,7 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
} }
memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max); memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
sgid_tbl->active = 0; sgid_tbl->active = 0;
} }
......
...@@ -116,6 +116,7 @@ struct bnxt_qplib_sgid_tbl { ...@@ -116,6 +116,7 @@ struct bnxt_qplib_sgid_tbl {
u16 max; u16 max;
u16 active; u16 active;
void *ctx; void *ctx;
u8 *vlan;
}; };
struct bnxt_qplib_pkey_tbl { struct bnxt_qplib_pkey_tbl {
...@@ -188,6 +189,7 @@ struct bnxt_qplib_res { ...@@ -188,6 +189,7 @@ struct bnxt_qplib_res {
struct bnxt_qplib_sgid_tbl sgid_tbl; struct bnxt_qplib_sgid_tbl sgid_tbl;
struct bnxt_qplib_pkey_tbl pkey_tbl; struct bnxt_qplib_pkey_tbl pkey_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl; struct bnxt_qplib_dpi_tbl dpi_tbl;
bool prio;
}; };
#define to_bnxt_qplib(ptr, type, member) \ #define to_bnxt_qplib(ptr, type, member) \
......
...@@ -213,6 +213,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, ...@@ -213,6 +213,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
} }
memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero)); sizeof(bnxt_qplib_gid_zero));
sgid_tbl->vlan[index] = 0;
sgid_tbl->active--; sgid_tbl->active--;
dev_dbg(&res->pdev->dev, dev_dbg(&res->pdev->dev,
"QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x", "QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x",
...@@ -265,28 +266,32 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, ...@@ -265,28 +266,32 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct cmdq_add_gid req; struct cmdq_add_gid req;
struct creq_add_gid_resp resp; struct creq_add_gid_resp resp;
u16 cmd_flags = 0; u16 cmd_flags = 0;
u32 temp32[4];
u16 temp16[3];
int rc; int rc;
RCFW_CMD_PREP(req, ADD_GID, cmd_flags); RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
memcpy(temp32, gid->data, sizeof(struct bnxt_qplib_gid)); req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
req.gid[0] = cpu_to_be32(temp32[3]); req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
req.gid[1] = cpu_to_be32(temp32[2]); req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
req.gid[2] = cpu_to_be32(temp32[1]); req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
req.gid[3] = cpu_to_be32(temp32[0]); /*
if (vlan_id != 0xFFFF) * driver should ensure that all RoCE traffic is always VLAN
req.vlan = cpu_to_le16((vlan_id & * tagged if RoCE traffic is running on non-zero VLAN ID or
CMDQ_ADD_GID_VLAN_VLAN_ID_MASK) | * RoCE traffic is running on non-zero Priority.
CMDQ_ADD_GID_VLAN_TPID_TPID_8100 | */
CMDQ_ADD_GID_VLAN_VLAN_EN); if ((vlan_id != 0xFFFF) || res->prio) {
if (vlan_id != 0xFFFF)
req.vlan = cpu_to_le16
(vlan_id & CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
req.vlan |= cpu_to_le16
(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
CMDQ_ADD_GID_VLAN_VLAN_EN);
}
/* MAC in network format */ /* MAC in network format */
memcpy(temp16, smac, 6); req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
req.src_mac[0] = cpu_to_be16(temp16[0]); req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
req.src_mac[1] = cpu_to_be16(temp16[1]); req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
req.src_mac[2] = cpu_to_be16(temp16[2]);
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0); (void *)&resp, NULL, 0);
...@@ -297,6 +302,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, ...@@ -297,6 +302,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
/* Add GID to the sgid_tbl */ /* Add GID to the sgid_tbl */
memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
sgid_tbl->active++; sgid_tbl->active++;
if (vlan_id != 0xFFFF)
sgid_tbl->vlan[free_idx] = 1;
dev_dbg(&res->pdev->dev, dev_dbg(&res->pdev->dev,
"QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x", "QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x",
free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active); free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);
...@@ -306,6 +314,43 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, ...@@ -306,6 +314,43 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return 0; return 0;
} }
/*
 * Issue a MODIFY_GID firmware command to refresh an existing SGID entry:
 * re-sends the GID bytes and source MAC and, when priority tagging is
 * active (res->prio), enables VLAN tagging (TPID 0x8100) on the entry.
 *
 * @sgid_tbl: SGID table the entry lives in (used to reach the rcfw channel)
 * @gid:      GID value to program (copied byte-swapped into the request)
 * @gid_idx:  hardware GID index to modify
 * @smac:     source MAC in network byte order
 *
 * Returns the rcfw send status (0 on success).
 */
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			   struct bnxt_qplib_gid *gid, u16 gid_idx,
			   u8 *smac)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_gid_resp resp;
	struct cmdq_modify_gid req;
	int rc;
	u16 cmd_flags = 0;

	RCFW_CMD_PREP(req, MODIFY_GID, cmd_flags);

	/* GID words are sent big-endian, in reverse word order. */
	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
	req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
	req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
	req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
	if (res->prio) {
		/*
		 * NOTE(review): the |= assumes RCFW_CMD_PREP zero-initializes
		 * req, and reuses CMDQ_ADD_GID_* flag values for a MODIFY_GID
		 * request — presumably the field layouts match; confirm
		 * against the CMDQ_MODIFY_GID definitions.
		 */
		req.vlan |= cpu_to_le16
			(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
			 CMDQ_ADD_GID_VLAN_VLAN_EN);
	}

	/* MAC in network format */
	req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
	req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
	req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

	req.gid_index = cpu_to_le16(gid_idx);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	return rc;
}
/* pkeys */ /* pkeys */
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index, struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
......
...@@ -135,6 +135,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, ...@@ -135,6 +135,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id, struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
bool update, u32 *index); bool update, u32 *index);
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx, u8 *smac);
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index, struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
u16 *pkey); u16 *pkey);
......
...@@ -1473,8 +1473,8 @@ struct cmdq_modify_gid { ...@@ -1473,8 +1473,8 @@ struct cmdq_modify_gid {
u8 resp_size; u8 resp_size;
u8 reserved8; u8 reserved8;
__le64 resp_addr; __le64 resp_addr;
__le32 gid[4]; __be32 gid[4];
__le16 src_mac[3]; __be16 src_mac[3];
__le16 vlan; __le16 vlan;
#define CMDQ_MODIFY_GID_VLAN_VLAN_ID_MASK 0xfffUL #define CMDQ_MODIFY_GID_VLAN_VLAN_ID_MASK 0xfffUL
#define CMDQ_MODIFY_GID_VLAN_VLAN_ID_SFT 0 #define CMDQ_MODIFY_GID_VLAN_VLAN_ID_SFT 0
......
...@@ -45,7 +45,6 @@ ...@@ -45,7 +45,6 @@
MODULE_AUTHOR("Boyd Faulkner, Steve Wise"); MODULE_AUTHOR("Boyd Faulkner, Steve Wise");
MODULE_DESCRIPTION("Chelsio T3 RDMA Driver"); MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
static void open_rnic_dev(struct t3cdev *); static void open_rnic_dev(struct t3cdev *);
static void close_rnic_dev(struct t3cdev *); static void close_rnic_dev(struct t3cdev *);
......
...@@ -44,7 +44,6 @@ ...@@ -44,7 +44,6 @@
MODULE_AUTHOR("Steve Wise"); MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver"); MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
static int allow_db_fc_on_t5; static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644); module_param(allow_db_fc_on_t5, int, 0644);
......
...@@ -96,7 +96,6 @@ MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features"); ...@@ -96,7 +96,6 @@ MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver"); MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
MODULE_VERSION(HFI1_DRIVER_VERSION);
/* /*
* MAX_PKT_RCV is the max # if packets processed per receive interrupt. * MAX_PKT_RCV is the max # if packets processed per receive interrupt.
......
...@@ -601,7 +601,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) ...@@ -601,7 +601,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
wqe = rvt_get_swqe_ptr(qp, qp->s_last); wqe = rvt_get_swqe_ptr(qp, qp->s_last);
send_context = qp_to_send_context(qp, priv->s_sc); send_context = qp_to_send_context(qp, priv->s_sc);
seq_printf(s, seq_printf(s,
"N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n", "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
iter->n, iter->n,
qp_idle(qp) ? "I" : "B", qp_idle(qp) ? "I" : "B",
qp->ibqp.qp_num, qp->ibqp.qp_num,
...@@ -624,6 +624,10 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) ...@@ -624,6 +624,10 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
qp->s_last, qp->s_acked, qp->s_cur, qp->s_last, qp->s_acked, qp->s_cur,
qp->s_tail, qp->s_head, qp->s_size, qp->s_tail, qp->s_head, qp->s_size,
qp->s_avail, qp->s_avail,
/* ack_queue ring pointers, size */
qp->s_tail_ack_queue, qp->r_head_ack_queue,
HFI1_MAX_RDMA_ATOMIC,
/* remote QP info */
qp->remote_qpn, qp->remote_qpn,
rdma_ah_get_dlid(&qp->remote_ah_attr), rdma_ah_get_dlid(&qp->remote_ah_attr),
rdma_ah_get_sl(&qp->remote_ah_attr), rdma_ah_get_sl(&qp->remote_ah_attr),
......
...@@ -77,7 +77,6 @@ MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2"); ...@@ -77,7 +77,6 @@ MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");
MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>"); MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver"); MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
static struct i40e_client i40iw_client; static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw"; static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";
......
...@@ -70,7 +70,6 @@ ...@@ -70,7 +70,6 @@
MODULE_AUTHOR("Roland Dreier"); MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
int mlx4_ib_sm_guid_assign = 0; int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444); module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
......
...@@ -67,7 +67,6 @@ ...@@ -67,7 +67,6 @@
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver"); MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
static char mlx5_version[] = static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v" DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
...@@ -1176,7 +1175,7 @@ static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k, ...@@ -1176,7 +1175,7 @@ static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
if (req->num_low_latency_bfregs > req->total_num_bfregs - 1) if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
return -EINVAL; return -EINVAL;
mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, alloated %d, using %d sys pages\n", mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no", MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
lib_uar_4k ? "yes" : "no", ref_bfregs, lib_uar_4k ? "yes" : "no", ref_bfregs,
req->total_num_bfregs, *num_sys_pages); req->total_num_bfregs, *num_sys_pages);
......
...@@ -49,7 +49,6 @@ ...@@ -49,7 +49,6 @@
MODULE_AUTHOR("Roland Dreier"); MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver"); MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG #ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
......
...@@ -63,7 +63,6 @@ ...@@ -63,7 +63,6 @@
MODULE_AUTHOR("NetEffect"); MODULE_AUTHOR("NetEffect");
MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver"); MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
int interrupt_mod_interval = 0; int interrupt_mod_interval = 0;
......
...@@ -58,7 +58,6 @@ ...@@ -58,7 +58,6 @@
#include "ocrdma_stats.h" #include "ocrdma_stats.h"
#include <rdma/ocrdma-abi.h> #include <rdma/ocrdma-abi.h>
MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
MODULE_AUTHOR("Emulex Corporation"); MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
......
...@@ -47,7 +47,6 @@ ...@@ -47,7 +47,6 @@
MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver"); MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation"); MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QEDR_MODULE_VERSION);
#define QEDR_WQ_MULTIPLIER_DFT (3) #define QEDR_WQ_MULTIPLIER_DFT (3)
...@@ -778,6 +777,7 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, ...@@ -778,6 +777,7 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
if (rc) if (rc)
goto init_err; goto init_err;
dev->user_dpm_enabled = dev_info.user_dpm_enabled;
dev->num_hwfns = dev_info.common.num_hwfns; dev->num_hwfns = dev_info.common.num_hwfns;
dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev); dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
......
...@@ -41,7 +41,6 @@ ...@@ -41,7 +41,6 @@
#include <linux/qed/roce_common.h> #include <linux/qed/roce_common.h>
#include "qedr_hsi_rdma.h" #include "qedr_hsi_rdma.h"
#define QEDR_MODULE_VERSION "8.10.10.0"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA" #define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
#define DP_NAME(dev) ((dev)->ibdev.name) #define DP_NAME(dev) ((dev)->ibdev.name)
...@@ -163,6 +162,8 @@ struct qedr_dev { ...@@ -163,6 +162,8 @@ struct qedr_dev {
struct qedr_qp *gsi_qp; struct qedr_qp *gsi_qp;
unsigned long enet_state; unsigned long enet_state;
u8 user_dpm_enabled;
}; };
#define QEDR_MAX_SQ_PBL (0x8000) #define QEDR_MAX_SQ_PBL (0x8000)
......
...@@ -376,6 +376,9 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev, ...@@ -376,6 +376,9 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
memset(&uresp, 0, sizeof(uresp)); memset(&uresp, 0, sizeof(uresp));
uresp.dpm_enabled = dev->user_dpm_enabled;
uresp.wids_enabled = 1;
uresp.wid_count = oparams.wid_count;
uresp.db_pa = ctx->dpi_phys_addr; uresp.db_pa = ctx->dpi_phys_addr;
uresp.db_size = ctx->dpi_size; uresp.db_size = ctx->dpi_size;
uresp.max_send_wr = dev->attr.max_sqe; uresp.max_send_wr = dev->attr.max_sqe;
......
...@@ -66,7 +66,6 @@ MODULE_PARM_DESC(compat_ddr_negotiate, ...@@ -66,7 +66,6 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel <ibsupport@intel.com>"); MODULE_AUTHOR("Intel <ibsupport@intel.com>");
MODULE_DESCRIPTION("Intel IB driver"); MODULE_DESCRIPTION("Intel IB driver");
MODULE_VERSION(QIB_DRIVER_VERSION);
/* /*
* QIB_PIO_MAXIBHDR is the max IB header size allowed for in our * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
......
...@@ -871,8 +871,6 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, ...@@ -871,8 +871,6 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
ib_dispatch_event(&event); ib_dispatch_event(&event);
} }
ret = subn_get_portinfo(smp, ibdev, port);
/* restore re-reg bit per o14-12.2.1 */ /* restore re-reg bit per o14-12.2.1 */
pip->clientrereg_resv_subnetto |= clientrereg; pip->clientrereg_resv_subnetto |= clientrereg;
......
...@@ -409,6 +409,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev) ...@@ -409,6 +409,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->ib_dev.query_port = usnic_ib_query_port; us_ibdev->ib_dev.query_port = usnic_ib_query_port;
us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey; us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
us_ibdev->ib_dev.query_gid = usnic_ib_query_gid; us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
us_ibdev->ib_dev.get_netdev = usnic_get_netdev;
us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer; us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd; us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd; us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
...@@ -720,7 +721,6 @@ static void __exit usnic_ib_destroy(void) ...@@ -720,7 +721,6 @@ static void __exit usnic_ib_destroy(void)
MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver"); MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>"); MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR); module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR); module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3"); MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
......
...@@ -226,27 +226,6 @@ static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp) ...@@ -226,27 +226,6 @@ static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
spin_unlock(&vf->lock); spin_unlock(&vf->lock);
} }
static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
u8 *active_width)
{
if (speed <= 10000) {
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_FDR10;
} else if (speed <= 20000) {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_DDR;
} else if (speed <= 30000) {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_QDR;
} else if (speed <= 40000) {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_FDR10;
} else {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_EDR;
}
}
static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd) static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{ {
if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN || if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
...@@ -326,12 +305,16 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, ...@@ -326,12 +305,16 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props) struct ib_port_attr *props)
{ {
struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
struct ethtool_link_ksettings cmd;
usnic_dbg("\n"); usnic_dbg("\n");
mutex_lock(&us_ibdev->usdev_lock); mutex_lock(&us_ibdev->usdev_lock);
__ethtool_get_link_ksettings(us_ibdev->netdev, &cmd); if (!ib_get_eth_speed(ibdev, port, &props->active_speed,
&props->active_width)) {
mutex_unlock(&us_ibdev->usdev_lock);
return -EINVAL;
}
/* props being zeroed by the caller, avoid zeroing it here */ /* props being zeroed by the caller, avoid zeroing it here */
props->lid = 0; props->lid = 0;
...@@ -355,8 +338,6 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, ...@@ -355,8 +338,6 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
props->pkey_tbl_len = 1; props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0; props->bad_pkey_cntr = 0;
props->qkey_viol_cntr = 0; props->qkey_viol_cntr = 0;
eth_speed_to_ib_speed(cmd.base.speed, &props->active_speed,
&props->active_width);
props->max_mtu = IB_MTU_4096; props->max_mtu = IB_MTU_4096;
props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu); props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
/* Userspace will adjust for hdrs */ /* Userspace will adjust for hdrs */
...@@ -424,6 +405,16 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index, ...@@ -424,6 +405,16 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
return 0; return 0;
} }
struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num)
{
struct usnic_ib_dev *us_ibdev = to_usdev(device);
if (us_ibdev->netdev)
dev_hold(us_ibdev->netdev);
return us_ibdev->netdev;
}
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey) u16 *pkey)
{ {
......
...@@ -48,6 +48,7 @@ int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, ...@@ -48,6 +48,7 @@ int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
struct ib_qp_init_attr *qp_init_attr); struct ib_qp_init_attr *qp_init_attr);
int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index, int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid); union ib_gid *gid);
struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num);
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey); u16 *pkey);
struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev, struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
......
...@@ -1119,5 +1119,4 @@ module_exit(pvrdma_cleanup); ...@@ -1119,5 +1119,4 @@ module_exit(pvrdma_cleanup);
MODULE_AUTHOR("VMware, Inc"); MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver"); MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
...@@ -38,7 +38,6 @@ ...@@ -38,7 +38,6 @@
MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib"); MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport"); MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION("0.2");
/* free resources for all ports on a device */ /* free resources for all ports on a device */
static void rxe_cleanup_ports(struct rxe_dev *rxe) static void rxe_cleanup_ports(struct rxe_dev *rxe)
......
...@@ -51,40 +51,16 @@ static int rxe_query_device(struct ib_device *dev, ...@@ -51,40 +51,16 @@ static int rxe_query_device(struct ib_device *dev,
return 0; return 0;
} }
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
u8 *active_width)
{
if (speed <= 1000) {
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_SDR;
} else if (speed <= 10000) {
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_FDR10;
} else if (speed <= 20000) {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_DDR;
} else if (speed <= 30000) {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_QDR;
} else if (speed <= 40000) {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_FDR10;
} else {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_EDR;
}
}
static int rxe_query_port(struct ib_device *dev, static int rxe_query_port(struct ib_device *dev,
u8 port_num, struct ib_port_attr *attr) u8 port_num, struct ib_port_attr *attr)
{ {
struct rxe_dev *rxe = to_rdev(dev); struct rxe_dev *rxe = to_rdev(dev);
struct rxe_port *port; struct rxe_port *port;
u32 speed; int rc = -EINVAL;
if (unlikely(port_num != 1)) { if (unlikely(port_num != 1)) {
pr_warn("invalid port_number %d\n", port_num); pr_warn("invalid port_number %d\n", port_num);
goto err1; goto out;
} }
port = &rxe->port; port = &rxe->port;
...@@ -93,29 +69,12 @@ static int rxe_query_port(struct ib_device *dev, ...@@ -93,29 +69,12 @@ static int rxe_query_port(struct ib_device *dev,
*attr = port->attr; *attr = port->attr;
mutex_lock(&rxe->usdev_lock); mutex_lock(&rxe->usdev_lock);
if (rxe->ndev->ethtool_ops->get_link_ksettings) { rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
struct ethtool_link_ksettings ks; &attr->active_width);
rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
speed = ks.base.speed;
} else if (rxe->ndev->ethtool_ops->get_settings) {
struct ethtool_cmd cmd;
rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
speed = cmd.speed;
} else {
pr_warn("%s speed is unknown, defaulting to 1000\n",
rxe->ndev->name);
speed = 1000;
}
rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
&attr->active_width);
mutex_unlock(&rxe->usdev_lock); mutex_unlock(&rxe->usdev_lock);
return 0; out:
return rc;
err1:
return -EINVAL;
} }
static int rxe_query_gid(struct ib_device *device, static int rxe_query_gid(struct ib_device *device,
......
...@@ -60,7 +60,6 @@ const char ipoib_driver_version[] = DRV_VERSION; ...@@ -60,7 +60,6 @@ const char ipoib_driver_version[] = DRV_VERSION;
MODULE_AUTHOR("Roland Dreier"); MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE; int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE; int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
......
...@@ -77,7 +77,6 @@ ...@@ -77,7 +77,6 @@
MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover"); MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz"); MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
MODULE_VERSION(DRV_VER);
static struct scsi_host_template iscsi_iser_sht; static struct scsi_host_template iscsi_iser_sht;
static struct iscsi_transport iscsi_iser_transport; static struct iscsi_transport iscsi_iser_transport;
......
...@@ -2710,7 +2710,6 @@ static void __exit isert_exit(void) ...@@ -2710,7 +2710,6 @@ static void __exit isert_exit(void)
} }
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org"); MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
......
...@@ -1078,4 +1078,3 @@ module_exit(opa_vnic_deinit); ...@@ -1078,4 +1078,3 @@ module_exit(opa_vnic_deinit);
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation"); MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel OPA Virtual Network driver"); MODULE_DESCRIPTION("Intel OPA Virtual Network driver");
MODULE_VERSION(DRV_VERSION);
...@@ -62,7 +62,6 @@ ...@@ -62,7 +62,6 @@
MODULE_AUTHOR("Roland Dreier"); MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator"); MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE); MODULE_INFO(release_date, DRV_RELDATE);
#if !defined(CONFIG_DYNAMIC_DEBUG) #if !defined(CONFIG_DYNAMIC_DEBUG)
......
...@@ -3565,6 +3565,7 @@ void ib_drain_qp(struct ib_qp *qp); ...@@ -3565,6 +3565,7 @@ void ib_drain_qp(struct ib_qp *qp);
int ib_resolve_eth_dmac(struct ib_device *device, int ib_resolve_eth_dmac(struct ib_device *device,
struct rdma_ah_attr *ah_attr); struct rdma_ah_attr *ah_attr);
int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{ {
......
...@@ -49,6 +49,9 @@ struct qedr_alloc_ucontext_resp { ...@@ -49,6 +49,9 @@ struct qedr_alloc_ucontext_resp {
__u32 sges_per_recv_wr; __u32 sges_per_recv_wr;
__u32 sges_per_srq_wr; __u32 sges_per_srq_wr;
__u32 max_cqes; __u32 max_cqes;
__u8 dpm_enabled;
__u8 wids_enabled;
__u16 wid_count;
}; };
struct qedr_alloc_pd_ureq { struct qedr_alloc_pd_ureq {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册