Commit d796e641 authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband: (29 commits)
  IB/mthca: Simplify use of size0 in work request posting
  IB/mthca: Factor out setting WQE UD segment entries
  IB/mthca: Factor out setting WQE remote address and atomic segment entries
  IB/mlx4: Factor out setting other WQE segments
  IB/mlx4: Factor out setting WQE data segment entries
  IB/mthca: Factor out setting WQE data segment entries
  IB/mlx4: Return receive queue sizes for userspace QPs from query QP
  IB/mlx4: Increase max outstanding RDMA reads as target
  RDMA/cma: Remove local write permission from QP access flags
  IB/mthca: Use uninitialized_var() for f0
  IB/cm: Make internal function cm_get_ack_delay() static
  IB/ipath: Remove ipath_get_user_pages_nocopy()
  IB/ipath: Make a few functions static
  mlx4_core: Reset device when internal error is detected
  IB/iser: Make a couple of functions static
  IB/mthca: Fix printk format used for firmware version in warning
  IB/mthca: Schedule MSI support for removal
  IB/ehca: Fix warnings issued by checkpatch.pl
  IB/ehca: Restructure ehca_set_pagebuf()
  IB/ehca: MR/MW structure refactoring
  ...
......@@ -310,3 +310,13 @@ Why: The arch/powerpc tree is the merged architecture for ppc32 and ppc64
Who: linuxppc-dev@ozlabs.org
---------------------------
What: mthca driver's MSI support
When: January 2008
Files: drivers/infiniband/hw/mthca/*.[ch]
Why: All mthca hardware also supports MSI-X, which provides
strictly more functionality than MSI. So there is no point in
having both MSI-X and MSI support in the driver.
Who: Roland Dreier <rolandd@cisco.com>
---------------------------
......@@ -3374,7 +3374,7 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
void cm_get_ack_delay(struct cm_device *cm_dev)
static void cm_get_ack_delay(struct cm_device *cm_dev)
{
struct ib_device_attr attr;
......
......@@ -573,7 +573,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
break;
case RDMA_TRANSPORT_IWARP:
if (!id_priv->cm_id.iw) {
qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
qp_attr->qp_access_flags = 0;
*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
} else
ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
......
......@@ -1914,6 +1914,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
fail3:
cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
cm_id->rem_ref(cm_id);
put_ep(&ep->com);
fail1:
out:
......
......@@ -79,7 +79,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
av->av.ipd = (ah_mult > 0) ?
((ehca_mult - 1) / ah_mult) : 0;
} else
av->av.ipd = ehca_static_rate;
av->av.ipd = ehca_static_rate;
av->av.lnh = ah_attr->ah_flags;
av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
......
......@@ -204,11 +204,11 @@ struct ehca_mr {
spinlock_t mrlock;
enum ehca_mr_flag flags;
u32 num_pages; /* number of MR pages */
u32 num_4k; /* number of 4k "page" portions to form MR */
u32 num_kpages; /* number of kernel pages */
u32 num_hwpages; /* number of hw pages to form MR */
int acl; /* ACL (stored here for usage in reregister) */
u64 *start; /* virtual start address (stored here for */
/* usage in reregister) */
/* usage in reregister) */
u64 size; /* size (stored here for usage in reregister) */
u32 fmr_page_size; /* page size for FMR */
u32 fmr_max_pages; /* max pages for FMR */
......@@ -217,9 +217,6 @@ struct ehca_mr {
/* fw specific data */
struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
struct h_galpas galpas;
/* data for userspace bridge */
u32 nr_of_pages;
void *pagearray;
};
struct ehca_mw {
......@@ -241,26 +238,29 @@ enum ehca_mr_pgi_type {
struct ehca_mr_pginfo {
enum ehca_mr_pgi_type type;
u64 num_pages;
u64 page_cnt;
u64 num_4k; /* number of 4k "page" portions */
u64 page_4k_cnt; /* counter for 4k "page" portions */
u64 next_4k; /* next 4k "page" portion in buffer/chunk/listelem */
/* type EHCA_MR_PGI_PHYS section */
int num_phys_buf;
struct ib_phys_buf *phys_buf_array;
u64 next_buf;
/* type EHCA_MR_PGI_USER section */
struct ib_umem *region;
struct ib_umem_chunk *next_chunk;
u64 next_nmap;
/* type EHCA_MR_PGI_FMR section */
u64 *page_list;
u64 next_listelem;
/* next_4k also used within EHCA_MR_PGI_FMR */
u64 num_kpages;
u64 kpage_cnt;
u64 num_hwpages; /* number of hw pages */
u64 hwpage_cnt; /* counter for hw pages */
u64 next_hwpage; /* next hw page in buffer/chunk/listelem */
union {
struct { /* type EHCA_MR_PGI_PHYS section */
int num_phys_buf;
struct ib_phys_buf *phys_buf_array;
u64 next_buf;
} phy;
struct { /* type EHCA_MR_PGI_USER section */
struct ib_umem *region;
struct ib_umem_chunk *next_chunk;
u64 next_nmap;
} usr;
struct { /* type EHCA_MR_PGI_FMR section */
u64 fmr_pgsize;
u64 *page_list;
u64 next_listelem;
} fmr;
} u;
};
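(Illustrative note, not part of the patch: after this refactoring the per-type cursors of ehca_mr_pginfo live in the u union and are selected by pginfo->type. A minimal sketch of that dispatch, compiled against the header shown above; the function name is hypothetical and the bookkeeping is simplified — the real code only advances the u.phy/u.usr/u.fmr cursors when the current buffer, chunk or list element is exhausted.)

/*
 * Sketch only (hypothetical helper): advance the per-type cursor in the
 * refactored ehca_mr_pginfo after one hw page has been emitted.
 */
static void example_pginfo_advance(struct ehca_mr_pginfo *pginfo)
{
	pginfo->hwpage_cnt++;
	pginfo->next_hwpage++;

	switch (pginfo->type) {
	case EHCA_MR_PGI_PHYS:
		pginfo->u.phy.next_buf++;      /* next ib_phys_buf entry */
		break;
	case EHCA_MR_PGI_USER:
		pginfo->u.usr.next_nmap++;     /* next sg entry in the umem chunk */
		break;
	case EHCA_MR_PGI_FMR:
		pginfo->u.fmr.next_listelem++; /* next entry in the FMR page_list */
		break;
	}
}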
/* output parameters for MR/FMR hipz calls */
......@@ -391,6 +391,6 @@ struct ehca_alloc_qp_parms {
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
#endif
......@@ -154,83 +154,83 @@ struct hcp_modify_qp_control_block {
u32 reserved_70_127[58]; /* 70 */
};
#define MQPCB_MASK_QKEY EHCA_BMASK_IBM(0,0)
#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM(2,2)
#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM(3,3)
#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM(4,4)
#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM(5,5)
#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM(6,6)
#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM(7,7)
#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM(8,8)
#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM(9,9)
#define MQPCB_QP_STATE EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11,11)
#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12,12)
#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13,13)
#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14,14)
#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15,15)
#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16,16)
#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17,17)
#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18,18)
#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19,19)
#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20,20)
#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21,21)
#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22,22)
#define MQPCB_DLID EHCA_BMASK_IBM(16,31)
#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23,23)
#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29,31)
#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24,24)
#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25,31)
#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25,25)
#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26,26)
#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27,27)
#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28,28)
#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12,31)
#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30,30)
#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31,31)
#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28,31)
#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32,32)
#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31,31)
#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33,33)
#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31)
#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34,34)
#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27,31)
#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35,35)
#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36,36)
#define MQPCB_DLID_AL EHCA_BMASK_IBM(16,31)
#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37,37)
#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31)
#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38,38)
#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25,31)
#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39,39)
#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40,40)
#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41,41)
#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24,31)
#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42,42)
#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12,31)
#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44,44)
#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45,45)
#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16,31)
#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46,46)
#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16,31)
#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47,47)
#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31,31)
#define MQPCB_QP_NUMBER EHCA_BMASK_IBM(8,31)
#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48,48)
#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31,31)
#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49,49)
#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16,31)
#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50,50)
#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51,51)
#define MQPCB_MASK_QKEY EHCA_BMASK_IBM( 0, 0)
#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM( 2, 2)
#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM( 3, 3)
#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM( 4, 4)
#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM( 5, 5)
#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM( 6, 6)
#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
#define MQPCB_QP_STATE EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14, 14)
#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15, 15)
#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16, 16)
#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17, 17)
#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
#define MQPCB_DLID EHCA_BMASK_IBM(16, 31)
#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29, 31)
#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25, 31)
#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12, 31)
#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28, 31)
#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31, 31)
#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27, 31)
#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
#define MQPCB_DLID_AL EHCA_BMASK_IBM(16, 31)
#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25, 31)
#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12, 31)
#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31, 31)
#define MQPCB_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31, 31)
#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16, 31)
#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
#endif /* __EHCA_CLASSES_PSERIES_H__ */
......@@ -97,7 +97,7 @@ int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
return ret;
}
struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
{
struct ehca_qp *ret = NULL;
unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
......
......@@ -96,7 +96,8 @@ int ehca_create_eq(struct ehca_shca *shca,
for (i = 0; i < nr_pages; i++) {
u64 rpage;
if (!(vpage = ipz_qpageit_get_inc(&eq->ipz_queue))) {
vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
if (!vpage) {
ret = H_RESOURCE;
goto create_eq_exit2;
}
......
......@@ -127,6 +127,7 @@ int ehca_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
int ret = 0;
u64 h_ret;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
ib_device);
struct hipz_query_port *rblock;
......@@ -137,7 +138,8 @@ int ehca_query_port(struct ib_device *ibdev,
return -ENOMEM;
}
if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_port1;
......@@ -197,6 +199,7 @@ int ehca_query_sma_attr(struct ehca_shca *shca,
u8 port, struct ehca_sma_attr *attr)
{
int ret = 0;
u64 h_ret;
struct hipz_query_port *rblock;
rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
......@@ -205,7 +208,8 @@ int ehca_query_sma_attr(struct ehca_shca *shca,
return -ENOMEM;
}
if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_sma_attr1;
......@@ -230,9 +234,11 @@ int ehca_query_sma_attr(struct ehca_shca *shca,
int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
int ret = 0;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
u64 h_ret;
struct ehca_shca *shca;
struct hipz_query_port *rblock;
shca = container_of(ibdev, struct ehca_shca, ib_device);
if (index > 16) {
ehca_err(&shca->ib_device, "Invalid index: %x.", index);
return -EINVAL;
......@@ -244,7 +250,8 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
return -ENOMEM;
}
if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_pkey1;
......@@ -262,6 +269,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *gid)
{
int ret = 0;
u64 h_ret;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
ib_device);
struct hipz_query_port *rblock;
......@@ -277,7 +285,8 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
return -ENOMEM;
}
if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_gid1;
......@@ -302,11 +311,12 @@ int ehca_modify_port(struct ib_device *ibdev,
struct ib_port_modify *props)
{
int ret = 0;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
struct ehca_shca *shca;
struct hipz_query_port *rblock;
u32 cap;
u64 hret;
shca = container_of(ibdev, struct ehca_shca, ib_device);
if ((props->set_port_cap_mask | props->clr_port_cap_mask)
& ~allowed_port_caps) {
ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
......@@ -325,7 +335,8 @@ int ehca_modify_port(struct ib_device *ibdev,
goto modify_port1;
}
if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (hret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto modify_port2;
......@@ -337,7 +348,8 @@ int ehca_modify_port(struct ib_device *ibdev,
hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
cap, props->init_type, port_modify_mask);
if (hret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Modify port failed hret=%lx", hret);
ehca_err(&shca->ib_device, "Modify port failed hret=%lx",
hret);
ret = -EINVAL;
}
......
......@@ -49,26 +49,26 @@
#include "hipz_fns.h"
#include "ipz_pt_fn.h"
#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31)
#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM(2,7)
#define EQE_CQ_NUMBER EHCA_BMASK_IBM(8,31)
#define EQE_QP_NUMBER EHCA_BMASK_IBM(8,31)
#define EQE_QP_TOKEN EHCA_BMASK_IBM(32,63)
#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32,63)
#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
#define NEQE_EVENT_CODE EHCA_BMASK_IBM(2,7)
#define NEQE_PORT_NUMBER EHCA_BMASK_IBM(8,15)
#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16)
#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16,16)
#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52,63)
#define ERROR_DATA_TYPE EHCA_BMASK_IBM(0,7)
#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM( 2, 7)
#define EQE_CQ_NUMBER EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_TOKEN EHCA_BMASK_IBM(32, 63)
#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32, 63)
#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
#define NEQE_EVENT_CODE EHCA_BMASK_IBM( 2, 7)
#define NEQE_PORT_NUMBER EHCA_BMASK_IBM( 8, 15)
#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16, 16)
#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE EHCA_BMASK_IBM( 0, 7)
static void queue_comp_task(struct ehca_cq *__cq);
static struct ehca_comp_pool* pool;
static struct ehca_comp_pool *pool;
#ifdef CONFIG_HOTPLUG_CPU
static struct notifier_block comp_pool_callback_nb;
#endif
......@@ -85,8 +85,8 @@ static inline void comp_event_callback(struct ehca_cq *cq)
return;
}
static void print_error_data(struct ehca_shca * shca, void* data,
u64* rblock, int length)
static void print_error_data(struct ehca_shca *shca, void *data,
u64 *rblock, int length)
{
u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
u64 resource = rblock[1];
......@@ -94,7 +94,7 @@ static void print_error_data(struct ehca_shca * shca, void* data,
switch (type) {
case 0x1: /* Queue Pair */
{
struct ehca_qp *qp = (struct ehca_qp*)data;
struct ehca_qp *qp = (struct ehca_qp *)data;
/* only print error data if AER is set */
if (rblock[6] == 0)
......@@ -107,7 +107,7 @@ static void print_error_data(struct ehca_shca * shca, void* data,
}
case 0x4: /* Completion Queue */
{
struct ehca_cq *cq = (struct ehca_cq*)data;
struct ehca_cq *cq = (struct ehca_cq *)data;
ehca_err(&shca->ib_device,
"CQ 0x%x (resource=%lx) has errors.",
......@@ -572,7 +572,7 @@ void ehca_tasklet_eq(unsigned long data)
ehca_process_eq((struct ehca_shca*)data, 1);
}
static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
{
int cpu;
unsigned long flags;
......@@ -636,7 +636,7 @@ static void queue_comp_task(struct ehca_cq *__cq)
__queue_comp_task(__cq, cct);
}
static void run_comp_task(struct ehca_cpu_comp_task* cct)
static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
struct ehca_cq *cq;
unsigned long flags;
......@@ -666,12 +666,12 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
static int comp_task(void *__cct)
{
struct ehca_cpu_comp_task* cct = __cct;
struct ehca_cpu_comp_task *cct = __cct;
int cql_empty;
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_INTERRUPTIBLE);
while(!kthread_should_stop()) {
while (!kthread_should_stop()) {
add_wait_queue(&cct->wait_queue, &wait);
spin_lock_irq(&cct->task_lock);
......@@ -745,7 +745,7 @@ static void take_over_work(struct ehca_comp_pool *pool,
list_splice_init(&cct->cq_list, &list);
while(!list_empty(&list)) {
while (!list_empty(&list)) {
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
list_del(&cq->entry);
......@@ -768,7 +768,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
if(!create_comp_task(pool, cpu)) {
if (!create_comp_task(pool, cpu)) {
ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
return NOTIFY_BAD;
}
......@@ -838,7 +838,7 @@ int ehca_create_comp_pool(void)
#ifdef CONFIG_HOTPLUG_CPU
comp_pool_callback_nb.notifier_call = comp_pool_callback;
comp_pool_callback_nb.priority =0;
comp_pool_callback_nb.priority = 0;
register_cpu_notifier(&comp_pool_callback_nb);
#endif
......
......@@ -81,8 +81,9 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
int num_phys_buf,
int mr_access_flags, u64 *iova_start);
struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
int mr_access_flags, struct ib_udata *udata);
struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int mr_access_flags,
struct ib_udata *udata);
int ehca_rereg_phys_mr(struct ib_mr *mr,
int mr_rereg_mask,
......@@ -192,7 +193,7 @@ void ehca_poll_eqs(unsigned long data);
void *ehca_alloc_fw_ctrlblock(gfp_t flags);
void ehca_free_fw_ctrlblock(void *ptr);
#else
#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
#endif
......
......@@ -107,7 +107,7 @@ static DEFINE_SPINLOCK(shca_list_lock);
static struct timer_list poll_eqs_timer;
#ifdef CONFIG_PPC_64K_PAGES
static struct kmem_cache *ctblk_cache = NULL;
static struct kmem_cache *ctblk_cache;
void *ehca_alloc_fw_ctrlblock(gfp_t flags)
{
......@@ -200,8 +200,8 @@ static void ehca_destroy_slab_caches(void)
#endif
}
#define EHCA_HCAAVER EHCA_BMASK_IBM(32,39)
#define EHCA_REVID EHCA_BMASK_IBM(40,63)
#define EHCA_HCAAVER EHCA_BMASK_IBM(32, 39)
#define EHCA_REVID EHCA_BMASK_IBM(40, 63)
static struct cap_descr {
u64 mask;
......@@ -263,22 +263,27 @@ int ehca_sense_attributes(struct ehca_shca *shca)
ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
if ((hcaaver == 1) && (revid == 0))
shca->hw_level = 0x11;
else if ((hcaaver == 1) && (revid == 1))
shca->hw_level = 0x12;
else if ((hcaaver == 1) && (revid == 2))
shca->hw_level = 0x13;
else if ((hcaaver == 2) && (revid == 0))
shca->hw_level = 0x21;
else if ((hcaaver == 2) && (revid == 0x10))
shca->hw_level = 0x22;
else {
if (hcaaver == 1) {
if (revid <= 3)
shca->hw_level = 0x10 | (revid + 1);
else
shca->hw_level = 0x14;
} else if (hcaaver == 2) {
if (revid == 0)
shca->hw_level = 0x21;
else if (revid == 0x10)
shca->hw_level = 0x22;
else if (revid == 0x20 || revid == 0x21)
shca->hw_level = 0x23;
}
if (!shca->hw_level) {
ehca_gen_warn("unknown hardware version"
" - assuming default level");
shca->hw_level = 0x22;
}
}
} else
shca->hw_level = ehca_hw_level;
ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
shca->sport[0].rate = IB_RATE_30_GBPS;
......@@ -290,7 +295,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
ehca_gen_dbg(" %s", hca_cap_descr[i].descr);
port = (struct hipz_query_port *) rblock;
port = (struct hipz_query_port *)rblock;
h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
if (h_ret != H_SUCCESS) {
ehca_gen_err("Cannot query port properties. h_ret=%lx",
......@@ -439,7 +444,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
return -EPERM;
}
ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10, 0);
ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10, 0);
if (IS_ERR(ibcq)) {
ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
return PTR_ERR(ibcq);
......@@ -666,7 +671,7 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
}
/* create internal protection domain */
ibpd = ehca_alloc_pd(&shca->ib_device, (void*)(-1), NULL);
ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
if (IS_ERR(ibpd)) {
ehca_err(&shca->ib_device, "Cannot create internal PD.");
ret = PTR_ERR(ibpd);
......@@ -863,18 +868,21 @@ int __init ehca_module_init(void)
printk(KERN_INFO "eHCA Infiniband Device Driver "
"(Rel.: SVNEHCA_0023)\n");
if ((ret = ehca_create_comp_pool())) {
ret = ehca_create_comp_pool();
if (ret) {
ehca_gen_err("Cannot create comp pool.");
return ret;
}
if ((ret = ehca_create_slab_caches())) {
ret = ehca_create_slab_caches();
if (ret) {
ehca_gen_err("Cannot create SLAB caches");
ret = -ENOMEM;
goto module_init1;
}
if ((ret = ibmebus_register_driver(&ehca_driver))) {
ret = ibmebus_register_driver(&ehca_driver);
if (ret) {
ehca_gen_err("Cannot register eHCA device driver");
ret = -EINVAL;
goto module_init2;
......
......@@ -101,15 +101,10 @@ int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
u64 *page_list,
int list_len);
int ehca_set_pagebuf(struct ehca_mr *e_mr,
struct ehca_mr_pginfo *pginfo,
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
u32 number,
u64 *kpage);
int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
struct ehca_mr_pginfo *pginfo,
u64 *rpage);
int ehca_mr_is_maxmr(u64 size,
u64 *iova_start);
......@@ -121,20 +116,6 @@ void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl);
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
int *ib_acl);
int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc);
int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc);
int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc);
int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc);
int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc);
int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc);
int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc);
void ehca_mr_deletenew(struct ehca_mr *mr);
#endif /*_EHCA_MRMW_H_*/
......@@ -53,13 +53,13 @@ struct ehca_vsgentry {
u32 length;
};
#define GRH_FLAG_MASK EHCA_BMASK_IBM(7,7)
#define GRH_IPVERSION_MASK EHCA_BMASK_IBM(0,3)
#define GRH_TCLASS_MASK EHCA_BMASK_IBM(4,12)
#define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13,31)
#define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32,47)
#define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48,55)
#define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56,63)
#define GRH_FLAG_MASK EHCA_BMASK_IBM( 7, 7)
#define GRH_IPVERSION_MASK EHCA_BMASK_IBM( 0, 3)
#define GRH_TCLASS_MASK EHCA_BMASK_IBM( 4, 12)
#define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13, 31)
#define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32, 47)
#define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48, 55)
#define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56, 63)
/*
* Unreliable Datagram Address Vector Format
......@@ -206,10 +206,10 @@ struct ehca_wqe {
};
#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0,0)
#define WC_IMM_DATA EHCA_BMASK_IBM(1,1)
#define WC_GRH_PRESENT EHCA_BMASK_IBM(2,2)
#define WC_SE_BIT EHCA_BMASK_IBM(3,3)
#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0, 0)
#define WC_IMM_DATA EHCA_BMASK_IBM(1, 1)
#define WC_GRH_PRESENT EHCA_BMASK_IBM(2, 2)
#define WC_SE_BIT EHCA_BMASK_IBM(3, 3)
#define WC_STATUS_ERROR_BIT 0x80000000
#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
#define WC_STATUS_PURGE_BIT 0x10
......
......@@ -602,10 +602,10 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
/* UD circumvention */
parms.act_nr_send_sges -= 2;
parms.act_nr_recv_sges -= 2;
swqe_size = offsetof(struct ehca_wqe,
u.ud_av.sg_list[parms.act_nr_send_sges]);
rwqe_size = offsetof(struct ehca_wqe,
u.ud_av.sg_list[parms.act_nr_recv_sges]);
swqe_size = offsetof(struct ehca_wqe, u.ud_av.sg_list[
parms.act_nr_send_sges]);
rwqe_size = offsetof(struct ehca_wqe, u.ud_av.sg_list[
parms.act_nr_recv_sges]);
}
if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
......@@ -690,8 +690,8 @@ struct ehca_qp *internal_create_qp(struct ib_pd *pd,
if (my_qp->send_cq) {
ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
if (ret) {
ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x",
ret);
ehca_err(pd->device,
"Couldn't assign qp to send_cq ret=%x", ret);
goto create_qp_exit4;
}
}
......@@ -749,7 +749,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
struct ehca_qp *ret;
ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
return IS_ERR(ret) ? (struct ib_qp *) ret : &ret->ib_qp;
return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
}
int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
......@@ -780,7 +780,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
if (IS_ERR(my_qp))
return (struct ib_srq *) my_qp;
return (struct ib_srq *)my_qp;
/* copy back return values */
srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
......@@ -875,7 +875,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
my_qp, qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}
bad_send_wqe_p = (void*)((u64)bad_send_wqe_p & (~(1L<<63)));
bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
qp_num, bad_send_wqe_p);
/* convert wqe pointer to vadr */
......@@ -890,7 +890,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
}
/* loop sets wqe's purge bit */
wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
*bad_wqe_cnt = 0;
while (wqe->optype != 0xff && wqe->wqef != 0xff) {
if (ehca_debug_level)
......@@ -898,7 +898,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
wqe->nr_of_data_seg = 0; /* suppress data access */
wqe->wqef = WQEF_PURGE; /* WQE to be purged */
q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
*bad_wqe_cnt = (*bad_wqe_cnt)+1;
}
/*
......@@ -1003,7 +1003,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
goto modify_qp_exit1;
}
ehca_dbg(ibqp->device,"ehca_qp=%p qp_num=%x current qp_state=%x "
ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
"new qp_state=%x attribute_mask=%x",
my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
......@@ -1019,7 +1019,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
goto modify_qp_exit1;
}
if ((mqpcb->qp_state = ib2ehca_qp_state(qp_new_state)))
mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
if (mqpcb->qp_state)
update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
else {
ret = -EINVAL;
......@@ -1077,7 +1078,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
spin_lock_irqsave(&my_qp->spinlock_s, flags);
squeue_locked = 1;
/* mark next free wqe */
wqe = (struct ehca_wqe*)
wqe = (struct ehca_wqe *)
ipz_qeit_get(&my_qp->ipz_squeue);
wqe->optype = wqe->wqef = 0xff;
ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
......@@ -1312,7 +1313,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx "
"ehca_qp=%p qp_num=%x",h_ret, my_qp, ibqp->qp_num);
"ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
goto modify_qp_exit2;
}
......@@ -1411,7 +1412,7 @@ int ehca_query_qp(struct ib_qp *qp,
}
if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
ehca_err(qp->device,"Invalid attribute mask "
ehca_err(qp->device, "Invalid attribute mask "
"ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
my_qp, qp->qp_num, qp_attr_mask);
return -EINVAL;
......@@ -1419,7 +1420,7 @@ int ehca_query_qp(struct ib_qp *qp,
qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!qpcb) {
ehca_err(qp->device,"Out of memory for qpcb "
ehca_err(qp->device, "Out of memory for qpcb "
"ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
return -ENOMEM;
}
......@@ -1431,7 +1432,7 @@ int ehca_query_qp(struct ib_qp *qp,
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(qp->device,"hipz_h_query_qp() failed "
ehca_err(qp->device, "hipz_h_query_qp() failed "
"ehca_qp=%p qp_num=%x h_ret=%lx",
my_qp, qp->qp_num, h_ret);
goto query_qp_exit1;
......@@ -1442,7 +1443,7 @@ int ehca_query_qp(struct ib_qp *qp,
if (qp_attr->cur_qp_state == -EINVAL) {
ret = -EINVAL;
ehca_err(qp->device,"Got invalid ehca_qp_state=%x "
ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
"ehca_qp=%p qp_num=%x",
qpcb->qp_state, my_qp, qp->qp_num);
goto query_qp_exit1;
......
......@@ -79,7 +79,8 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
}
if (ehca_debug_level) {
ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue);
ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
ipz_rqueue);
ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
}
......@@ -99,7 +100,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
struct ib_sge *sge = send_wr->sg_list;
ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
"send_flags=%x opcode=%x",idx, send_wr->wr_id,
"send_flags=%x opcode=%x", idx, send_wr->wr_id,
send_wr->num_sge, send_wr->send_flags,
send_wr->opcode);
if (mad_hdr) {
......@@ -116,7 +117,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
mad_hdr->attr_mod);
}
for (j = 0; j < send_wr->num_sge; j++) {
u8 *data = (u8 *) abs_to_virt(sge->addr);
u8 *data = (u8 *)abs_to_virt(sge->addr);
ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
"lkey=%x",
idx, j, data, sge->length, sge->lkey);
......@@ -534,9 +535,11 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
cqe_count++;
if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
struct ehca_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number);
struct ehca_qp *qp;
int purgeflag;
unsigned long flags;
qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
if (!qp) {
ehca_err(cq->device, "cq_num=%x qp_num=%x "
"could not find qp -> ignore cqe",
......@@ -551,8 +554,8 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
spin_unlock_irqrestore(&qp->spinlock_s, flags);
if (purgeflag) {
ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
"src_qp=%x",
ehca_dbg(cq->device,
"Got CQE with purged bit qp_num=%x src_qp=%x",
cqe->local_qp_number, cqe->remote_qp_number);
if (ehca_debug_level)
ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
......
......@@ -93,14 +93,14 @@ extern int ehca_debug_level;
#define ehca_gen_dbg(format, arg...) \
do { \
if (unlikely(ehca_debug_level)) \
printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n",\
printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
get_paca()->paca_index, __FUNCTION__, ## arg); \
} while (0)
#define ehca_gen_warn(format, arg...) \
do { \
if (unlikely(ehca_debug_level)) \
printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n",\
printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
get_paca()->paca_index, __FUNCTION__, ## arg); \
} while (0)
......@@ -114,12 +114,12 @@ extern int ehca_debug_level;
* <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
*/
#define ehca_dmp(adr, len, format, args...) \
do { \
unsigned int x; \
do { \
unsigned int x; \
unsigned int l = (unsigned int)(len); \
unsigned char *deb = (unsigned char*)(adr); \
unsigned char *deb = (unsigned char *)(adr); \
for (x = 0; x < l; x += 16) { \
printk("EHCA_DMP:%s " format \
printk(KERN_INFO "EHCA_DMP:%s " format \
" adr=%p ofs=%04x %016lx %016lx\n", \
__FUNCTION__, ##args, deb, x, \
*((u64 *)&deb[0]), *((u64 *)&deb[8])); \
......@@ -128,16 +128,16 @@ extern int ehca_debug_level;
} while (0)
/* define a bitmask, little endian version */
#define EHCA_BMASK(pos,length) (((pos)<<16)+(length))
#define EHCA_BMASK(pos, length) (((pos) << 16) + (length))
/* define a bitmask, the ibm way... */
#define EHCA_BMASK_IBM(from,to) (((63-to)<<16)+((to)-(from)+1))
#define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
/* internal function, don't use */
#define EHCA_BMASK_SHIFTPOS(mask) (((mask)>>16)&0xffff)
#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
/* internal function, don't use */
#define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64-(mask))&0xffff))
#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
/**
* EHCA_BMASK_SET - return value shifted and masked by mask
......@@ -145,14 +145,14 @@ extern int ehca_debug_level;
* variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask
* in variable
*/
#define EHCA_BMASK_SET(mask,value) \
((EHCA_BMASK_MASK(mask) & ((u64)(value)))<<EHCA_BMASK_SHIFTPOS(mask))
#define EHCA_BMASK_SET(mask, value) \
((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
/**
* EHCA_BMASK_GET - extract a parameter from value by mask
*/
#define EHCA_BMASK_GET(mask,value) \
(EHCA_BMASK_MASK(mask)& (((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask)))
#define EHCA_BMASK_GET(mask, value) \
(EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))
/* Converts ehca to ib return code */
......@@ -161,8 +161,11 @@ static inline int ehca2ib_return_code(u64 ehca_rc)
switch (ehca_rc) {
case H_SUCCESS:
return 0;
case H_RESOURCE: /* Resource in use */
case H_BUSY:
return -EBUSY;
case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
case H_CONSTRAINED: /* resource constraint */
case H_NO_MEM:
return -ENOMEM;
default:
......
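(A note on the EHCA_BMASK_* helpers cleaned up above, since the IBM bit numbering is easy to misread: bit 0 is the most significant bit of the 64-bit doubleword, and a mask constant encodes the shift position in its upper 16 bits and the field width in its lower 16 bits. The following standalone check reuses the macro definitions from the hunk above plus two mask constants that also appear in this patch; everything else is user-space test scaffolding, not driver code.)

#include <assert.h>
#include <stdint.h>

#define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
#define EHCA_BMASK_SET(mask, value) \
	((EHCA_BMASK_MASK(mask) & ((uint64_t)(value))) << EHCA_BMASK_SHIFTPOS(mask))
#define EHCA_BMASK_GET(mask, value) \
	(EHCA_BMASK_MASK(mask) & (((uint64_t)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))

#define ERROR_DATA_TYPE     EHCA_BMASK_IBM( 0,  7)  /* top byte of a u64 */
#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9,  9)  /* single flag bit   */

int main(void)
{
	/* IBM bits 0..7 are the most significant byte of the doubleword */
	assert(EHCA_BMASK_GET(ERROR_DATA_TYPE, 0xAB00000000000000ULL) == 0xAB);
	/* setting a one-bit field places a single bit at IBM position 9 */
	assert(EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1) == (1ULL << 54));
	return 0;
}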
......@@ -70,7 +70,7 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context)
static void ehca_mm_open(struct vm_area_struct *vma)
{
u32 *count = (u32*)vma->vm_private_data;
u32 *count = (u32 *)vma->vm_private_data;
if (!count) {
ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
vma->vm_start, vma->vm_end);
......@@ -86,7 +86,7 @@ static void ehca_mm_open(struct vm_area_struct *vma)
static void ehca_mm_close(struct vm_area_struct *vma)
{
u32 *count = (u32*)vma->vm_private_data;
u32 *count = (u32 *)vma->vm_private_data;
if (!count) {
ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
vma->vm_start, vma->vm_end);
......@@ -215,7 +215,8 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
case 2: /* qp rqueue_addr */
ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
qp->ib_qp.qp_num);
ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue);
ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
&qp->mm_count_rqueue);
if (unlikely(ret)) {
ehca_err(qp->ib_qp.device,
"ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
......@@ -227,7 +228,8 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
case 3: /* qp squeue_addr */
ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
qp->ib_qp.qp_num);
ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue);
ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
&qp->mm_count_squeue);
if (unlikely(ret)) {
ehca_err(qp->ib_qp.device,
"ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
......
......@@ -501,8 +501,8 @@ u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
return H_PARAMETER;
}
return hipz_h_register_rpage(adapter_handle,pagesize,queue_type,
qp_handle.handle,logical_address_of_page,
return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
qp_handle.handle, logical_address_of_page,
count);
}
......@@ -522,9 +522,9 @@ u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
qp_handle.handle, /* r6 */
0, 0, 0, 0, 0, 0);
if (log_addr_next_sq_wqe2processed)
*log_addr_next_sq_wqe2processed = (void*)outs[0];
*log_addr_next_sq_wqe2processed = (void *)outs[0];
if (log_addr_next_rq_wqe2processed)
*log_addr_next_rq_wqe2processed = (void*)outs[1];
*log_addr_next_rq_wqe2processed = (void *)outs[1];
return ret;
}
......
......@@ -50,7 +50,7 @@ int hcall_map_page(u64 physaddr, u64 *mapaddr)
int hcall_unmap_page(u64 mapaddr)
{
iounmap((volatile void __iomem*)mapaddr);
iounmap((volatile void __iomem *) mapaddr);
return 0;
}
......
......@@ -53,10 +53,10 @@
#define hipz_galpa_load_cq(gal, offset) \
hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
#define hipz_galpa_store_qp(gal,offset, value) \
#define hipz_galpa_store_qp(gal, offset, value) \
hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
#define hipz_galpa_load_qp(gal, offset) \
hipz_galpa_load(gal,QPTEMM_OFFSET(offset))
hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
{
......
......@@ -161,11 +161,11 @@ struct hipz_qptemm {
/* 0x1000 */
};
#define QPX_SQADDER EHCA_BMASK_IBM(48,63)
#define QPX_RQADDER EHCA_BMASK_IBM(48,63)
#define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3,3)
#define QPX_SQADDER EHCA_BMASK_IBM(48, 63)
#define QPX_RQADDER EHCA_BMASK_IBM(48, 63)
#define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3, 3)
#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm,x)
#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x)
/* MRMWPT Entry Memory Map */
struct hipz_mrmwmm {
......@@ -187,7 +187,7 @@ struct hipz_mrmwmm {
};
#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm,x)
#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x)
struct hipz_qpedmm {
/* 0x00 */
......@@ -238,7 +238,7 @@ struct hipz_qpedmm {
u64 qpedx_rrva3;
};
#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm,x)
#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x)
/* CQ Table Entry Memory Map */
struct hipz_cqtemm {
......@@ -263,12 +263,12 @@ struct hipz_cqtemm {
/* 0x1000 */
};
#define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32,63)
#define CQX_FECADDER EHCA_BMASK_IBM(32,63)
#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0,0)
#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0,0)
#define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32, 63)
#define CQX_FECADDER EHCA_BMASK_IBM(32, 63)
#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0)
#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0)
#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm,x)
#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x)
/* EQ Table Entry Memory Map */
struct hipz_eqtemm {
......@@ -293,7 +293,7 @@ struct hipz_eqtemm {
};
#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm,x)
#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x)
/* access control defines for MR/MW */
#define HIPZ_ACCESSCTRL_L_WRITE 0x00800000
......
......@@ -114,7 +114,7 @@ int ipz_queue_ctor(struct ipz_queue *queue,
*/
f = 0;
while (f < nr_of_pages) {
u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
int k;
if (!kpage)
goto ipz_queue_ctor_exit0; /*NOMEM*/
......
......@@ -240,7 +240,7 @@ void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
{
void *ret = ipz_qeit_get(queue);
u32 qe = *(u8 *) ret;
u32 qe = *(u8 *)ret;
if ((qe >> 7) != (queue->toggle_state & 1))
return NULL;
ipz_qeit_eq_get_inc(queue); /* this is a good one */
......@@ -250,7 +250,7 @@ static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
{
void *ret = ipz_qeit_get(queue);
u32 qe = *(u8 *) ret;
u32 qe = *(u8 *)ret;
if ((qe >> 7) != (queue->toggle_state & 1))
return NULL;
return ret;
......
......@@ -1889,7 +1889,7 @@ void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)
void ipath_run_led_override(unsigned long opaque)
static void ipath_run_led_override(unsigned long opaque)
{
struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
int timeoff;
......
......@@ -426,8 +426,8 @@ static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
* @buffer: data to write
* @len: number of bytes to write
*/
int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
const void *buffer, int len)
static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
const void *buffer, int len)
{
u8 single_byte;
int sub_len;
......
......@@ -70,7 +70,7 @@ static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
* If rewrite is true, and bits are set in the sendbufferror registers,
* we'll write to the buffer, for error recovery on parity errors.
*/
void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
{
u32 piobcnt;
unsigned long sbuf[4];
......
......@@ -776,7 +776,6 @@ void ipath_get_eeprom_info(struct ipath_devdata *);
int ipath_update_eeprom_log(struct ipath_devdata *dd);
void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
void ipath_disarm_senderrbufs(struct ipath_devdata *, int);
/*
* Set LED override, only the two LSBs have "public" meaning, but
......@@ -820,7 +819,6 @@ static inline u64 ipath_mdio_req(int cmd, int dev, int reg, int data)
#define IPATH_MDIO_CTRL_8355_REG_10 0x1D
int ipath_get_user_pages(unsigned long, size_t, struct page **);
int ipath_get_user_pages_nocopy(unsigned long, struct page **);
void ipath_release_user_pages(struct page **, size_t);
void ipath_release_user_pages_on_close(struct page **, size_t);
int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
......
......@@ -507,7 +507,7 @@ static int want_buffer(struct ipath_devdata *dd)
*
* Called when we run out of PIO buffers.
*/
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
static void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
unsigned long flags;
......
......@@ -171,32 +171,6 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
return ret;
}
/**
* ipath_get_user_pages_nocopy - lock a single page for I/O and mark shared
* @start_page: the page to lock
* @p: the output page structure
*
* This is similar to ipath_get_user_pages, but it's always one page, and we
* mark the page as locked for I/O, and shared. This is used for the user
* process page that contains the destination address for the rcvhdrq tail
* update, so we need to have the vma. If we don't do this, the page can be
* taken away from us on fork, even if the child never touches it, and then
* the user process never sees the tail register updates.
*/
int ipath_get_user_pages_nocopy(unsigned long page, struct page **p)
{
struct vm_area_struct *vma;
int ret;
down_write(&current->mm->mmap_sem);
ret = __get_user_pages(page, 1, p, &vma);
up_write(&current->mm->mmap_sem);
return ret;
}
void ipath_release_user_pages(struct page **p, size_t num_pages)
{
down_write(&current->mm->mmap_sem);
......
......@@ -488,7 +488,7 @@ bail:;
* This is called from ipath_do_rcv_timer() at interrupt level to check for
* QPs which need retransmits and to collect performance numbers.
*/
void ipath_ib_timer(struct ipath_ibdev *dev)
static void ipath_ib_timer(struct ipath_ibdev *dev)
{
struct ipath_qp *resend = NULL;
struct list_head *last;
......
......@@ -782,8 +782,6 @@ void ipath_update_mmap_info(struct ipath_ibdev *dev,
int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
void ipath_insert_rnr_queue(struct ipath_qp *qp);
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
......@@ -807,8 +805,6 @@ void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
int ipath_ib_piobufavail(struct ipath_ibdev *);
void ipath_ib_timer(struct ipath_ibdev *);
unsigned ipath_get_npkeys(struct ipath_devdata *);
u32 ipath_get_cr_errpkey(struct ipath_devdata *);
......
......@@ -1183,6 +1183,43 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq
return cur + nreq >= wq->max_post;
}
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
u64 remote_addr, u32 rkey)
{
rseg->raddr = cpu_to_be64(remote_addr);
rseg->rkey = cpu_to_be32(rkey);
rseg->reserved = 0;
}
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
} else {
aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
aseg->compare = 0;
}
}
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
struct ib_send_wr *wr)
{
memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}
static void set_data_seg(struct mlx4_wqe_data_seg *dseg,
struct ib_sge *sg)
{
dseg->byte_count = cpu_to_be32(sg->length);
dseg->lkey = cpu_to_be32(sg->lkey);
dseg->addr = cpu_to_be64(sg->addr);
}
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
......@@ -1238,26 +1275,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
cpu_to_be64(wr->wr.atomic.remote_addr);
((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
cpu_to_be32(wr->wr.atomic.rkey);
((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
wr->wr.atomic.rkey);
wqe += sizeof (struct mlx4_wqe_raddr_seg);
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
cpu_to_be64(wr->wr.atomic.swap);
((struct mlx4_wqe_atomic_seg *) wqe)->compare =
cpu_to_be64(wr->wr.atomic.compare_add);
} else {
((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
cpu_to_be64(wr->wr.atomic.compare_add);
((struct mlx4_wqe_atomic_seg *) wqe)->compare = 0;
}
set_atomic_seg(wqe, wr);
wqe += sizeof (struct mlx4_wqe_atomic_seg);
size += (sizeof (struct mlx4_wqe_raddr_seg) +
sizeof (struct mlx4_wqe_atomic_seg)) / 16;
......@@ -1266,15 +1290,10 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
cpu_to_be64(wr->wr.rdma.remote_addr);
((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
cpu_to_be32(wr->wr.rdma.rkey);
((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
wqe += sizeof (struct mlx4_wqe_raddr_seg);
size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
break;
default:
......@@ -1284,13 +1303,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case IB_QPT_UD:
memcpy(((struct mlx4_wqe_datagram_seg *) wqe)->av,
&to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
((struct mlx4_wqe_datagram_seg *) wqe)->dqpn =
cpu_to_be32(wr->wr.ud.remote_qpn);
((struct mlx4_wqe_datagram_seg *) wqe)->qkey =
cpu_to_be32(wr->wr.ud.remote_qkey);
set_datagram_seg(wqe, wr);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
break;
......@@ -1313,12 +1326,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
((struct mlx4_wqe_data_seg *) wqe)->byte_count =
cpu_to_be32(wr->sg_list[i].length);
((struct mlx4_wqe_data_seg *) wqe)->lkey =
cpu_to_be32(wr->sg_list[i].lkey);
((struct mlx4_wqe_data_seg *) wqe)->addr =
cpu_to_be64(wr->sg_list[i].addr);
set_data_seg(wqe, wr->sg_list + i);
wqe += sizeof (struct mlx4_wqe_data_seg);
size += sizeof (struct mlx4_wqe_data_seg) / 16;
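(Illustrative note: the factored-out helpers above are used as a chain of typed writers over an untyped wqe cursor. A condensed sketch of that pattern for the RDMA-write case follows; the function is hypothetical, assumes the helper definitions from this patch, and omits overflow and error handling.)

/*
 * Sketch only: serialize the RDMA-write portion of a send work request
 * into a WQE buffer and return the size consumed, in 16-byte units.
 * Relies on GCC's void-pointer arithmetic, as the driver itself does.
 */
static int example_build_rdma_write(void *wqe, struct ib_send_wr *wr)
{
	int i, size = 0;

	set_raddr_seg(wqe, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey);
	wqe  += sizeof (struct mlx4_wqe_raddr_seg);
	size += sizeof (struct mlx4_wqe_raddr_seg) / 16;

	for (i = 0; i < wr->num_sge; ++i) {
		set_data_seg(wqe, wr->sg_list + i);  /* one gather entry per SGE */
		wqe  += sizeof (struct mlx4_wqe_data_seg);
		size += sizeof (struct mlx4_wqe_data_seg) / 16;
	}

	return size;
}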
......@@ -1498,7 +1506,7 @@ static int to_ib_qp_access_flags(int mlx4_flags)
static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
struct mlx4_qp_path *path)
{
memset(ib_ah_attr, 0, sizeof *path);
memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;
if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
......@@ -1515,7 +1523,7 @@ static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
ib_ah_attr->grh.traffic_class =
(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
ib_ah_attr->grh.flow_label =
be32_to_cpu(path->tclass_flowlabel) & 0xffffff;
be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
memcpy(ib_ah_attr->grh.dgid.raw,
path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
}
......@@ -1560,7 +1568,10 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
}
qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;
if (qp_attr->qp_state == IB_QPS_INIT)
qp_attr->port_num = qp->port;
else
qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;
/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;
......@@ -1578,17 +1589,25 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
done:
qp_attr->cur_qp_state = qp_attr->qp_state;
qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
if (!ibqp->uobject) {
qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
qp_attr->cap.max_send_sge = qp->sq.max_gs;
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
qp_attr->cap.max_inline_data = (1 << qp->sq.wqe_shift) -
send_wqe_overhead(qp->ibqp.qp_type) -
sizeof (struct mlx4_wqe_inline_seg);
qp_init_attr->cap = qp_attr->cap;
qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
qp_attr->cap.max_send_sge = qp->sq.max_gs;
} else {
qp_attr->cap.max_send_wr = 0;
qp_attr->cap.max_send_sge = 0;
}
/*
* We don't support inline sends for kernel QPs (yet), and we
* don't know what userspace's value should be.
*/
qp_attr->cap.max_inline_data = 0;
qp_init_attr->cap = qp_attr->cap;
return 0;
}
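(Illustrative note on the query-QP change above: receive-queue sizes are now reported for userspace QPs as well, while send-queue sizes and max_inline_data read back as 0 for them. A minimal, assumed usage sketch from a hypothetical kernel consumer of ib_query_qp():)

/* Sketch only (hypothetical caller): read back RQ sizes after this patch. */
static void example_report_rq_caps(struct ib_qp *qp)
{
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	if (!ib_query_qp(qp, &attr, IB_QP_CAP, &init_attr))
		/* valid for userspace QPs too; SQ caps read as 0 for them */
		printk(KERN_INFO "max_recv_wr=%u max_recv_sge=%u\n",
		       attr.cap.max_recv_wr, attr.cap.max_recv_sge);
}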
......@@ -67,7 +67,7 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
static int msi = 0;
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero");
MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero (deprecated, use MSI-X instead)");
#else /* CONFIG_PCI_MSI */
......@@ -1117,9 +1117,21 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
if (msi_x && !mthca_enable_msi_x(mdev))
mdev->mthca_flags |= MTHCA_FLAG_MSI_X;
if (msi && !(mdev->mthca_flags & MTHCA_FLAG_MSI_X) &&
!pci_enable_msi(pdev))
mdev->mthca_flags |= MTHCA_FLAG_MSI;
else if (msi) {
static int warned;
if (!warned) {
printk(KERN_WARNING PFX "WARNING: MSI support will be "
"removed from the ib_mthca driver in January 2008.\n");
printk(KERN_WARNING " If you are using MSI and cannot "
"switch to MSI-X, please tell "
"<general@lists.openfabrics.org>.\n");
++warned;
}
if (!pci_enable_msi(pdev))
mdev->mthca_flags |= MTHCA_FLAG_MSI;
}
if (mthca_cmd_init(mdev)) {
mthca_err(mdev, "Failed to init command interface, aborting.\n");
......@@ -1135,7 +1147,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
goto err_cmd;
if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
mthca_warn(mdev, "HCA FW version %d.%d.%3d is old (%d.%d.%3d is current).\n",
mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n",
(int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
(int) (mdev->fw_ver & 0xffff),
(int) (mthca_hca_table[hca_type].latest_fw >> 32),
......
......@@ -1578,6 +1578,45 @@ static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
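(Side note on the firmware-version warning fix above: %3d pads with spaces while %03d pads with zeros, which matters when the sub-version is printed after a dot. A trivial user-space illustration with arbitrary version numbers:)

#include <stdio.h>

int main(void)
{
	/* old format: space-padded field */
	printf("HCA FW version %d.%d.%3d\n", 1, 2, 0);   /* "1.2.  0" */
	/* new format: zero-padded field */
	printf("HCA FW version %d.%d.%03d\n", 1, 2, 0);  /* "1.2.000" */
	return 0;
}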
return cur + nreq >= wq->max;
}
static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
u64 remote_addr, u32 rkey)
{
rseg->raddr = cpu_to_be64(remote_addr);
rseg->rkey = cpu_to_be32(rkey);
rseg->reserved = 0;
}
static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
struct ib_send_wr *wr)
{
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
} else {
aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
aseg->compare = 0;
}
}
static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
struct ib_send_wr *wr)
{
useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}
static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
struct ib_send_wr *wr)
{
memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
......@@ -1590,8 +1629,15 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int nreq;
int i;
int size;
int size0 = 0;
u32 f0 = 0;
/*
* f0 and size0 are only used if nreq != 0, and they will
* always be initialized the first time through the main loop
* before nreq is incremented. So nreq cannot become non-zero
* without initializing f0 and size0, and they are in fact
* never used uninitialized.
*/
int uninitialized_var(size0);
u32 uninitialized_var(f0);
int ind;
u8 op0 = 0;
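The uninitialized_var() annotation used for size0 and f0 above does not change behaviour; it only silences gcc's false "may be used uninitialized" warning, as the comment explains. A sketch of the usual gcc-side definition (assumed here; it is not part of this patch):

/* from include/linux/compiler-gcc*.h (sketch) */
#define uninitialized_var(x) x = x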
......@@ -1636,25 +1682,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
((struct mthca_raddr_seg *) wqe)->raddr =
cpu_to_be64(wr->wr.atomic.remote_addr);
((struct mthca_raddr_seg *) wqe)->rkey =
cpu_to_be32(wr->wr.atomic.rkey);
((struct mthca_raddr_seg *) wqe)->reserved = 0;
set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
wr->wr.atomic.rkey);
wqe += sizeof (struct mthca_raddr_seg);
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
((struct mthca_atomic_seg *) wqe)->swap_add =
cpu_to_be64(wr->wr.atomic.swap);
((struct mthca_atomic_seg *) wqe)->compare =
cpu_to_be64(wr->wr.atomic.compare_add);
} else {
((struct mthca_atomic_seg *) wqe)->swap_add =
cpu_to_be64(wr->wr.atomic.compare_add);
((struct mthca_atomic_seg *) wqe)->compare = 0;
}
set_atomic_seg(wqe, wr);
wqe += sizeof (struct mthca_atomic_seg);
size += (sizeof (struct mthca_raddr_seg) +
sizeof (struct mthca_atomic_seg)) / 16;
......@@ -1663,12 +1695,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
case IB_WR_RDMA_READ:
((struct mthca_raddr_seg *) wqe)->raddr =
cpu_to_be64(wr->wr.rdma.remote_addr);
((struct mthca_raddr_seg *) wqe)->rkey =
cpu_to_be32(wr->wr.rdma.rkey);
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
......@@ -1683,12 +1712,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
((struct mthca_raddr_seg *) wqe)->raddr =
cpu_to_be64(wr->wr.rdma.remote_addr);
((struct mthca_raddr_seg *) wqe)->rkey =
cpu_to_be32(wr->wr.rdma.rkey);
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
......@@ -1700,16 +1726,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case UD:
((struct mthca_tavor_ud_seg *) wqe)->lkey =
cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
((struct mthca_tavor_ud_seg *) wqe)->av_addr =
cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
((struct mthca_tavor_ud_seg *) wqe)->dqpn =
cpu_to_be32(wr->wr.ud.remote_qpn);
((struct mthca_tavor_ud_seg *) wqe)->qkey =
cpu_to_be32(wr->wr.ud.remote_qkey);
wqe += sizeof (struct mthca_tavor_ud_seg);
set_tavor_ud_seg(wqe, wr);
wqe += sizeof (struct mthca_tavor_ud_seg);
size += sizeof (struct mthca_tavor_ud_seg) / 16;
break;
......@@ -1734,13 +1752,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
((struct mthca_data_seg *) wqe)->byte_count =
cpu_to_be32(wr->sg_list[i].length);
((struct mthca_data_seg *) wqe)->lkey =
cpu_to_be32(wr->sg_list[i].lkey);
((struct mthca_data_seg *) wqe)->addr =
cpu_to_be64(wr->sg_list[i].addr);
wqe += sizeof (struct mthca_data_seg);
mthca_set_data_seg(wqe, wr->sg_list + i);
wqe += sizeof (struct mthca_data_seg);
size += sizeof (struct mthca_data_seg) / 16;
}
......@@ -1768,11 +1781,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
mthca_opcode[wr->opcode]);
wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
((wr->send_flags & IB_SEND_FENCE) ?
MTHCA_NEXT_FENCE : 0));
if (!size0) {
if (!nreq) {
size0 = size;
op0 = mthca_opcode[wr->opcode];
f0 = wr->send_flags & IB_SEND_FENCE ?
......@@ -1822,7 +1835,14 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
int nreq;
int i;
int size;
int size0 = 0;
/*
* size0 is only used if nreq != 0, and it will always be
* initialized the first time through the main loop before
* nreq is incremented. So nreq cannot become non-zero
* without initializing size0, and it is in fact never used
* uninitialized.
*/
int uninitialized_var(size0);
int ind;
void *wqe;
void *prev_wqe;
......@@ -1863,13 +1883,8 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
((struct mthca_data_seg *) wqe)->byte_count =
cpu_to_be32(wr->sg_list[i].length);
((struct mthca_data_seg *) wqe)->lkey =
cpu_to_be32(wr->sg_list[i].lkey);
((struct mthca_data_seg *) wqe)->addr =
cpu_to_be64(wr->sg_list[i].addr);
wqe += sizeof (struct mthca_data_seg);
mthca_set_data_seg(wqe, wr->sg_list + i);
wqe += sizeof (struct mthca_data_seg);
size += sizeof (struct mthca_data_seg) / 16;
}
......@@ -1881,7 +1896,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
((struct mthca_next_seg *) prev_wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD | size);
if (!size0)
if (!nreq)
size0 = size;
++ind;
......@@ -1903,7 +1918,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qp->rq.next_ind = ind;
qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
size0 = 0;
}
}
......@@ -1945,8 +1959,15 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int nreq;
int i;
int size;
int size0 = 0;
u32 f0 = 0;
/*
* f0 and size0 are only used if nreq != 0, and they will
* always be initialized the first time through the main loop
* before nreq is incremented. So nreq cannot become non-zero
* without initializing f0 and size0, and they are in fact
* never used uninitialized.
*/
int uninitialized_var(size0);
u32 uninitialized_var(f0);
int ind;
u8 op0 = 0;
......@@ -1966,7 +1987,6 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
size0 = 0;
/*
* Make sure that descriptors are written before
......@@ -2017,26 +2037,12 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
((struct mthca_raddr_seg *) wqe)->raddr =
cpu_to_be64(wr->wr.atomic.remote_addr);
((struct mthca_raddr_seg *) wqe)->rkey =
cpu_to_be32(wr->wr.atomic.rkey);
((struct mthca_raddr_seg *) wqe)->reserved = 0;
set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
wr->wr.atomic.rkey);
wqe += sizeof (struct mthca_raddr_seg);
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
((struct mthca_atomic_seg *) wqe)->swap_add =
cpu_to_be64(wr->wr.atomic.swap);
((struct mthca_atomic_seg *) wqe)->compare =
cpu_to_be64(wr->wr.atomic.compare_add);
} else {
((struct mthca_atomic_seg *) wqe)->swap_add =
cpu_to_be64(wr->wr.atomic.compare_add);
((struct mthca_atomic_seg *) wqe)->compare = 0;
}
wqe += sizeof (struct mthca_atomic_seg);
set_atomic_seg(wqe, wr);
wqe += sizeof (struct mthca_atomic_seg);
size += (sizeof (struct mthca_raddr_seg) +
sizeof (struct mthca_atomic_seg)) / 16;
break;
......@@ -2044,12 +2050,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
((struct mthca_raddr_seg *) wqe)->raddr =
cpu_to_be64(wr->wr.rdma.remote_addr);
((struct mthca_raddr_seg *) wqe)->rkey =
cpu_to_be32(wr->wr.rdma.rkey);
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
......@@ -2064,12 +2067,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
((struct mthca_raddr_seg *) wqe)->raddr =
cpu_to_be64(wr->wr.rdma.remote_addr);
((struct mthca_raddr_seg *) wqe)->rkey =
cpu_to_be32(wr->wr.rdma.rkey);
((struct mthca_raddr_seg *) wqe)->reserved = 0;
wqe += sizeof (struct mthca_raddr_seg);
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
......@@ -2081,14 +2081,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case UD:
memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
((struct mthca_arbel_ud_seg *) wqe)->dqpn =
cpu_to_be32(wr->wr.ud.remote_qpn);
((struct mthca_arbel_ud_seg *) wqe)->qkey =
cpu_to_be32(wr->wr.ud.remote_qkey);
wqe += sizeof (struct mthca_arbel_ud_seg);
set_arbel_ud_seg(wqe, wr);
wqe += sizeof (struct mthca_arbel_ud_seg);
size += sizeof (struct mthca_arbel_ud_seg) / 16;
break;
......@@ -2113,13 +2107,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
((struct mthca_data_seg *) wqe)->byte_count =
cpu_to_be32(wr->sg_list[i].length);
((struct mthca_data_seg *) wqe)->lkey =
cpu_to_be32(wr->sg_list[i].lkey);
((struct mthca_data_seg *) wqe)->addr =
cpu_to_be64(wr->sg_list[i].addr);
wqe += sizeof (struct mthca_data_seg);
mthca_set_data_seg(wqe, wr->sg_list + i);
wqe += sizeof (struct mthca_data_seg);
size += sizeof (struct mthca_data_seg) / 16;
}
......@@ -2151,7 +2140,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
((wr->send_flags & IB_SEND_FENCE) ?
MTHCA_NEXT_FENCE : 0));
if (!size0) {
if (!nreq) {
size0 = size;
op0 = mthca_opcode[wr->opcode];
f0 = wr->send_flags & IB_SEND_FENCE ?
......@@ -2241,20 +2230,12 @@ int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
((struct mthca_data_seg *) wqe)->byte_count =
cpu_to_be32(wr->sg_list[i].length);
((struct mthca_data_seg *) wqe)->lkey =
cpu_to_be32(wr->sg_list[i].lkey);
((struct mthca_data_seg *) wqe)->addr =
cpu_to_be64(wr->sg_list[i].addr);
mthca_set_data_seg(wqe, wr->sg_list + i);
wqe += sizeof (struct mthca_data_seg);
}
if (i < qp->rq.max_gs) {
((struct mthca_data_seg *) wqe)->byte_count = 0;
((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
((struct mthca_data_seg *) wqe)->addr = 0;
}
if (i < qp->rq.max_gs)
mthca_set_data_seg_inval(wqe);
qp->wrid[ind] = wr->wr_id;
......
......@@ -543,20 +543,12 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
((struct mthca_data_seg *) wqe)->byte_count =
cpu_to_be32(wr->sg_list[i].length);
((struct mthca_data_seg *) wqe)->lkey =
cpu_to_be32(wr->sg_list[i].lkey);
((struct mthca_data_seg *) wqe)->addr =
cpu_to_be64(wr->sg_list[i].addr);
mthca_set_data_seg(wqe, wr->sg_list + i);
wqe += sizeof (struct mthca_data_seg);
}
if (i < srq->max_gs) {
((struct mthca_data_seg *) wqe)->byte_count = 0;
((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
((struct mthca_data_seg *) wqe)->addr = 0;
}
if (i < srq->max_gs)
mthca_set_data_seg_inval(wqe);
((struct mthca_next_seg *) prev_wqe)->nda_op =
cpu_to_be32((ind << srq->wqe_shift) | 1);
......@@ -662,20 +654,12 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
((struct mthca_data_seg *) wqe)->byte_count =
cpu_to_be32(wr->sg_list[i].length);
((struct mthca_data_seg *) wqe)->lkey =
cpu_to_be32(wr->sg_list[i].lkey);
((struct mthca_data_seg *) wqe)->addr =
cpu_to_be64(wr->sg_list[i].addr);
mthca_set_data_seg(wqe, wr->sg_list + i);
wqe += sizeof (struct mthca_data_seg);
}
if (i < srq->max_gs) {
((struct mthca_data_seg *) wqe)->byte_count = 0;
((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
((struct mthca_data_seg *) wqe)->addr = 0;
}
if (i < srq->max_gs)
mthca_set_data_seg_inval(wqe);
srq->wrid[ind] = wr->wr_id;
srq->first_free = next_ind;
......
......@@ -113,4 +113,19 @@ struct mthca_mlx_seg {
__be16 vcrc;
};
static __always_inline void mthca_set_data_seg(struct mthca_data_seg *dseg,
struct ib_sge *sg)
{
dseg->byte_count = cpu_to_be32(sg->length);
dseg->lkey = cpu_to_be32(sg->lkey);
dseg->addr = cpu_to_be64(sg->addr);
}
static __always_inline void mthca_set_data_seg_inval(struct mthca_data_seg *dseg)
{
dseg->byte_count = 0;
dseg->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
dseg->addr = 0;
}
#endif /* MTHCA_WQE_H */
......@@ -310,8 +310,6 @@ int iser_conn_init(struct iser_conn **ib_conn);
void iser_conn_terminate(struct iser_conn *ib_conn);
void iser_conn_release(struct iser_conn *ib_conn);
void iser_rcv_completion(struct iser_desc *desc,
unsigned long dto_xfer_len);
......@@ -329,9 +327,6 @@ void iser_reg_single(struct iser_device *device,
struct iser_regd_buf *regd_buf,
enum dma_data_direction direction);
int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
enum iser_data_dir cmd_dir);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
enum iser_data_dir cmd_dir);
......
......@@ -103,8 +103,8 @@ void iser_reg_single(struct iser_device *device,
/**
* iser_start_rdma_unaligned_sg
*/
int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
enum iser_data_dir cmd_dir)
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
enum iser_data_dir cmd_dir)
{
int dma_nents;
struct ib_device *dev;
......
......@@ -310,6 +310,29 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
return ret;
}
/**
* Frees all conn objects and deallocs conn descriptor
*/
static void iser_conn_release(struct iser_conn *ib_conn)
{
struct iser_device *device = ib_conn->device;
BUG_ON(ib_conn->state != ISER_CONN_DOWN);
mutex_lock(&ig.connlist_mutex);
list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
iser_free_ib_conn_res(ib_conn);
ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
if (device != NULL)
iser_device_try_release(device);
if (ib_conn->iser_conn)
ib_conn->iser_conn->ib_conn = NULL;
kfree(ib_conn);
}
/**
* triggers start of the disconnect procedures and wait for them to be done
*/
......@@ -549,30 +572,6 @@ int iser_connect(struct iser_conn *ib_conn,
return err;
}
/**
* Frees all conn objects and deallocs conn descriptor
*/
void iser_conn_release(struct iser_conn *ib_conn)
{
struct iser_device *device = ib_conn->device;
BUG_ON(ib_conn->state != ISER_CONN_DOWN);
mutex_lock(&ig.connlist_mutex);
list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
iser_free_ib_conn_res(ib_conn);
ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
if (device != NULL)
iser_device_try_release(device);
if (ib_conn->iser_conn)
ib_conn->iser_conn->ib_conn = NULL;
kfree(ib_conn);
}
/**
* iser_reg_page_vec - Register physical memory
*
......
......@@ -30,41 +30,133 @@
* SOFTWARE.
*/
#include <linux/workqueue.h>
#include "mlx4.h"
void mlx4_handle_catas_err(struct mlx4_dev *dev)
enum {
MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
};
static DEFINE_SPINLOCK(catas_lock);
static LIST_HEAD(catas_list);
static struct workqueue_struct *catas_wq;
static struct work_struct catas_work;
static int internal_err_reset = 1;
module_param(internal_err_reset, int, 0644);
MODULE_PARM_DESC(internal_err_reset,
"Reset device on internal errors if non-zero (default 1)");
static void dump_err_buf(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
mlx4_err(dev, "Catastrophic error detected:\n");
mlx4_err(dev, "Internal error detected:\n");
for (i = 0; i < priv->fw.catas_size; ++i)
mlx4_err(dev, " buf[%02x]: %08x\n",
i, swab32(readl(priv->catas_err.map + i)));
}
mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
static void poll_catas(unsigned long dev_ptr)
{
struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
struct mlx4_priv *priv = mlx4_priv(dev);
if (readl(priv->catas_err.map)) {
dump_err_buf(dev);
mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
if (internal_err_reset) {
spin_lock(&catas_lock);
list_add(&priv->catas_err.list, &catas_list);
spin_unlock(&catas_lock);
queue_work(catas_wq, &catas_work);
}
} else
mod_timer(&priv->catas_err.timer,
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
}
void mlx4_map_catas_buf(struct mlx4_dev *dev)
static void catas_reset(struct work_struct *work)
{
struct mlx4_priv *priv, *tmppriv;
struct mlx4_dev *dev;
LIST_HEAD(tlist);
int ret;
spin_lock_irq(&catas_lock);
list_splice_init(&catas_list, &tlist);
spin_unlock_irq(&catas_lock);
list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
ret = mlx4_restart_one(priv->dev.pdev);
dev = &priv->dev;
if (ret)
mlx4_err(dev, "Reset failed (%d)\n", ret);
else
mlx4_dbg(dev, "Reset succeeded\n");
}
}
void mlx4_start_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
unsigned long addr;
INIT_LIST_HEAD(&priv->catas_err.list);
init_timer(&priv->catas_err.timer);
priv->catas_err.map = NULL;
addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
priv->fw.catas_offset;
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
if (!priv->catas_err.map)
mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n",
if (!priv->catas_err.map) {
mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
addr);
return;
}
priv->catas_err.timer.data = (unsigned long) dev;
priv->catas_err.timer.function = poll_catas;
priv->catas_err.timer.expires =
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
add_timer(&priv->catas_err.timer);
}
void mlx4_unmap_catas_buf(struct mlx4_dev *dev)
void mlx4_stop_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
del_timer_sync(&priv->catas_err.timer);
if (priv->catas_err.map)
iounmap(priv->catas_err.map);
spin_lock_irq(&catas_lock);
list_del(&priv->catas_err.list);
spin_unlock_irq(&catas_lock);
}
int __init mlx4_catas_init(void)
{
INIT_WORK(&catas_work, catas_reset);
catas_wq = create_singlethread_workqueue("mlx4_err");
if (!catas_wq)
return -ENOMEM;
return 0;
}
void mlx4_catas_cleanup(void)
{
destroy_workqueue(catas_wq);
}
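A short note on the recovery path added above: poll_catas() runs from a timer every MLX4_CATAS_POLL_INTERVAL and, on an internal error, only queues the device on catas_list under catas_lock; the reset itself (mlx4_restart_one(), i.e. a remove followed by a re-probe) is deferred to process context on the single-threaded "mlx4_err" workqueue. If the automatic reset is unwanted, internal_err_reset can be set to 0 at load time; since the parameter is registered with mode 0644 it should also be adjustable at runtime, presumably via /sys/module/mlx4_core/parameters/internal_err_reset.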
......@@ -89,14 +89,12 @@ struct mlx4_eq_context {
(1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
(1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
(1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
(1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
(1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
(1ull << MLX4_EVENT_TYPE_CMD))
#define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)
struct mlx4_eqe {
u8 reserved1;
......@@ -264,7 +262,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
for (i = 0; i < MLX4_EQ_CATAS; ++i)
for (i = 0; i < MLX4_NUM_EQ; ++i)
work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
return IRQ_RETVAL(work);
......@@ -281,14 +279,6 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr)
{
mlx4_handle_catas_err(dev_ptr);
/* MSI-X vectors always belong to us */
return IRQ_HANDLED;
}
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
int eq_num)
{
......@@ -490,11 +480,9 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
if (eq_table->have_irq)
free_irq(dev->pdev->irq, dev);
for (i = 0; i < MLX4_EQ_CATAS; ++i)
for (i = 0; i < MLX4_NUM_EQ; ++i)
if (eq_table->eq[i].have_irq)
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
if (eq_table->eq[MLX4_EQ_CATAS].have_irq)
free_irq(eq_table->eq[MLX4_EQ_CATAS].irq, dev);
}
static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev)
......@@ -598,32 +586,19 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
if (dev->flags & MLX4_FLAG_MSI_X) {
static const char *eq_name[] = {
[MLX4_EQ_COMP] = DRV_NAME " (comp)",
[MLX4_EQ_ASYNC] = DRV_NAME " (async)",
[MLX4_EQ_CATAS] = DRV_NAME " (catas)"
[MLX4_EQ_ASYNC] = DRV_NAME " (async)"
};
err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS,
&priv->eq_table.eq[MLX4_EQ_CATAS]);
if (err)
goto err_out_async;
for (i = 0; i < MLX4_EQ_CATAS; ++i) {
for (i = 0; i < MLX4_NUM_EQ; ++i) {
err = request_irq(priv->eq_table.eq[i].irq,
mlx4_msi_x_interrupt,
0, eq_name[i], priv->eq_table.eq + i);
if (err)
goto err_out_catas;
goto err_out_async;
priv->eq_table.eq[i].have_irq = 1;
}
err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq,
mlx4_catas_interrupt, 0,
eq_name[MLX4_EQ_CATAS], dev);
if (err)
goto err_out_catas;
priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1;
} else {
err = request_irq(dev->pdev->irq, mlx4_interrupt,
IRQF_SHARED, DRV_NAME, dev);
......@@ -639,22 +614,11 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
for (i = 0; i < MLX4_EQ_CATAS; ++i)
for (i = 0; i < MLX4_NUM_EQ; ++i)
eq_set_ci(&priv->eq_table.eq[i], 1);
if (dev->flags & MLX4_FLAG_MSI_X) {
err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0,
priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
if (err)
mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n",
priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err);
}
return 0;
err_out_catas:
mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
err_out_async:
mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
......@@ -675,19 +639,13 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
if (dev->flags & MLX4_FLAG_MSI_X)
mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1,
priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
mlx4_free_irqs(dev);
for (i = 0; i < MLX4_EQ_CATAS; ++i)
for (i = 0; i < MLX4_NUM_EQ; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
if (dev->flags & MLX4_FLAG_MSI_X)
mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
mlx4_unmap_clr_int(dev);
......
......@@ -142,6 +142,7 @@ int mlx4_register_device(struct mlx4_dev *dev)
mlx4_add_device(intf, priv);
mutex_unlock(&intf_mutex);
mlx4_start_catas_poll(dev);
return 0;
}
......@@ -151,6 +152,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
......
......@@ -78,7 +78,7 @@ static const char mlx4_version[] __devinitdata =
static struct mlx4_profile default_profile = {
.num_qp = 1 << 16,
.num_srq = 1 << 16,
.rdmarc_per_qp = 4,
.rdmarc_per_qp = 1 << 4,
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
.num_mpt = 1 << 17,
......@@ -583,13 +583,11 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
goto err_pd_table_free;
}
mlx4_map_catas_buf(dev);
err = mlx4_init_eq_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize "
"event queue table, aborting.\n");
goto err_catas_buf;
goto err_mr_table_free;
}
err = mlx4_cmd_use_events(dev);
......@@ -659,8 +657,7 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
err_eq_table_free:
mlx4_cleanup_eq_table(dev);
err_catas_buf:
mlx4_unmap_catas_buf(dev);
err_mr_table_free:
mlx4_cleanup_mr_table(dev);
err_pd_table_free:
......@@ -836,9 +833,6 @@ static int __devinit mlx4_init_one(struct pci_dev *pdev,
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
mlx4_unmap_catas_buf(dev);
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_pd_table(dev);
mlx4_cleanup_uar_table(dev);
......@@ -885,9 +879,6 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev)
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
mlx4_unmap_catas_buf(dev);
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_pd_table(dev);
......@@ -908,6 +899,12 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev)
}
}
int mlx4_restart_one(struct pci_dev *pdev)
{
mlx4_remove_one(pdev);
return mlx4_init_one(pdev, NULL);
}
static struct pci_device_id mlx4_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
......@@ -930,6 +927,10 @@ static int __init mlx4_init(void)
{
int ret;
ret = mlx4_catas_init();
if (ret)
return ret;
ret = pci_register_driver(&mlx4_driver);
return ret < 0 ? ret : 0;
}
......@@ -937,6 +938,7 @@ static int __init mlx4_init(void)
static void __exit mlx4_cleanup(void)
{
pci_unregister_driver(&mlx4_driver);
mlx4_catas_cleanup();
}
module_init(mlx4_init);
......
......@@ -39,6 +39,7 @@
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/timer.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
......@@ -67,7 +68,6 @@ enum {
enum {
MLX4_EQ_ASYNC,
MLX4_EQ_COMP,
MLX4_EQ_CATAS,
MLX4_NUM_EQ
};
......@@ -248,7 +248,8 @@ struct mlx4_mcg_table {
struct mlx4_catas_err {
u32 __iomem *map;
int size;
struct timer_list timer;
struct list_head list;
};
struct mlx4_priv {
......@@ -311,9 +312,11 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
void mlx4_map_catas_buf(struct mlx4_dev *dev);
void mlx4_unmap_catas_buf(struct mlx4_dev *dev);
void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
int mlx4_catas_init(void);
void mlx4_catas_cleanup(void);
int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
......