提交 f696bf6d 编写于 作者: B Bart Van Assche 提交者: Jason Gunthorpe

RDMA: Constify the argument of the work request conversion functions

When posting a send work request, the work request that is posted is not
modified by any of the RDMA drivers. Make this explicit by constifying
most ib_send_wr pointers in RDMA transport drivers.
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
上级 3e081b77
...@@ -1876,7 +1876,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, ...@@ -1876,7 +1876,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
/* Routine for sending QP1 packets for RoCE V1 an V2 /* Routine for sending QP1 packets for RoCE V1 an V2
*/ */
static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp, static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
struct ib_send_wr *wr, const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe, struct bnxt_qplib_swqe *wqe,
int payload_size) int payload_size)
{ {
...@@ -2093,7 +2093,7 @@ static int is_ud_qp(struct bnxt_re_qp *qp) ...@@ -2093,7 +2093,7 @@ static int is_ud_qp(struct bnxt_re_qp *qp)
} }
static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp, static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
struct ib_send_wr *wr, const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe) struct bnxt_qplib_swqe *wqe)
{ {
struct bnxt_re_ah *ah = NULL; struct bnxt_re_ah *ah = NULL;
...@@ -2131,7 +2131,7 @@ static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp, ...@@ -2131,7 +2131,7 @@ static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
return 0; return 0;
} }
static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr, static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe) struct bnxt_qplib_swqe *wqe)
{ {
switch (wr->opcode) { switch (wr->opcode) {
...@@ -2163,7 +2163,7 @@ static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr, ...@@ -2163,7 +2163,7 @@ static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
return 0; return 0;
} }
static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr, static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe) struct bnxt_qplib_swqe *wqe)
{ {
switch (wr->opcode) { switch (wr->opcode) {
...@@ -2190,7 +2190,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr, ...@@ -2190,7 +2190,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
return 0; return 0;
} }
static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr, static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe) struct bnxt_qplib_swqe *wqe)
{ {
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
...@@ -2209,7 +2209,7 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr, ...@@ -2209,7 +2209,7 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
return 0; return 0;
} }
static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr, static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
struct bnxt_qplib_swqe *wqe) struct bnxt_qplib_swqe *wqe)
{ {
struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr); struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
...@@ -2251,7 +2251,7 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr, ...@@ -2251,7 +2251,7 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
} }
static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev, static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
struct ib_send_wr *wr, const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe) struct bnxt_qplib_swqe *wqe)
{ {
/* Copy the inline data to the data field */ /* Copy the inline data to the data field */
...@@ -2281,7 +2281,7 @@ static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev, ...@@ -2281,7 +2281,7 @@ static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
} }
static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev, static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
struct ib_send_wr *wr, const struct ib_send_wr *wr,
struct bnxt_qplib_swqe *wqe) struct bnxt_qplib_swqe *wqe)
{ {
int payload_sz = 0; int payload_sz = 0;
...@@ -2313,7 +2313,7 @@ static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) ...@@ -2313,7 +2313,7 @@ static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp, struct bnxt_re_qp *qp,
struct ib_send_wr *wr) const struct ib_send_wr *wr)
{ {
struct bnxt_qplib_swqe wqe; struct bnxt_qplib_swqe wqe;
int rc = 0, payload_sz = 0; int rc = 0, payload_sz = 0;
......
...@@ -39,8 +39,8 @@ ...@@ -39,8 +39,8 @@
#define NO_SUPPORT -1 #define NO_SUPPORT -1
static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr, static int build_rdma_send(union t3_wr *wqe, const struct ib_send_wr *wr,
u8 * flit_cnt) u8 *flit_cnt)
{ {
int i; int i;
u32 plen; u32 plen;
...@@ -84,8 +84,8 @@ static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr, ...@@ -84,8 +84,8 @@ static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
return 0; return 0;
} }
static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr, static int build_rdma_write(union t3_wr *wqe, const struct ib_send_wr *wr,
u8 *flit_cnt) u8 *flit_cnt)
{ {
int i; int i;
u32 plen; u32 plen;
...@@ -125,8 +125,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr, ...@@ -125,8 +125,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
return 0; return 0;
} }
static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr, static int build_rdma_read(union t3_wr *wqe, const struct ib_send_wr *wr,
u8 *flit_cnt) u8 *flit_cnt)
{ {
if (wr->num_sge > 1) if (wr->num_sge > 1)
return -EINVAL; return -EINVAL;
...@@ -146,8 +146,8 @@ static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr, ...@@ -146,8 +146,8 @@ static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
return 0; return 0;
} }
static int build_memreg(union t3_wr *wqe, struct ib_reg_wr *wr, static int build_memreg(union t3_wr *wqe, const struct ib_reg_wr *wr,
u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
{ {
struct iwch_mr *mhp = to_iwch_mr(wr->mr); struct iwch_mr *mhp = to_iwch_mr(wr->mr);
int i; int i;
...@@ -189,8 +189,8 @@ static int build_memreg(union t3_wr *wqe, struct ib_reg_wr *wr, ...@@ -189,8 +189,8 @@ static int build_memreg(union t3_wr *wqe, struct ib_reg_wr *wr,
return 0; return 0;
} }
static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr, static int build_inv_stag(union t3_wr *wqe, const struct ib_send_wr *wr,
u8 *flit_cnt) u8 *flit_cnt)
{ {
wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey); wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
wqe->local_inv.reserved = 0; wqe->local_inv.reserved = 0;
......
...@@ -410,7 +410,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, ...@@ -410,7 +410,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
} }
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp, static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
struct ib_send_wr *wr, int max, u32 *plenp) const struct ib_send_wr *wr, int max, u32 *plenp)
{ {
u8 *dstp, *srcp; u8 *dstp, *srcp;
u32 plen = 0; u32 plen = 0;
...@@ -480,7 +480,7 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end, ...@@ -480,7 +480,7 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end,
} }
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
struct ib_send_wr *wr, u8 *len16) const struct ib_send_wr *wr, u8 *len16)
{ {
u32 plen; u32 plen;
int size; int size;
...@@ -547,7 +547,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, ...@@ -547,7 +547,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
} }
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
struct ib_send_wr *wr, u8 *len16) const struct ib_send_wr *wr, u8 *len16)
{ {
u32 plen; u32 plen;
int size; int size;
...@@ -589,7 +589,8 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, ...@@ -589,7 +589,8 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
return 0; return 0;
} }
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
u8 *len16)
{ {
if (wr->num_sge > 1) if (wr->num_sge > 1)
return -EINVAL; return -EINVAL;
...@@ -648,7 +649,7 @@ static int build_srq_recv(union t4_recv_wr *wqe, struct ib_recv_wr *wr, ...@@ -648,7 +649,7 @@ static int build_srq_recv(union t4_recv_wr *wqe, struct ib_recv_wr *wr,
} }
static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr, static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
struct ib_reg_wr *wr, struct c4iw_mr *mhp, const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
u8 *len16) u8 *len16)
{ {
__be64 *p = (__be64 *)fr->pbl; __be64 *p = (__be64 *)fr->pbl;
...@@ -680,8 +681,8 @@ static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr, ...@@ -680,8 +681,8 @@ static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
} }
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe, static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16, const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
bool dsgl_supported) u8 *len16, bool dsgl_supported)
{ {
struct fw_ri_immd *imdp; struct fw_ri_immd *imdp;
__be64 *p; __be64 *p;
...@@ -743,7 +744,8 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe, ...@@ -743,7 +744,8 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
return 0; return 0;
} }
static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
u8 *len16)
{ {
wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
wqe->inv.r2 = 0; wqe->inv.r2 = 0;
...@@ -862,7 +864,8 @@ static int ib_to_fw_opcode(int ib_opcode) ...@@ -862,7 +864,8 @@ static int ib_to_fw_opcode(int ib_opcode)
return opcode; return opcode;
} }
static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) static int complete_sq_drain_wr(struct c4iw_qp *qhp,
const struct ib_send_wr *wr)
{ {
struct t4_cqe cqe = {}; struct t4_cqe cqe = {};
struct c4iw_cq *schp; struct c4iw_cq *schp;
......
...@@ -985,7 +985,7 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); ...@@ -985,7 +985,7 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
int cnt); int cnt);
__be32 send_ieth(struct ib_send_wr *wr); __be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type); int to_hr_qp_type(int qp_type);
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
......
...@@ -53,7 +53,7 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg, ...@@ -53,7 +53,7 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length); dseg->len = cpu_to_le32(sg->length);
} }
static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr, static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
unsigned int *sge_ind) unsigned int *sge_ind)
{ {
struct hns_roce_v2_wqe_data_seg *dseg; struct hns_roce_v2_wqe_data_seg *dseg;
...@@ -100,7 +100,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr, ...@@ -100,7 +100,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, struct ib_send_wr *wr,
} }
} }
static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr, static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind, void *wqe, unsigned int *sge_ind,
struct ib_send_wr **bad_wr) struct ib_send_wr **bad_wr)
......
...@@ -2925,7 +2925,7 @@ static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey) ...@@ -2925,7 +2925,7 @@ static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
} }
static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
struct ib_ud_wr *wr, const struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len) void *wqe, unsigned *mlx_seg_len)
{ {
struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
...@@ -3073,7 +3073,7 @@ static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num, ...@@ -3073,7 +3073,7 @@ static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
} }
#define MLX4_ROCEV2_QP1_SPORT 0xC000 #define MLX4_ROCEV2_QP1_SPORT 0xC000
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len) void *wqe, unsigned *mlx_seg_len)
{ {
struct ib_device *ib_dev = sqp->qp.ibqp.device; struct ib_device *ib_dev = sqp->qp.ibqp.device;
...@@ -3355,7 +3355,7 @@ static __be32 convert_access(int acc) ...@@ -3355,7 +3355,7 @@ static __be32 convert_access(int acc)
} }
static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg, static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
struct ib_reg_wr *wr) const struct ib_reg_wr *wr)
{ {
struct mlx4_ib_mr *mr = to_mmr(wr->mr); struct mlx4_ib_mr *mr = to_mmr(wr->mr);
...@@ -3385,7 +3385,7 @@ static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg, ...@@ -3385,7 +3385,7 @@ static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
} }
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
struct ib_atomic_wr *wr) const struct ib_atomic_wr *wr)
{ {
if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->swap); aseg->swap_add = cpu_to_be64(wr->swap);
...@@ -3401,7 +3401,7 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, ...@@ -3401,7 +3401,7 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
} }
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
struct ib_atomic_wr *wr) const struct ib_atomic_wr *wr)
{ {
aseg->swap_add = cpu_to_be64(wr->swap); aseg->swap_add = cpu_to_be64(wr->swap);
aseg->swap_add_mask = cpu_to_be64(wr->swap_mask); aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
...@@ -3410,7 +3410,7 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, ...@@ -3410,7 +3410,7 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
} }
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
struct ib_ud_wr *wr) const struct ib_ud_wr *wr)
{ {
memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av)); memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
dseg->dqpn = cpu_to_be32(wr->remote_qpn); dseg->dqpn = cpu_to_be32(wr->remote_qpn);
...@@ -3421,7 +3421,7 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, ...@@ -3421,7 +3421,7 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
struct mlx4_wqe_datagram_seg *dseg, struct mlx4_wqe_datagram_seg *dseg,
struct ib_ud_wr *wr, const struct ib_ud_wr *wr,
enum mlx4_ib_qp_type qpt) enum mlx4_ib_qp_type qpt)
{ {
union mlx4_ext_av *av = &to_mah(wr->ah)->av; union mlx4_ext_av *av = &to_mah(wr->ah)->av;
...@@ -3443,7 +3443,8 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, ...@@ -3443,7 +3443,8 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY); dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
} }
static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe,
unsigned *mlx_seg_len)
{ {
struct mlx4_wqe_inline_seg *inl = wqe; struct mlx4_wqe_inline_seg *inl = wqe;
struct mlx4_ib_tunnel_header hdr; struct mlx4_ib_tunnel_header hdr;
...@@ -3526,9 +3527,9 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) ...@@ -3526,9 +3527,9 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
dseg->addr = cpu_to_be64(sg->addr); dseg->addr = cpu_to_be64(sg->addr);
} }
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr, static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe,
struct mlx4_ib_qp *qp, unsigned *lso_seg_len, const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp,
__be32 *lso_hdr_sz, __be32 *blh) unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh)
{ {
unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16); unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
...@@ -3546,7 +3547,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr, ...@@ -3546,7 +3547,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
return 0; return 0;
} }
static __be32 send_ieth(struct ib_send_wr *wr) static __be32 send_ieth(const struct ib_send_wr *wr)
{ {
switch (wr->opcode) { switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM: case IB_WR_SEND_WITH_IMM:
......
...@@ -469,7 +469,7 @@ struct mlx5_umr_wr { ...@@ -469,7 +469,7 @@ struct mlx5_umr_wr {
u32 mkey; u32 mkey;
}; };
static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr) static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{ {
return container_of(wr, struct mlx5_umr_wr, wr); return container_of(wr, struct mlx5_umr_wr, wr);
} }
......
...@@ -3508,7 +3508,7 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg, ...@@ -3508,7 +3508,7 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
} }
static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg, static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
struct ib_send_wr *wr, void *qend, const struct ib_send_wr *wr, void *qend,
struct mlx5_ib_qp *qp, int *size) struct mlx5_ib_qp *qp, int *size)
{ {
void *seg = eseg; void *seg = eseg;
...@@ -3561,7 +3561,7 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg, ...@@ -3561,7 +3561,7 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
} }
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
struct ib_send_wr *wr) const struct ib_send_wr *wr)
{ {
memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
...@@ -3709,9 +3709,9 @@ static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask) ...@@ -3709,9 +3709,9 @@ static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
static int set_reg_umr_segment(struct mlx5_ib_dev *dev, static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
struct mlx5_wqe_umr_ctrl_seg *umr, struct mlx5_wqe_umr_ctrl_seg *umr,
struct ib_send_wr *wr, int atomic) const struct ib_send_wr *wr, int atomic)
{ {
struct mlx5_umr_wr *umrwr = umr_wr(wr); const struct mlx5_umr_wr *umrwr = umr_wr(wr);
memset(umr, 0, sizeof(*umr)); memset(umr, 0, sizeof(*umr));
...@@ -3782,9 +3782,10 @@ static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg) ...@@ -3782,9 +3782,10 @@ static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
seg->status = MLX5_MKEY_STATUS_FREE; seg->status = MLX5_MKEY_STATUS_FREE;
} }
static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr) static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
const struct ib_send_wr *wr)
{ {
struct mlx5_umr_wr *umrwr = umr_wr(wr); const struct mlx5_umr_wr *umrwr = umr_wr(wr);
memset(seg, 0, sizeof(*seg)); memset(seg, 0, sizeof(*seg));
if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
...@@ -3833,7 +3834,7 @@ static void set_reg_umr_inline_seg(void *seg, struct mlx5_ib_qp *qp, ...@@ -3833,7 +3834,7 @@ static void set_reg_umr_inline_seg(void *seg, struct mlx5_ib_qp *qp,
seg += mr_list_size; seg += mr_list_size;
} }
static __be32 send_ieth(struct ib_send_wr *wr) static __be32 send_ieth(const struct ib_send_wr *wr)
{ {
switch (wr->opcode) { switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM: case IB_WR_SEND_WITH_IMM:
...@@ -3865,7 +3866,7 @@ static u8 wq_sig(void *wqe) ...@@ -3865,7 +3866,7 @@ static u8 wq_sig(void *wqe)
return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4); return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
} }
static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
void *wqe, int *sz) void *wqe, int *sz)
{ {
struct mlx5_wqe_inline_seg *seg; struct mlx5_wqe_inline_seg *seg;
...@@ -4011,7 +4012,7 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr, ...@@ -4011,7 +4012,7 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
return 0; return 0;
} }
static int set_sig_data_segment(struct ib_sig_handover_wr *wr, static int set_sig_data_segment(const struct ib_sig_handover_wr *wr,
struct mlx5_ib_qp *qp, void **seg, int *size) struct mlx5_ib_qp *qp, void **seg, int *size)
{ {
struct ib_sig_attrs *sig_attrs = wr->sig_attrs; struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
...@@ -4113,7 +4114,7 @@ static int set_sig_data_segment(struct ib_sig_handover_wr *wr, ...@@ -4113,7 +4114,7 @@ static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
} }
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
struct ib_sig_handover_wr *wr, u32 size, const struct ib_sig_handover_wr *wr, u32 size,
u32 length, u32 pdn) u32 length, u32 pdn)
{ {
struct ib_mr *sig_mr = wr->sig_mr; struct ib_mr *sig_mr = wr->sig_mr;
...@@ -4144,10 +4145,10 @@ static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, ...@@ -4144,10 +4145,10 @@ static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
} }
static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp, static int set_sig_umr_wr(const struct ib_send_wr *send_wr,
void **seg, int *size) struct mlx5_ib_qp *qp, void **seg, int *size)
{ {
struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr); const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr); struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
u32 pdn = get_pd(qp)->pdn; u32 pdn = get_pd(qp)->pdn;
u32 xlt_size; u32 xlt_size;
...@@ -4222,7 +4223,7 @@ static int set_psv_wr(struct ib_sig_domain *domain, ...@@ -4222,7 +4223,7 @@ static int set_psv_wr(struct ib_sig_domain *domain,
} }
static int set_reg_wr(struct mlx5_ib_qp *qp, static int set_reg_wr(struct mlx5_ib_qp *qp,
struct ib_reg_wr *wr, const struct ib_reg_wr *wr,
void **seg, int *size) void **seg, int *size)
{ {
struct mlx5_ib_mr *mr = to_mmr(wr->mr); struct mlx5_ib_mr *mr = to_mmr(wr->mr);
...@@ -4295,7 +4296,7 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) ...@@ -4295,7 +4296,7 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
struct mlx5_wqe_ctrl_seg **ctrl, struct mlx5_wqe_ctrl_seg **ctrl,
struct ib_send_wr *wr, unsigned *idx, const struct ib_send_wr *wr, unsigned *idx,
int *size, int nreq) int *size, int nreq)
{ {
if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
......
...@@ -1488,7 +1488,7 @@ void mthca_free_qp(struct mthca_dev *dev, ...@@ -1488,7 +1488,7 @@ void mthca_free_qp(struct mthca_dev *dev,
/* Create UD header for an MLX send and build a data segment for it */ /* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
int ind, struct ib_ud_wr *wr, int ind, const struct ib_ud_wr *wr,
struct mthca_mlx_seg *mlx, struct mthca_mlx_seg *mlx,
struct mthca_data_seg *data) struct mthca_data_seg *data)
{ {
...@@ -1581,7 +1581,7 @@ static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg, ...@@ -1581,7 +1581,7 @@ static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
} }
static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg, static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
struct ib_atomic_wr *wr) const struct ib_atomic_wr *wr)
{ {
if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->swap); aseg->swap_add = cpu_to_be64(wr->swap);
...@@ -1594,7 +1594,7 @@ static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg, ...@@ -1594,7 +1594,7 @@ static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
} }
static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
struct ib_ud_wr *wr) const struct ib_ud_wr *wr)
{ {
useg->lkey = cpu_to_be32(to_mah(wr->ah)->key); useg->lkey = cpu_to_be32(to_mah(wr->ah)->key);
useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma); useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma);
...@@ -1604,7 +1604,7 @@ static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, ...@@ -1604,7 +1604,7 @@ static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
} }
static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg, static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
struct ib_ud_wr *wr) const struct ib_ud_wr *wr)
{ {
memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE); memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
useg->dqpn = cpu_to_be32(wr->remote_qpn); useg->dqpn = cpu_to_be32(wr->remote_qpn);
......
...@@ -3040,7 +3040,8 @@ static int nes_process_mad(struct ib_device *ibdev, int mad_flags, ...@@ -3040,7 +3040,8 @@ static int nes_process_mad(struct ib_device *ibdev, int mad_flags,
} }
static inline void static inline void
fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselkey) fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, const struct ib_send_wr *ib_wr,
u32 uselkey)
{ {
int sge_index; int sge_index;
int total_payload_length = 0; int total_payload_length = 0;
......
...@@ -1953,7 +1953,7 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq) ...@@ -1953,7 +1953,7 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq)
/* unprivileged verbs and their support functions. */ /* unprivileged verbs and their support functions. */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp, static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr, struct ocrdma_hdr_wqe *hdr,
struct ib_send_wr *wr) const struct ib_send_wr *wr)
{ {
struct ocrdma_ewqe_ud_hdr *ud_hdr = struct ocrdma_ewqe_ud_hdr *ud_hdr =
(struct ocrdma_ewqe_ud_hdr *)(hdr + 1); (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
...@@ -2000,7 +2000,7 @@ static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge) ...@@ -2000,7 +2000,7 @@ static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr, struct ocrdma_hdr_wqe *hdr,
struct ocrdma_sge *sge, struct ocrdma_sge *sge,
struct ib_send_wr *wr, u32 wqe_size) const struct ib_send_wr *wr, u32 wqe_size)
{ {
int i; int i;
char *dpp_addr; char *dpp_addr;
...@@ -2038,7 +2038,7 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, ...@@ -2038,7 +2038,7 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
} }
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
struct ib_send_wr *wr) const struct ib_send_wr *wr)
{ {
int status; int status;
struct ocrdma_sge *sge; struct ocrdma_sge *sge;
...@@ -2057,7 +2057,7 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, ...@@ -2057,7 +2057,7 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
} }
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
struct ib_send_wr *wr) const struct ib_send_wr *wr)
{ {
int status; int status;
struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1); struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
...@@ -2075,7 +2075,7 @@ static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, ...@@ -2075,7 +2075,7 @@ static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
} }
static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
struct ib_send_wr *wr) const struct ib_send_wr *wr)
{ {
struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1); struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
struct ocrdma_sge *sge = ext_rw + 1; struct ocrdma_sge *sge = ext_rw + 1;
...@@ -2105,7 +2105,7 @@ static int get_encoded_page_size(int pg_sz) ...@@ -2105,7 +2105,7 @@ static int get_encoded_page_size(int pg_sz)
static int ocrdma_build_reg(struct ocrdma_qp *qp, static int ocrdma_build_reg(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr, struct ocrdma_hdr_wqe *hdr,
struct ib_reg_wr *wr) const struct ib_reg_wr *wr)
{ {
u64 fbo; u64 fbo;
struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
......
...@@ -380,7 +380,7 @@ int qedr_destroy_gsi_qp(struct qedr_dev *dev) ...@@ -380,7 +380,7 @@ int qedr_destroy_gsi_qp(struct qedr_dev *dev)
#define QEDR_GSI_QPN (1) #define QEDR_GSI_QPN (1)
static inline int qedr_gsi_build_header(struct qedr_dev *dev, static inline int qedr_gsi_build_header(struct qedr_dev *dev,
struct qedr_qp *qp, struct qedr_qp *qp,
struct ib_send_wr *swr, const struct ib_send_wr *swr,
struct ib_ud_header *udh, struct ib_ud_header *udh,
int *roce_mode) int *roce_mode)
{ {
...@@ -488,7 +488,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev, ...@@ -488,7 +488,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
static inline int qedr_gsi_build_packet(struct qedr_dev *dev, static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
struct qedr_qp *qp, struct qedr_qp *qp,
struct ib_send_wr *swr, const struct ib_send_wr *swr,
struct qed_roce_ll2_packet **p_packet) struct qed_roce_ll2_packet **p_packet)
{ {
u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE]; u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
......
...@@ -2781,7 +2781,7 @@ static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev, ...@@ -2781,7 +2781,7 @@ static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
} while (0) } while (0)
static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size, static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
struct ib_send_wr *wr) const struct ib_send_wr *wr)
{ {
u32 data_size = 0; u32 data_size = 0;
int i; int i;
...@@ -2845,7 +2845,7 @@ static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev, ...@@ -2845,7 +2845,7 @@ static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
static int qedr_prepare_reg(struct qedr_qp *qp, static int qedr_prepare_reg(struct qedr_qp *qp,
struct rdma_sq_fmr_wqe_1st *fwqe1, struct rdma_sq_fmr_wqe_1st *fwqe1,
struct ib_reg_wr *wr) const struct ib_reg_wr *wr)
{ {
struct qedr_mr *mr = get_qedr_mr(wr->mr); struct qedr_mr *mr = get_qedr_mr(wr->mr);
struct rdma_sq_fmr_wqe_2nd *fwqe2; struct rdma_sq_fmr_wqe_2nd *fwqe2;
...@@ -2907,7 +2907,8 @@ static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) ...@@ -2907,7 +2907,8 @@ static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
} }
} }
static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) static inline bool qedr_can_post_send(struct qedr_qp *qp,
const struct ib_send_wr *wr)
{ {
int wq_is_full, err_wr, pbl_is_full; int wq_is_full, err_wr, pbl_is_full;
struct qedr_dev *dev = qp->dev; struct qedr_dev *dev = qp->dev;
......
...@@ -311,7 +311,7 @@ void qib_rc_rnr_retry(unsigned long arg); ...@@ -311,7 +311,7 @@ void qib_rc_rnr_retry(unsigned long arg);
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr); void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr); int qib_post_ud_send(struct rvt_qp *qp, const struct ib_send_wr *wr);
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr, void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp); int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
......
...@@ -599,7 +599,8 @@ static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n) ...@@ -599,7 +599,8 @@ static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
qp->rq.offset + n * qp->rq.wqe_size); qp->rq.offset + n * qp->rq.wqe_size);
} }
static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr) static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr,
const struct ib_reg_wr *wr)
{ {
struct pvrdma_user_mr *mr = to_vmr(wr->mr); struct pvrdma_user_mr *mr = to_vmr(wr->mr);
......
...@@ -1620,7 +1620,7 @@ int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -1620,7 +1620,7 @@ int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
static inline int rvt_qp_valid_operation( static inline int rvt_qp_valid_operation(
struct rvt_qp *qp, struct rvt_qp *qp,
const struct rvt_operation_params *post_parms, const struct rvt_operation_params *post_parms,
struct ib_send_wr *wr) const struct ib_send_wr *wr)
{ {
int len; int len;
...@@ -1717,7 +1717,7 @@ static inline int rvt_qp_is_avail( ...@@ -1717,7 +1717,7 @@ static inline int rvt_qp_is_avail(
* @wr: the work request to send * @wr: the work request to send
*/ */
static int rvt_post_one_wr(struct rvt_qp *qp, static int rvt_post_one_wr(struct rvt_qp *qp,
struct ib_send_wr *wr, const struct ib_send_wr *wr,
int *call_send) int *call_send)
{ {
struct rvt_swqe *wqe; struct rvt_swqe *wqe;
......
...@@ -554,7 +554,7 @@ static int rxe_destroy_qp(struct ib_qp *ibqp) ...@@ -554,7 +554,7 @@ static int rxe_destroy_qp(struct ib_qp *ibqp)
return 0; return 0;
} }
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr, static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, unsigned int length) unsigned int mask, unsigned int length)
{ {
int num_sge = ibwr->num_sge; int num_sge = ibwr->num_sge;
...@@ -582,7 +582,7 @@ static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr, ...@@ -582,7 +582,7 @@ static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
} }
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
struct ib_send_wr *ibwr) const struct ib_send_wr *ibwr)
{ {
wr->wr_id = ibwr->wr_id; wr->wr_id = ibwr->wr_id;
wr->num_sge = ibwr->num_sge; wr->num_sge = ibwr->num_sge;
...@@ -637,7 +637,7 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, ...@@ -637,7 +637,7 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
} }
} }
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr, static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, unsigned int length, unsigned int mask, unsigned int length,
struct rxe_send_wqe *wqe) struct rxe_send_wqe *wqe)
{ {
...@@ -685,7 +685,7 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr, ...@@ -685,7 +685,7 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
return 0; return 0;
} }
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr, static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, u32 length) unsigned int mask, u32 length)
{ {
int err; int err;
......
...@@ -1352,7 +1352,7 @@ struct ib_rdma_wr { ...@@ -1352,7 +1352,7 @@ struct ib_rdma_wr {
u32 rkey; u32 rkey;
}; };
static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr) static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{ {
return container_of(wr, struct ib_rdma_wr, wr); return container_of(wr, struct ib_rdma_wr, wr);
} }
...@@ -1367,7 +1367,7 @@ struct ib_atomic_wr { ...@@ -1367,7 +1367,7 @@ struct ib_atomic_wr {
u32 rkey; u32 rkey;
}; };
static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr) static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{ {
return container_of(wr, struct ib_atomic_wr, wr); return container_of(wr, struct ib_atomic_wr, wr);
} }
...@@ -1384,7 +1384,7 @@ struct ib_ud_wr { ...@@ -1384,7 +1384,7 @@ struct ib_ud_wr {
u8 port_num; /* valid for DR SMPs on switch only */ u8 port_num; /* valid for DR SMPs on switch only */
}; };
static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr) static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{ {
return container_of(wr, struct ib_ud_wr, wr); return container_of(wr, struct ib_ud_wr, wr);
} }
...@@ -1396,7 +1396,7 @@ struct ib_reg_wr { ...@@ -1396,7 +1396,7 @@ struct ib_reg_wr {
int access; int access;
}; };
static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr) static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{ {
return container_of(wr, struct ib_reg_wr, wr); return container_of(wr, struct ib_reg_wr, wr);
} }
...@@ -1409,7 +1409,8 @@ struct ib_sig_handover_wr { ...@@ -1409,7 +1409,8 @@ struct ib_sig_handover_wr {
struct ib_sge *prot; struct ib_sge *prot;
}; };
static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr) static inline const struct ib_sig_handover_wr *
sig_handover_wr(const struct ib_send_wr *wr)
{ {
return container_of(wr, struct ib_sig_handover_wr, wr); return container_of(wr, struct ib_sig_handover_wr, wr);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册