提交 f241e749 编写于 作者: J Jack Morgenstein 提交者: David S. Miller

mlx5: minor fixes (mainly avoidance of hidden casts)

There were many places where parameters which should have been declared
as u8/u16 were instead declared as int.

Additionally, in 2 places, a check for a non-null pointer was added
before dereferencing the pointer (this is actually a bug fix).
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 9603b61d
...@@ -348,7 +348,7 @@ static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, ...@@ -348,7 +348,7 @@ static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
u16 tail, u16 head) u16 tail, u16 head)
{ {
int idx; u16 idx;
do { do {
idx = tail & (qp->sq.wqe_cnt - 1); idx = tail & (qp->sq.wqe_cnt - 1);
......
...@@ -41,7 +41,7 @@ enum { ...@@ -41,7 +41,7 @@ enum {
}; };
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
int port, struct ib_wc *in_wc, struct ib_grh *in_grh, u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad) void *in_mad, void *response_mad)
{ {
u8 op_modifier = 0; u8 op_modifier = 0;
......
...@@ -478,7 +478,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -478,7 +478,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
int uuarn; int uuarn;
int err; int err;
int i; int i;
int reqlen; size_t reqlen;
if (!dev->ib_active) if (!dev->ib_active)
return ERR_PTR(-EAGAIN); return ERR_PTR(-EAGAIN);
......
...@@ -148,7 +148,7 @@ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset) ...@@ -148,7 +148,7 @@ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
u64 off_mask; u64 off_mask;
u64 buf_off; u64 buf_off;
page_size = 1 << page_shift; page_size = (u64)1 << page_shift;
page_mask = page_size - 1; page_mask = page_size - 1;
buf_off = addr & page_mask; buf_off = addr & page_mask;
off_size = page_size >> 6; off_size = page_size >> 6;
......
...@@ -461,7 +461,7 @@ void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) ...@@ -461,7 +461,7 @@ void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
int port, struct ib_wc *in_wc, struct ib_grh *in_grh, u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad); void *in_mad, void *response_mad);
struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr, struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
struct mlx5_ib_ah *ah); struct mlx5_ib_ah *ah);
......
...@@ -2539,7 +2539,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -2539,7 +2539,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_WRITE_WITH_IMM: case IB_WR_RDMA_WRITE_WITH_IMM:
set_raddr_seg(seg, wr->wr.rdma.remote_addr, set_raddr_seg(seg, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey); wr->wr.rdma.rkey);
seg += sizeof(struct mlx5_wqe_raddr_seg); seg += sizeof(struct mlx5_wqe_raddr_seg);
size += sizeof(struct mlx5_wqe_raddr_seg) / 16; size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
break; break;
...@@ -2668,7 +2668,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -2668,7 +2668,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_QPT_SMI: case IB_QPT_SMI:
case IB_QPT_GSI: case IB_QPT_GSI:
set_datagram_seg(seg, wr); set_datagram_seg(seg, wr);
seg += sizeof(struct mlx5_wqe_datagram_seg); seg += sizeof(struct mlx5_wqe_datagram_seg);
size += sizeof(struct mlx5_wqe_datagram_seg) / 16; size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
if (unlikely((seg == qend))) if (unlikely((seg == qend)))
seg = mlx5_get_send_wqe(qp, 0); seg = mlx5_get_send_wqe(qp, 0);
......
...@@ -56,7 +56,7 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, ...@@ -56,7 +56,7 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
if (size <= max_direct) { if (size <= max_direct) {
buf->nbufs = 1; buf->nbufs = 1;
buf->npages = 1; buf->npages = 1;
buf->page_shift = get_order(size) + PAGE_SHIFT; buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
size, &t, GFP_KERNEL); size, &t, GFP_KERNEL);
if (!buf->direct.buf) if (!buf->direct.buf)
......
...@@ -464,7 +464,7 @@ static void dump_command(struct mlx5_core_dev *dev, ...@@ -464,7 +464,7 @@ static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
struct mlx5_cmd_mailbox *next = msg->next; struct mlx5_cmd_mailbox *next = msg->next;
int data_only; int data_only;
int offset = 0; u32 offset = 0;
int dump_len; int dump_len;
data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
......
...@@ -252,7 +252,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) ...@@ -252,7 +252,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
case MLX5_PORT_CHANGE_SUBTYPE_GUID: case MLX5_PORT_CHANGE_SUBTYPE_GUID:
case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
dev->event(dev, port_subtype_event(eqe->sub_type), &port); if (dev->event)
dev->event(dev, port_subtype_event(eqe->sub_type), &port);
break; break;
default: default:
mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n", mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
......
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
#include "mlx5_core.h" #include "mlx5_core.h"
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
u16 opmod, int port) u16 opmod, u8 port)
{ {
struct mlx5_mad_ifc_mbox_in *in = NULL; struct mlx5_mad_ifc_mbox_in *in = NULL;
struct mlx5_mad_ifc_mbox_out *out = NULL; struct mlx5_mad_ifc_mbox_out *out = NULL;
......
...@@ -311,7 +311,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) ...@@ -311,7 +311,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap); copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap);
if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) if (dev->profile && dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
flags = be64_to_cpu(query_out->hca_cap.flags); flags = be64_to_cpu(query_out->hca_cap.flags);
......
...@@ -51,7 +51,7 @@ enum { ...@@ -51,7 +51,7 @@ enum {
struct mlx5_pages_req { struct mlx5_pages_req {
struct mlx5_core_dev *dev; struct mlx5_core_dev *dev;
u32 func_id; u16 func_id;
s32 npages; s32 npages;
struct work_struct work; struct work_struct work;
}; };
......
...@@ -86,7 +86,7 @@ struct mlx5_reg_pcap { ...@@ -86,7 +86,7 @@ struct mlx5_reg_pcap {
__be32 caps_31_0; __be32 caps_31_0;
}; };
int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps) int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
{ {
struct mlx5_reg_pcap in; struct mlx5_reg_pcap in;
struct mlx5_reg_pcap out; struct mlx5_reg_pcap out;
......
...@@ -456,9 +456,6 @@ struct mlx5_eqe_cq_err { ...@@ -456,9 +456,6 @@ struct mlx5_eqe_cq_err {
u8 syndrome; u8 syndrome;
}; };
struct mlx5_eqe_dropped_packet {
};
struct mlx5_eqe_port_state { struct mlx5_eqe_port_state {
u8 reserved0[8]; u8 reserved0[8];
u8 port; u8 port;
...@@ -498,7 +495,6 @@ union ev_data { ...@@ -498,7 +495,6 @@ union ev_data {
struct mlx5_eqe_comp comp; struct mlx5_eqe_comp comp;
struct mlx5_eqe_qp_srq qp_srq; struct mlx5_eqe_qp_srq qp_srq;
struct mlx5_eqe_cq_err cq_err; struct mlx5_eqe_cq_err cq_err;
struct mlx5_eqe_dropped_packet dp;
struct mlx5_eqe_port_state port; struct mlx5_eqe_port_state port;
struct mlx5_eqe_gpio gpio; struct mlx5_eqe_gpio gpio;
struct mlx5_eqe_congestion cong; struct mlx5_eqe_congestion cong;
......
...@@ -381,8 +381,8 @@ struct mlx5_buf { ...@@ -381,8 +381,8 @@ struct mlx5_buf {
struct mlx5_buf_list *page_list; struct mlx5_buf_list *page_list;
int nbufs; int nbufs;
int npages; int npages;
int page_shift;
int size; int size;
u8 page_shift;
}; };
struct mlx5_eq { struct mlx5_eq {
...@@ -736,7 +736,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, ...@@ -736,7 +736,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
u16 opmod, int port); u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev); int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
...@@ -769,7 +769,7 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); ...@@ -769,7 +769,7 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out, int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write); u16 reg_num, int arg, int write);
int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps); int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
...@@ -826,7 +826,7 @@ void mlx5_unregister_interface(struct mlx5_interface *intf); ...@@ -826,7 +826,7 @@ void mlx5_unregister_interface(struct mlx5_interface *intf);
struct mlx5_profile { struct mlx5_profile {
u64 mask; u64 mask;
u32 log_max_qp; u8 log_max_qp;
struct { struct {
int size; int size;
int limit; int limit;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册