Commit fbbfa34c authored by David S. Miller

Merge branch 'qed-fixes'

Yuval Mintz says:

====================
qed: Fix dependencies and warnings series

The first patch in this series follows Dan Carpenter's reports about
Smatch warnings for recent qed additions and fixes those.

The second patch is the most significant one [and the reason this is
intended for 'net'] - it's based on Arnd Bergmann's suggestion for fixing
compilation issues that were introduced with the roce addition as a result
of certain combinations of qed, qede and qedr Kconfig options.

The third follows the discussion with Arnd and clears a lot of the warnings
that arise when compiling the drivers with "C=1".

Please consider applying this series to 'net'.
====================
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
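
Two patterns recur in the diff below. First, patch 2 adds an invisible Kconfig bool, QED_RDMA, which the tristate INFINIBAND_QEDR selects; the qed/qede Makefiles and sources then key off CONFIG_QED_RDMA rather than CONFIG_INFINIBAND_QEDR, whose =m value would silently leave qed_roce.o out of a built-in qed. Second, preprocessor #if blocks give way to ordinary C tests and constant expressions built on IS_ENABLED(), so the compiler parses both configurations and merely eliminates the dead branch. Below is a standalone sketch of that second pattern, not driver code; the simplified IS_ENABLED() stands in for the real macro in include/linux/kconfig.h, which also handles =m options.

#include <stdio.h>

/* Normally generated by Kconfig into autoconf.h; defined here only so
 * the example is self-contained.
 */
#define CONFIG_QED_RDMA 1

/* Simplified stand-in for the kernel's IS_ENABLED(): expands to 1 when
 * the option macro is defined to 1, and to 0 when it is undefined.
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) __is_defined(option)

/* Old style - only one branch is ever compiled, so breakage in the
 * other configuration goes unnoticed until someone builds it:
 *
 *   #if IS_ENABLED(CONFIG_QED_RDMA)
 *   #define ILT_DEFAULT_HW_P_SIZE 4
 *   #else
 *   #define ILT_DEFAULT_HW_P_SIZE 3
 *   #endif
 *
 * New style, as in the qed_cxt.c hunk below: a constant expression the
 * compiler folds either way, with both alternatives always visible.
 */
#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)

int main(void)
{
	/* Reads like a runtime check, but IS_ENABLED() is a constant,
	 * so the disabled branch is dead code the optimizer removes -
	 * the same trick the qed_update_pf_params() hunk uses with an
	 * early return.
	 */
	if (IS_ENABLED(CONFIG_QED_RDMA))
		printf("RDMA on, ILT_DEFAULT_HW_P_SIZE = %d\n",
		       ILT_DEFAULT_HW_P_SIZE);
	else
		printf("RDMA off, ILT_DEFAULT_HW_P_SIZE = %d\n",
		       ILT_DEFAULT_HW_P_SIZE);
	return 0;
}
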
@@ -107,10 +107,14 @@ config QEDE
---help---
This enables the support for ...
config QED_RDMA
bool
config INFINIBAND_QEDR
tristate "QLogic qede RoCE sources [debug]"
depends on QEDE && 64BIT
select QED_LL2
select QED_RDMA
default n
---help---
This provides a temporary node that allows the compilation
......
@@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
qed-$(CONFIG_QED_RDMA) += qed_roce.o
@@ -47,13 +47,8 @@
#define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4
/* ILT constants */
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
#define ILT_DEFAULT_HW_P_SIZE 4
#else
#define ILT_DEFAULT_HW_P_SIZE 3
#endif
#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -349,14 +344,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
return NULL;
}
void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs;
}
u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
@@ -1804,8 +1799,8 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
return 0;
}
void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
struct qed_rdma_pf_params *p_params)
static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
struct qed_rdma_pf_params *p_params)
{
u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
enum protocol_type proto;
......
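
Patch 3 of the series, visible in signature hunks like the ones above, marks functions used in only one file as static. Building with make C=1 runs the sparse checker over each translation unit, and sparse warns for every externally visible symbol that no header declares. Here is a standalone sketch of that warning class; the file and function names are hypothetical, not taken from the driver.

/* sparse_demo.c - checking it the way "make C=1" would:
 *
 *   sparse sparse_demo.c
 *
 * reports on the first function:
 *   warning: symbol 'helper_double' was not declared. Should it be static?
 */

/* Before the fix: external linkage, visible image-wide, yet no header
 * declares it and nothing outside this file calls it.
 */
int helper_double(int x)
{
	return 2 * x;
}

/* After the fix: internal linkage silences sparse and lets the compiler
 * inline the helper or drop it when unused.
 */
static int helper_triple(int x)
{
	return 3 * x;
}

/* demo_entry would draw the same warning unless a header declares it. */
int demo_entry(int x)
{
	return helper_double(x) + helper_triple(x);
}
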
@@ -405,7 +405,7 @@ struct phy_defs {
/***************************** Constant Arrays *******************************/
/* Debug arrays */
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
@@ -4028,10 +4028,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
}
/* Dump MCP Trace */
enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
@@ -4130,10 +4130,10 @@ enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
}
/* Dump GRC FIFO */
enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data;
@@ -4192,10 +4192,10 @@ enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
}
/* Dump IGU FIFO */
enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data;
@@ -4255,10 +4255,11 @@ enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
}
/* Protection Override dump */
enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
bool dump,
u32 *num_dumped_dwords)
{
u32 offset = 0, size_param_offset, override_window_dwords;
@@ -6339,10 +6340,11 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
}
/* Wrapper for unifying the idle_chk and mcp_trace api */
enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf)
static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf)
{
u32 num_errors, num_warnnings;
@@ -6413,8 +6415,8 @@ static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
#define QED_RESULTS_BUF_MIN_SIZE 16
/* Generic function for decoding debug feature info */
enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
enum qed_dbg_features feature_idx)
static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx];
@@ -6480,8 +6482,9 @@ enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
}
/* Generic function for performing the dump of a debug feature. */
enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
enum qed_dbg_features feature_idx)
static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx];
......
@@ -497,12 +497,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
PROTOCOLID_ROCE,
0) * 2;
NULL) * 2;
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
num_cons =
qed_cxt_get_proto_cid_count(p_hwfn,
PROTOCOLID_ISCSI, 0);
PROTOCOLID_ISCSI,
NULL);
n_eqes += 2 * num_cons;
}
@@ -1422,19 +1423,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
u32 *feat_num = p_hwfn->hw_info.feat_num;
int num_features = 1;
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the
* status blocks equally between L2 / RoCE but with consideration as
* to how many l2 queues / cnqs we have
*/
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
if (IS_ENABLED(CONFIG_QED_RDMA) &&
p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
* the status blocks equally between L2 / RoCE but with
* consideration as to how many l2 queues / cnqs we have.
*/
num_features++;
feat_num[QED_RDMA_CNQ] =
min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
}
#endif
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
......
@@ -38,6 +38,7 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"
#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
@@ -140,11 +141,11 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer);
}
void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
struct qed_ll2_rx_packet *p_pkt,
struct core_rx_fast_path_cqe *p_cqe,
bool b_last_packet)
static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
struct qed_ll2_rx_packet *p_pkt,
struct core_rx_fast_path_cqe *p_cqe,
bool b_last_packet)
{
u16 packet_length = le16_to_cpu(p_cqe->packet_length);
struct qed_ll2_buffer *buffer = p_pkt->cookie;
@@ -515,7 +516,7 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
return rc;
}
void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ll2_rx_packet *p_pkt = NULL;
@@ -1123,9 +1124,6 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len);
SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
type);
DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
......
@@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
*/
void qed_ll2_free(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_connections);
void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
u16 data_length,
u8 data_length_error,
u16 parse_flags,
u16 vlan,
u32 src_mac_addr_hi,
u16 src_mac_addr_lo, bool b_last_packet);
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet);
void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet);
#endif
@@ -33,10 +33,8 @@
#include "qed_hw.h"
#include "qed_selftest.h"
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
#endif
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
struct qed_sb_cnt_info sb_cnt_info;
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
int num_l2_queues;
#endif
int num_l2_queues = 0;
int rc;
int i;
@@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
num_l2_queues = 0;
if (!IS_ENABLED(CONFIG_QED_RDMA))
return 0;
for_each_hwfn(cdev, i)
num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
@@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
cdev->int_params.rdma_msix_cnt,
cdev->int_params.rdma_msix_base);
#endif
return 0;
}
@@ -843,18 +839,20 @@ static void qed_update_pf_params(struct qed_dev *cdev,
{
int i;
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
/* divide by 3 the MRs to avoid MF ILT overflow */
params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
#endif
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
p_hwfn->pf_params = *params;
}
if (!IS_ENABLED(CONFIG_QED_RDMA))
return;
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
/* divide by 3 the MRs to avoid MF ILT overflow */
params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
static int qed_slowpath_start(struct qed_dev *cdev,
@@ -1432,7 +1430,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
struct qed_selftest_ops qed_selftest_ops_pass = {
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
.selftest_register = &qed_selftest_register,
......
@@ -129,17 +129,12 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
}
}
u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
/* First sb id for RoCE is after all the l2 sb */
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}
u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
{
return QED_CAU_DEF_RX_TIMER_RES;
}
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params)
@@ -162,7 +157,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
p_hwfn->p_rdma_info = p_rdma_info;
p_rdma_info->proto = PROTOCOLID_ROCE;
num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
NULL);
p_rdma_info->num_qps = num_cons / 2;
@@ -275,7 +271,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
return rc;
}
void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -527,6 +523,26 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
rc = qed_rdma_bmap_alloc_id(p_hwfn,
&p_hwfn->p_rdma_info->tid_map, itid);
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
if (rc)
goto out;
rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
return rc;
}
static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
@@ -573,7 +589,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
int qed_rdma_stop(void *rdma_cxt)
static int qed_rdma_stop(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_close_func_ramrod_data *p_ramrod;
@@ -629,8 +645,8 @@ int qed_rdma_stop(void *rdma_cxt)
return rc;
}
int qed_rdma_add_user(void *rdma_cxt,
struct qed_rdma_add_user_out_params *out_params)
static int qed_rdma_add_user(void *rdma_cxt,
struct qed_rdma_add_user_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
u32 dpi_start_offset;
@@ -664,7 +680,7 @@ int qed_rdma_add_user(void *rdma_cxt,
return rc;
}
struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
@@ -680,7 +696,7 @@ struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
return p_port;
}
struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -690,7 +706,7 @@ struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
return p_hwfn->p_rdma_info->dev;
}
void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -701,27 +717,7 @@ void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
rc = qed_rdma_bmap_alloc_id(p_hwfn,
&p_hwfn->p_rdma_info->tid_map, itid);
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
if (rc)
goto out;
rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
return rc;
}
void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
struct qed_hwfn *p_hwfn;
u16 qz_num;
@@ -816,7 +812,7 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
return 0;
}
int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
u32 returned_id;
@@ -836,7 +832,7 @@ int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
return rc;
}
void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -873,8 +869,9 @@ qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
return toggle_bit;
}
int qed_rdma_create_cq(void *rdma_cxt,
struct qed_rdma_create_cq_in_params *params, u16 *icid)
static int qed_rdma_create_cq(void *rdma_cxt,
struct qed_rdma_create_cq_in_params *params,
u16 *icid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
@@ -957,98 +954,10 @@ int qed_rdma_create_cq(void *rdma_cxt,
return rc;
}
int qed_rdma_resize_cq(void *rdma_cxt,
struct qed_rdma_resize_cq_in_params *in_params,
struct qed_rdma_resize_cq_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_resize_cq_output_params *p_ramrod_res;
struct rdma_resize_cq_ramrod_data *p_ramrod;
enum qed_rdma_toggle_bit toggle_bit;
struct qed_sp_init_data init_data;
struct qed_spq_entry *p_ent;
dma_addr_t ramrod_res_phys;
u8 fw_return_code;
int rc = -ENOMEM;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
p_ramrod_res =
(struct rdma_resize_cq_output_params *)
dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct rdma_resize_cq_output_params),
&ramrod_res_phys, GFP_KERNEL);
if (!p_ramrod_res) {
DP_NOTICE(p_hwfn,
"qed resize cq failed: cannot allocate memory (ramrod)\n");
return rc;
}
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = in_params->icid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
RDMA_RAMROD_RESIZE_CQ,
p_hwfn->p_rdma_info->proto, &init_data);
if (rc)
goto err;
p_ramrod = &p_ent->ramrod.rdma_resize_cq;
p_ramrod->flags = 0;
/* toggle the bit for every resize or create cq for a given icid */
toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
in_params->icid);
SET_FIELD(p_ramrod->flags,
RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
SET_FIELD(p_ramrod->flags,
RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
in_params->pbl_two_level);
p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
if (rc)
goto err;
if (fw_return_code != RDMA_RETURN_OK) {
DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
rc = -EINVAL;
goto err;
}
out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct rdma_resize_cq_output_params),
p_ramrod_res, ramrod_res_phys);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
return rc;
err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct rdma_resize_cq_output_params),
p_ramrod_res, ramrod_res_phys);
DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
return rc;
}
int qed_rdma_destroy_cq(void *rdma_cxt,
struct qed_rdma_destroy_cq_in_params *in_params,
struct qed_rdma_destroy_cq_out_params *out_params)
static int
qed_rdma_destroy_cq(void *rdma_cxt,
struct qed_rdma_destroy_cq_in_params *in_params,
struct qed_rdma_destroy_cq_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_destroy_cq_output_params *p_ramrod_res;
@@ -1169,7 +1078,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 responder_icid;
@@ -1793,9 +1702,9 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
return rc;
}
int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *out_params)
static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *out_params)
{
struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
struct roce_query_qp_req_output_params *p_req_ramrod_res;
@@ -1936,7 +1845,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
return rc;
}
int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
@@ -1985,9 +1894,9 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
return 0;
}
int qed_rdma_query_qp(void *rdma_cxt,
struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *out_params)
static int qed_rdma_query_qp(void *rdma_cxt,
struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc;
@@ -2022,7 +1931,7 @@ int qed_rdma_query_qp(void *rdma_cxt,
return rc;
}
int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc = 0;
@@ -2038,7 +1947,7 @@ int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
return rc;
}
struct qed_rdma_qp *
static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
struct qed_rdma_create_qp_in_params *in_params,
struct qed_rdma_create_qp_out_params *out_params)
@@ -2215,9 +2124,9 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
}
int qed_rdma_modify_qp(void *rdma_cxt,
struct qed_rdma_qp *qp,
struct qed_rdma_modify_qp_in_params *params)
static int qed_rdma_modify_qp(void *rdma_cxt,
struct qed_rdma_qp *qp,
struct qed_rdma_modify_qp_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
enum qed_roce_qp_state prev_state;
@@ -2312,8 +2221,9 @@ int qed_rdma_modify_qp(void *rdma_cxt,
return rc;
}
int qed_rdma_register_tid(void *rdma_cxt,
struct qed_rdma_register_tid_in_params *params)
static int
qed_rdma_register_tid(void *rdma_cxt,
struct qed_rdma_register_tid_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_register_tid_ramrod_data *p_ramrod;
@@ -2450,7 +2360,7 @@ int qed_rdma_register_tid(void *rdma_cxt,
return rc;
}
int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_deregister_tid_ramrod_data *p_ramrod;
@@ -2561,7 +2471,8 @@ void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
static int qed_rdma_start(void *rdma_cxt,
struct qed_rdma_start_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_ptt *p_ptt;
@@ -2601,7 +2512,7 @@ static int qed_rdma_init(struct qed_dev *cdev,
return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}
void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -2809,11 +2720,6 @@ static int qed_roce_ll2_stop(struct qed_dev *cdev)
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
int rc;
if (!cdev) {
DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n");
return -EINVAL;
}
if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
return -EINVAL;
@@ -2850,7 +2756,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
int rc;
int i;
if (!cdev || !pkt || !params) {
if (!pkt || !params) {
DP_ERR(cdev,
"roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
cdev, pkt, params);
......
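
The two hunks above belong to patch 1, the Smatch fixes: qed_roce_ll2_stop() and qed_roce_ll2_tx() dereferenced cdev before (or regardless of) testing it for NULL, so the stale checks are dropped. A standalone sketch of the pattern Smatch reports, with hypothetical names:

struct dev {
	int id;
};

/* Smatch output for the function below looks like:
 *
 *   warn: variable dereferenced before check 'd' (see line ...)
 *
 * The NULL test is dead code - or the crash already happened by the
 * time it runs.
 */
int broken_use(struct dev *d)
{
	int id = d->id;		/* dereference first... */

	if (!d)			/* ...check afterwards: too late */
		return -1;
	return id;
}

/* Fixed: drop the check where callers guarantee non-NULL (what the qed
 * patch does for cdev), or test before the first dereference.
 */
int fixed_use(struct dev *d)
{
	if (!d)
		return -1;
	return d->id;
}
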
@@ -95,26 +95,6 @@ struct qed_rdma_info {
enum protocol_type proto;
};
struct qed_rdma_resize_cq_in_params {
u16 icid;
u32 cq_size;
bool pbl_two_level;
u64 pbl_ptr;
u16 pbl_num_pages;
u8 pbl_page_size_log;
};
struct qed_rdma_resize_cq_out_params {
u32 prod;
u32 cons;
};
struct qed_rdma_resize_cnq_in_params {
u32 cnq_id;
u32 pbl_page_size_log;
u64 pbl_ptr;
};
struct qed_rdma_qp {
struct regpair qp_handle;
struct regpair qp_handle_async;
@@ -181,36 +161,55 @@ struct qed_rdma_qp {
dma_addr_t shared_queue_phys_addr;
};
int
qed_rdma_add_user(void *rdma_cxt,
struct qed_rdma_add_user_out_params *out_params);
int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
int
qed_rdma_register_tid(void *rdma_cxt,
struct qed_rdma_register_tid_in_params *params);
void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
int qed_rdma_stop(void *rdma_cxt);
u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe);
int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
struct qed_rdma_modify_qp_in_params *params);
int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *out_params);
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet);
void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet);
void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
u16 data_length,
u8 data_length_error,
u16 parse_flags,
u16 vlan,
u32 src_mac_addr_hi,
u16 src_mac_addr_lo, bool b_last_packet);
#else
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment,
bool b_last_packet) {}
static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment,
bool b_last_packet) {}
static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
u16 data_length,
u8 data_length_error,
u16 parse_flags,
u16 vlan,
u32 src_mac_addr_hi,
u16 src_mac_addr_lo,
bool b_last_packet) {}
#endif
#endif
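
The qed_roce.h hunk above ends with a common kernel idiom: when CONFIG_QED_RDMA is off, the real prototypes are replaced by empty static inline stubs, so always-built callers (the PROTOCOLID_ROCE case in qed_spq.c, the ll2 GSI completion paths) compile and link without per-call-site #ifdefs. A minimal sketch of the idiom follows; CONFIG_EXAMPLE_FEATURE and the feature_* names are made up for illustration.

/* example_feature.h */
#ifndef _EXAMPLE_FEATURE_H
#define _EXAMPLE_FEATURE_H

#include <linux/kconfig.h>	/* IS_ENABLED() */

struct device_ctx;

#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
/* Real implementations live in an object the Makefile adds only when
 * the option is set, e.g. driver-$(CONFIG_EXAMPLE_FEATURE) += feature.o
 */
void feature_init(struct device_ctx *ctx);
void feature_handle_event(struct device_ctx *ctx, int event);
#else
/* Feature compiled out: the empty inline bodies vanish under
 * optimization, and shared code never needs an #ifdef at a call site.
 */
static inline void feature_init(struct device_ctx *ctx) {}
static inline void feature_handle_event(struct device_ctx *ctx,
					int event) {}
#endif

#endif /* _EXAMPLE_FEATURE_H */
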
@@ -80,7 +80,6 @@ union ramrod_data {
struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
struct rdma_create_cq_ramrod_data rdma_create_cq;
struct rdma_resize_cq_ramrod_data rdma_resize_cq;
struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
......
@@ -28,9 +28,7 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#include "qed_roce.h"
#endif
/***************************************************************************
* Structures & Definitions
@@ -240,11 +238,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
case PROTOCOLID_ROCE:
qed_async_roce_event(p_hwfn, p_eqe);
return 0;
#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
......
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
qede-$(CONFIG_QED_RDMA) += qede_roce.o
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
bool qede_roce_supported(struct qede_dev *dev);
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#if IS_ENABLED(CONFIG_QED_RDMA)
int qede_roce_dev_add(struct qede_dev *dev);
void qede_roce_dev_event_open(struct qede_dev *dev);
void qede_roce_dev_event_close(struct qede_dev *dev);
......