Commit a2e7699e authored by Tomer Tayar, committed by David S. Miller

qed*: Refactoring and rearranging FW API with no functional impact

This patch refactors and reorders the FW API files in preparation for
upgrading the code to support new FW.

- Make use of the BIT macro in appropriate places.
- Whitespace changes to align values and code blocks.
- Comments are updated (spelling mistakes fixed, unclear ones removed).
- Group together code blocks which are related or deal with similar
 matters.
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent bbb6189d
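For reference, the "BIT macro" bullet in the message above is a purely mechanical conversion. A minimal, stand-alone sketch of the equivalence follows; the BIT() definition mirrors the usual kernel form, and the constant names here are illustrative only, not taken from the patch:

#include <assert.h>

/* Same shape as the kernel's BIT() helper. */
#define BIT(nr)			(1UL << (nr))

/* Old spelling vs. new spelling, as in the CDUT alignment macro changed below. */
#define SEG_ALIGNMENT		3	/* in 4k chunks */
#define ALIGN_BYTES_OLD		(1 << (SEG_ALIGNMENT + 12))
#define ALIGN_BYTES_NEW		BIT(SEG_ALIGNMENT + 12)

int main(void)
{
	/* Both expand to the same 32 KB value; only the notation changes. */
	assert(ALIGN_BYTES_OLD == ALIGN_BYTES_NEW);
	return 0;
}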
@@ -494,7 +494,7 @@ struct rdma_sq_fmr_wqe {
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7
-__le32 Reserved5;
+__le32 reserved5;
};
/* First element (16 bytes) of fmr wqe */
@@ -574,7 +574,7 @@ struct rdma_sq_fmr_wqe_3rd {
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
-__le32 Reserved5;
+__le32 reserved5;
};
struct rdma_sq_local_inv_wqe {
......
@@ -110,7 +110,7 @@ struct src_ent {
};
#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
-#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12)
#define CONN_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
@@ -2326,7 +2326,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
elem = (union type1_task_context *)elem_start;
SET_FIELD(elem->roce_ctx.tdif_context.flags1,
-	  TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+	  TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
}
}
......
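The SET_FIELD() call in the hunk above illustrates the _MASK/_SHIFT naming convention used throughout these FW API headers. A minimal user-space sketch of that convention follows; the macro bodies are illustrative approximations of the driver helpers, and the EXAMPLE_* field is hypothetical, not part of the patch:

#include <assert.h>
#include <stdint.h>

/* Approximation of the driver's field helpers: a field FOO is described by an
 * unshifted width mask FOO_MASK and a bit position FOO_SHIFT.
 */
#define SET_FIELD(value, name, val)					\
	((value) = ((value) & ~((name##_MASK) << (name##_SHIFT))) |	\
		   (((val) & (name##_MASK)) << (name##_SHIFT)))
#define GET_FIELD(value, name)						\
	(((value) >> (name##_SHIFT)) & (name##_MASK))

/* Hypothetical field pair, in the same style as the renamed
 * TDIF_TASK_CONTEXT_REF_TAG_MASK field above.
 */
#define EXAMPLE_REF_TAG_MASK	0xF
#define EXAMPLE_REF_TAG_SHIFT	4

int main(void)
{
	uint32_t flags1 = 0;

	SET_FIELD(flags1, EXAMPLE_REF_TAG, 0xf);	/* write the 4-bit field */
	assert(GET_FIELD(flags1, EXAMPLE_REF_TAG) == 0xf);
	assert(flags1 == 0xf0);				/* bits 4..7 are set */
	return 0;
}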
@@ -358,20 +358,14 @@ struct phy_defs {
(arr)[i] = qed_rd(dev, ptt, addr); \
} while (0)
-#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
-#endif
-#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
-#endif
-/* extra lines include a signature line + optional latency events line */
+/* Extra lines include a signature line + optional latency events line */
-#ifndef NUM_DBG_LINES
#define NUM_EXTRA_DBG_LINES(block_desc) \
(1 + ((block_desc)->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(block_desc) \
((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
-#endif
#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
@@ -441,23 +435,17 @@ struct phy_defs {
#define FW_IMG_MAIN 1
-#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS 2
-#endif
#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_DEPTH_DWORDS \
(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
-#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS 4
-#endif
#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_DEPTH_DWORDS \
(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
-#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
-#endif
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
@@ -1089,6 +1077,20 @@ static struct block_defs block_xyld_defs = {
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
};
static struct block_defs block_ptld_defs = {
"ptld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_ypld_defs = {
"ypld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_prm_defs = {
"prm",
{true, true}, false, 0,
@@ -1221,6 +1223,34 @@ static struct block_defs block_cau_defs = {
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
};
static struct block_defs block_rgfs_defs = {
"rgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_rgsrc_defs = {
"rgsrc", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_tgfs_defs = {
"tgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_tgsrc_defs = {
"tgsrc", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_umac_defs = {
"umac",
{false, true}, false, 0,
@@ -1338,48 +1368,6 @@ static struct block_defs block_avs_wrap_defs = {
true, false, DBG_RESET_REG_MISCS_PL_UA, 11
};
static struct block_defs block_rgfs_defs = {
"rgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_rgsrc_defs = {
"rgsrc", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_tgfs_defs = {
"tgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_tgsrc_defs = {
"tgsrc", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_ptld_defs = {
"ptld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_ypld_defs = {
"ypld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_aeu_defs = {
"misc_aeu", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
@@ -5596,10 +5584,6 @@ struct igu_fifo_addr_data {
#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
-/********************************* Macros ************************************/
-#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
/***************************** Constant Arrays *******************************/
struct user_dbg_array {
......
@@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
@@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
}
-/* Protocl Configuration */
+/* Protocol Configuration */
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
(p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
......
@@ -54,7 +54,7 @@
struct qed_hwfn;
struct qed_ptt;
-/* opcodes for the event ring */
+/* Opcodes for the event ring */
enum common_event_opcode {
COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP,
@@ -82,6 +82,286 @@ enum common_ramrod_cmd_id {
MAX_COMMON_RAMROD_CMD_ID
};
/* How ll2 should deal with packet upon errors */
enum core_error_handle {
LL2_DROP_PACKET,
LL2_DO_NOTHING,
LL2_ASSERT,
MAX_CORE_ERROR_HANDLE
};
/* Opcodes for the event ring */
enum core_event_opcode {
CORE_EVENT_TX_QUEUE_START,
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
MAX_CORE_EVENT_OPCODE
};
/* The L4 pseudo checksum mode for Core */
enum core_l4_pseudo_checksum_mode {
CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_port_stats {
struct regpair gsi_invalid_hdr;
struct regpair gsi_invalid_pkt_length;
struct regpair gsi_unsupported_pkt_typ;
struct regpair gsi_crcchksm_error;
};
/* Ethernet TX Per Queue Stats */
struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_ucast_bytes;
struct regpair sent_mcast_bytes;
struct regpair sent_bcast_bytes;
struct regpair sent_ucast_pkts;
struct regpair sent_mcast_pkts;
struct regpair sent_bcast_pkts;
};
/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_rx_prod {
__le16 bd_prod;
__le16 cqe_prod;
__le32 reserved;
};
struct core_ll2_tstorm_per_queue_stat {
struct regpair packet_too_big_discard;
struct regpair no_buff_discard;
};
struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_ucast_bytes;
struct regpair rcv_mcast_bytes;
struct regpair rcv_bcast_bytes;
struct regpair rcv_ucast_pkts;
struct regpair rcv_mcast_pkts;
struct regpair rcv_bcast_pkts;
};
/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
CORE_RAMROD_RX_QUEUE_START,
CORE_RAMROD_TX_QUEUE_START,
CORE_RAMROD_RX_QUEUE_STOP,
CORE_RAMROD_TX_QUEUE_STOP,
CORE_RAMROD_RX_QUEUE_FLUSH,
MAX_CORE_RAMROD_CMD_ID
};
/* Core RX CQE Type for Light L2 */
enum core_roce_flavor_type {
CORE_ROCE,
CORE_RROCE,
MAX_CORE_ROCE_FLAVOR_TYPE
};
/* Specifies how ll2 should deal with packets errors: packet_too_big and
* no_buff.
*/
struct core_rx_action_on_error {
u8 error_type;
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
};
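The error_type byte above packs two core_error_handle values using the _MASK/_SHIFT pairs just defined. A hedged, stand-alone sketch of filling it (the SET_FIELD body is an illustrative approximation of the driver helper; the mask/shift names and enum values are copied from the definitions above):

#include <stdint.h>
#include <stdio.h>

#define SET_FIELD(value, name, val)					\
	((value) = ((value) & ~((name##_MASK) << (name##_SHIFT))) |	\
		   (((val) & (name##_MASK)) << (name##_SHIFT)))

#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK	0x3
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT	0
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK		0x3
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT		2

enum core_error_handle { LL2_DROP_PACKET, LL2_DO_NOTHING, LL2_ASSERT };

int main(void)
{
	uint8_t error_type = 0;

	/* Drop packets that are too big; do nothing when no buffer is available. */
	SET_FIELD(error_type, CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
		  LL2_DROP_PACKET);
	SET_FIELD(error_type, CORE_RX_ACTION_ON_ERROR_NO_BUFF, LL2_DO_NOTHING);
	printf("error_type = 0x%02x\n", error_type);
	return 0;
}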
/* Core RX BD for Light L2 */
struct core_rx_bd {
struct regpair addr;
__le16 reserved[4];
};
/* Core RX CM offload BD for Light L2 */
struct core_rx_bd_with_buff_len {
struct regpair addr;
__le16 buff_length;
__le16 reserved[3];
};
/* Core RX CM offload BD for Light L2 */
union core_rx_bd_union {
struct core_rx_bd rx_bd;
struct core_rx_bd_with_buff_len rx_bd_with_len;
};
/* Opaque Data for Light L2 RX CQE */
struct core_rx_cqe_opaque_data {
__le32 data[2];
};
/* Core RX CQE Type for Light L2 */
enum core_rx_cqe_type {
CORE_RX_CQE_ILLEGAL_TYPE,
CORE_RX_CQE_TYPE_REGULAR,
CORE_RX_CQE_TYPE_GSI_OFFLOAD,
CORE_RX_CQE_TYPE_SLOW_PATH,
MAX_CORE_RX_CQE_TYPE
};
/* Core RX CQE for Light L2 */
struct core_rx_fast_path_cqe {
u8 type;
u8 placement_offset;
struct parsing_and_err_flags parse_flags;
__le16 packet_length;
__le16 vlan;
struct core_rx_cqe_opaque_data opaque_data;
struct parsing_err_flags err_flags;
__le16 reserved0;
__le32 reserved1[3];
};
/* Core Rx CM offload CQE */
struct core_rx_gsi_offload_cqe {
u8 type;
u8 data_length_error;
struct parsing_and_err_flags parse_flags;
__le16 data_length;
__le16 vlan;
__le32 src_mac_addrhi;
__le16 src_mac_addrlo;
__le16 qp_id;
__le32 gid_dst[4];
};
/* Core RX CQE for Light L2 */
struct core_rx_slow_path_cqe {
u8 type;
u8 ramrod_cmd_id;
__le16 echo;
struct core_rx_cqe_opaque_data opaque_data;
__le32 reserved1[5];
};
/* Core RX CM offload BD for Light L2 */
union core_rx_cqe_union {
struct core_rx_fast_path_cqe rx_cqe_fp;
struct core_rx_gsi_offload_cqe rx_cqe_gsi;
struct core_rx_slow_path_cqe rx_cqe_sp;
};
/* Ramrod data for rx queue start ramrod */
struct core_rx_start_ramrod_data {
struct regpair bd_base;
struct regpair cqe_pbl_addr;
__le16 mtu;
__le16 sb_id;
u8 sb_index;
u8 complete_cqe_flg;
u8 complete_event_flg;
u8 drop_ttl0_flg;
__le16 num_of_pbl_pages;
u8 inner_vlan_removal_en;
u8 queue_id;
u8 main_func_queue;
u8 mf_si_bcast_accept_all;
u8 mf_si_mcast_accept_all;
struct core_rx_action_on_error action_on_error;
u8 gsi_offload_flag;
u8 reserved[7];
};
/* Ramrod data for rx queue stop ramrod */
struct core_rx_stop_ramrod_data {
u8 complete_cqe_flg;
u8 complete_event_flg;
u8 queue_id;
u8 reserved1;
__le16 reserved2[2];
};
/* Flags for Core TX BD */
struct core_tx_bd_data {
__le16 as_bitfield;
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
#define CORE_TX_BD_DATA_START_BD_MASK 0x1
#define CORE_TX_BD_DATA_START_BD_SHIFT 2
#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
#define CORE_TX_BD_DATA_NBDS_MASK 0xF
#define CORE_TX_BD_DATA_NBDS_SHIFT 8
#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
};
/* Core TX BD for Light L2 */
struct core_tx_bd {
struct regpair addr;
__le16 nbytes;
__le16 nw_vlan_or_lb_echo;
struct core_tx_bd_data bd_data;
__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
#define CORE_TX_BD_TX_DST_MASK 0x3
#define CORE_TX_BD_TX_DST_SHIFT 14
};
/* Light L2 TX Destination */
enum core_tx_dest {
CORE_TX_DEST_NW,
CORE_TX_DEST_LB,
CORE_TX_DEST_RESERVED,
CORE_TX_DEST_DROP,
MAX_CORE_TX_DEST
};
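Because bitfield1 in struct core_tx_bd above is a little-endian __le16, a driver typically composes the value in CPU order and converts it once. The sketch below uses user-space stand-ins for __le16, cpu_to_le16() and SET_FIELD(); only the mask/shift names and the core_tx_dest values are taken from the definitions above:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t __le16_demo;	/* stand-in for the kernel's __le16 */

#define SET_FIELD(value, name, val)					\
	((value) = ((value) & ~((name##_MASK) << (name##_SHIFT))) |	\
		   (((val) & (name##_MASK)) << (name##_SHIFT)))

#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK		0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT	0
#define CORE_TX_BD_TX_DST_MASK			0x3
#define CORE_TX_BD_TX_DST_SHIFT			14

enum core_tx_dest { CORE_TX_DEST_NW, CORE_TX_DEST_LB };

/* Identity on a little-endian host; the kernel uses cpu_to_le16() here. */
static __le16_demo cpu_to_le16_demo(uint16_t v) { return v; }

int main(void)
{
	uint16_t val = 0;
	__le16_demo bitfield1;

	SET_FIELD(val, CORE_TX_BD_L4_HDR_OFFSET_W, 13);	    /* L4 header at word 13 */
	SET_FIELD(val, CORE_TX_BD_TX_DST, CORE_TX_DEST_NW); /* transmit to network */
	bitfield1 = cpu_to_le16_demo(val);
	printf("bitfield1 = 0x%04x\n", bitfield1);
	return 0;
}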
/* Ramrod data for tx queue start ramrod */
struct core_tx_start_ramrod_data {
struct regpair pbl_base_addr;
__le16 mtu;
__le16 sb_id;
u8 sb_index;
u8 stats_en;
u8 stats_id;
u8 conn_type;
__le16 pbl_size;
__le16 qm_pq_id;
u8 gsi_offload_flag;
u8 resrved[3];
};
/* Ramrod data for tx queue stop ramrod */
struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
/* Enum flag for what type of dcb data to update */
enum dcb_dscp_update_mode {
DONT_UPDATE_DCB_DSCP,
UPDATE_DCB,
UPDATE_DSCP,
UPDATE_DCB_DSCP,
MAX_DCB_DSCP_UPDATE_MODE
};
/* The core storm context for the Ystorm */
struct ystorm_core_conn_st_ctx {
__le32 reserved[4];
@@ -369,84 +649,84 @@ struct tstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ #define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 #define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ #define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 #define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ #define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 #define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ #define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 #define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ #define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 #define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ #define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 #define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ #define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 #define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1; u8 flags1;
#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ #define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 #define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ #define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 #define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ #define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 #define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ #define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 #define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2; u8 flags2;
#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ #define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 #define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ #define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 #define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ #define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 #define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ #define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 #define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3; u8 flags3;
#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ #define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 #define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ #define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 #define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ #define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 #define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ #define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 #define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ #define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 #define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ #define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 #define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4; u8 flags4;
#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ #define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 #define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ #define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 #define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ #define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 #define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ #define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 #define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ #define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 #define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ #define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 #define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ #define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 #define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ #define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 #define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5; u8 flags5;
#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ #define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 #define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ #define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 #define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ #define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 #define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ #define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 #define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ #define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 #define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ #define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 #define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ #define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 #define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ #define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 #define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0; __le32 reg0;
__le32 reg1; __le32 reg1;
@@ -533,286 +813,34 @@ struct ustorm_core_conn_ag_ctx {
__le32 rx_producers;
__le32 reg1;
__le32 reg2;
__le32 reg3;
__le16 word2;
__le16 word3;
};
/* The core storm context for the Mstorm */
struct mstorm_core_conn_st_ctx {
__le32 reserved[24];
};
/* The core storm context for the Ustorm */
struct ustorm_core_conn_st_ctx {
__le32 reserved[4];
};
/* core connection context */
struct core_conn_context {
struct ystorm_core_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_core_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_core_conn_st_ctx xstorm_st_context;
struct xstorm_core_conn_ag_ctx xstorm_ag_context;
struct tstorm_core_conn_ag_ctx tstorm_ag_context;
struct ustorm_core_conn_ag_ctx ustorm_ag_context;
struct mstorm_core_conn_st_ctx mstorm_st_context;
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
};
enum core_error_handle {
LL2_DROP_PACKET,
LL2_DO_NOTHING,
LL2_ASSERT,
MAX_CORE_ERROR_HANDLE
};
enum core_event_opcode {
CORE_EVENT_TX_QUEUE_START,
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
MAX_CORE_EVENT_OPCODE
};
enum core_l4_pseudo_checksum_mode {
CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
struct core_ll2_port_stats {
struct regpair gsi_invalid_hdr;
struct regpair gsi_invalid_pkt_length;
struct regpair gsi_unsupported_pkt_typ;
struct regpair gsi_crcchksm_error;
};
struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_ucast_bytes;
struct regpair sent_mcast_bytes;
struct regpair sent_bcast_bytes;
struct regpair sent_ucast_pkts;
struct regpair sent_mcast_pkts;
struct regpair sent_bcast_pkts;
};
struct core_ll2_rx_prod {
__le16 bd_prod;
__le16 cqe_prod;
__le32 reserved;
};
struct core_ll2_tstorm_per_queue_stat {
struct regpair packet_too_big_discard;
struct regpair no_buff_discard;
};
struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_ucast_bytes;
struct regpair rcv_mcast_bytes;
struct regpair rcv_bcast_bytes;
struct regpair rcv_ucast_pkts;
struct regpair rcv_mcast_pkts;
struct regpair rcv_bcast_pkts;
};
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
CORE_RAMROD_RX_QUEUE_START,
CORE_RAMROD_TX_QUEUE_START,
CORE_RAMROD_RX_QUEUE_STOP,
CORE_RAMROD_TX_QUEUE_STOP,
CORE_RAMROD_RX_QUEUE_FLUSH,
MAX_CORE_RAMROD_CMD_ID
};
enum core_roce_flavor_type {
CORE_ROCE,
CORE_RROCE,
MAX_CORE_ROCE_FLAVOR_TYPE
};
struct core_rx_action_on_error {
u8 error_type;
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
};
struct core_rx_bd {
struct regpair addr;
__le16 reserved[4];
};
struct core_rx_bd_with_buff_len {
struct regpair addr;
__le16 buff_length;
__le16 reserved[3];
};
union core_rx_bd_union {
struct core_rx_bd rx_bd;
struct core_rx_bd_with_buff_len rx_bd_with_len;
};
struct core_rx_cqe_opaque_data {
__le32 data[2];
};
enum core_rx_cqe_type {
CORE_RX_CQE_ILLIGAL_TYPE,
CORE_RX_CQE_TYPE_REGULAR,
CORE_RX_CQE_TYPE_GSI_OFFLOAD,
CORE_RX_CQE_TYPE_SLOW_PATH,
MAX_CORE_RX_CQE_TYPE
};
struct core_rx_fast_path_cqe {
u8 type;
u8 placement_offset;
struct parsing_and_err_flags parse_flags;
__le16 packet_length;
__le16 vlan;
struct core_rx_cqe_opaque_data opaque_data;
struct parsing_err_flags err_flags;
__le16 reserved0;
__le32 reserved1[3];
};
struct core_rx_gsi_offload_cqe {
u8 type;
u8 data_length_error;
struct parsing_and_err_flags parse_flags;
__le16 data_length;
__le16 vlan;
__le32 src_mac_addrhi;
__le16 src_mac_addrlo;
__le16 qp_id;
__le32 gid_dst[4];
};
struct core_rx_slow_path_cqe {
u8 type;
u8 ramrod_cmd_id;
__le16 echo;
struct core_rx_cqe_opaque_data opaque_data;
__le32 reserved1[5];
};
union core_rx_cqe_union {
struct core_rx_fast_path_cqe rx_cqe_fp;
struct core_rx_gsi_offload_cqe rx_cqe_gsi;
struct core_rx_slow_path_cqe rx_cqe_sp;
};
struct core_rx_start_ramrod_data {
struct regpair bd_base;
struct regpair cqe_pbl_addr;
__le16 mtu;
__le16 sb_id;
u8 sb_index;
u8 complete_cqe_flg;
u8 complete_event_flg;
u8 drop_ttl0_flg;
__le16 num_of_pbl_pages;
u8 inner_vlan_removal_en;
u8 queue_id;
u8 main_func_queue;
u8 mf_si_bcast_accept_all;
u8 mf_si_mcast_accept_all;
struct core_rx_action_on_error action_on_error;
u8 gsi_offload_flag;
u8 reserved[7];
};
struct core_rx_stop_ramrod_data {
u8 complete_cqe_flg;
u8 complete_event_flg;
u8 queue_id;
u8 reserved1;
__le16 reserved2[2];
};
struct core_tx_bd_data {
__le16 as_bitfield;
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
#define CORE_TX_BD_DATA_START_BD_MASK 0x1
#define CORE_TX_BD_DATA_START_BD_SHIFT 2
#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
#define CORE_TX_BD_DATA_NBDS_MASK 0xF
#define CORE_TX_BD_DATA_NBDS_SHIFT 8
#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
};
struct core_tx_bd {
struct regpair addr;
__le16 nbytes;
__le16 nw_vlan_or_lb_echo;
struct core_tx_bd_data bd_data;
__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
#define CORE_TX_BD_TX_DST_MASK 0x3
#define CORE_TX_BD_TX_DST_SHIFT 14
};
enum core_tx_dest {
CORE_TX_DEST_NW,
CORE_TX_DEST_LB,
CORE_TX_DEST_RESERVED,
CORE_TX_DEST_DROP,
MAX_CORE_TX_DEST
};
struct core_tx_start_ramrod_data {
struct regpair pbl_base_addr;
__le16 mtu;
__le16 sb_id;
u8 sb_index;
u8 stats_en;
u8 stats_id;
u8 conn_type;
__le16 pbl_size;
__le16 qm_pq_id;
u8 gsi_offload_flag;
u8 resrved[3];
};
struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
enum dcb_dscp_update_mode {
DONT_UPDATE_DCB_DSCP,
UPDATE_DCB,
UPDATE_DSCP,
UPDATE_DCB_DSCP,
MAX_DCB_DSCP_UPDATE_MODE
};
/* The core storm context for the Mstorm */
struct mstorm_core_conn_st_ctx {
__le32 reserved[24];
};
/* The core storm context for the Ustorm */
struct ustorm_core_conn_st_ctx {
__le32 reserved[4];
};
/* core connection context */
struct core_conn_context {
struct ystorm_core_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_core_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_core_conn_st_ctx xstorm_st_context;
struct xstorm_core_conn_ag_ctx xstorm_ag_context;
struct tstorm_core_conn_ag_ctx tstorm_ag_context;
struct ustorm_core_conn_ag_ctx ustorm_ag_context;
struct mstorm_core_conn_st_ctx mstorm_st_context;
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
};
struct eth_mstorm_per_pf_stat {
@@ -896,6 +924,49 @@ struct eth_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
/* Event Ring VF-PF Channel data */
struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
/* Event Ring malicious VF data */
struct malicious_vf_eqe_data {
u8 vf_id;
u8 err_id;
__le16 reserved[3];
};
/* Event Ring initial cleanup data */
struct initial_cleanup_eqe_data {
u8 vf_id;
u8 reserved[7];
};
/* Event Data Union */
union event_ring_data {
u8 bytes[8];
struct vf_pf_channel_eqe_data vf_pf_channel;
struct iscsi_eqe_data iscsi_info;
union rdma_eqe_data rdma_data;
struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
};
/* Event Ring Entry */
struct event_ring_entry {
u8 protocol_id;
u8 opcode;
__le16 reserved0;
__le16 echo;
u8 fw_return_code;
u8 flags;
#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
union event_ring_data data;
};
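As a hedged illustration of how such an entry is typically consumed, the sketch below checks the ASYNC flag with a GET_FIELD-style helper; the helper body and the dispatch logic are schematic, and only the flag mask/shift come from the structure above:

#include <stdint.h>
#include <stdio.h>

#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))

#define EVENT_RING_ENTRY_ASYNC_MASK	0x1
#define EVENT_RING_ENTRY_ASYNC_SHIFT	0

struct eqe_demo {		/* reduced stand-in for struct event_ring_entry */
	uint8_t protocol_id;
	uint8_t opcode;
	uint16_t echo;
	uint8_t flags;
};

int main(void)
{
	struct eqe_demo eqe = { .protocol_id = 0, .opcode = 1, .echo = 7, .flags = 0x1 };

	if (GET_FIELD(eqe.flags, EVENT_RING_ENTRY_ASYNC))
		printf("async event: opcode %u, not tied to a pending slowpath request\n",
		       eqe.opcode);
	else
		printf("completion: match echo %u against a pending slowpath request\n",
		       eqe.echo);
	return 0;
}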
/* Event Ring Next Page Address */
struct event_ring_next_addr {
struct regpair addr;
@@ -908,6 +979,7 @@ union event_ring_element {
struct event_ring_next_addr next_addr;
};
/* Ports mode */
enum fw_flow_ctrl_mode {
flow_ctrl_pause,
flow_ctrl_pfc,
@@ -928,7 +1000,7 @@ enum iwarp_ll2_tx_queues {
MAX_IWARP_LL2_TX_QUEUES
};
-/* Mstorm non-triggering VF zone */
+/* Malicious VF error ID */
enum malicious_vf_error_id {
MALICIOUS_VF_NO_ERROR,
VF_PF_CHANNEL_NOT_READY,
@@ -954,6 +1026,7 @@ enum malicious_vf_error_id {
MAX_MALICIOUS_VF_ERROR_ID
};
/* Mstorm non-triggering VF zone */
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
@@ -962,7 +1035,6 @@ struct mstorm_non_trigger_vf_zone {
/* Mstorm VF zone */
struct mstorm_vf_zone {
struct mstorm_non_trigger_vf_zone non_trigger;
};
/* personality per PF */
@@ -974,7 +1046,7 @@ enum personality_type {
PERSONALITY_RDMA,
PERSONALITY_CORE,
PERSONALITY_ETH,
-PERSONALITY_RESERVED4,
+PERSONALITY_RESERVED,
MAX_PERSONALITY_TYPE
};
@@ -1017,6 +1089,7 @@ struct pf_start_ramrod_data {
struct hsi_fp_ver_struct hsi_fp_ver;
};
/* Data for port update ramrod */
struct protocol_dcb_data {
u8 dcb_enable_flag;
u8 reserved_a;
@@ -1026,6 +1099,7 @@ struct protocol_dcb_data {
u8 reserved0;
};
/* Update tunnel configuration */
struct pf_update_tunnel_config {
u8 update_rx_pf_clss;
u8 update_rx_def_ucast_clss;
@@ -1042,6 +1116,7 @@ struct pf_update_tunnel_config {
__le16 reserved;
};
/* Data for port update ramrod */
struct pf_update_ramrod_data {
u8 pf_id;
u8 update_eth_dcb_data_mode;
@@ -1079,11 +1154,13 @@ enum protocol_version_array_key {
MAX_PROTOCOL_VERSION_ARRAY_KEY
};
/* RDMA TX Stats */
struct rdma_sent_stats {
struct regpair sent_bytes;
struct regpair sent_pkts;
};
/* Pstorm non-triggering VF zone */
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
struct rdma_sent_stats rdma_stats;
@@ -1103,11 +1180,13 @@ struct ramrod_header {
__le16 echo;
};
/* RDMA RX Stats */
struct rdma_rcv_stats {
struct regpair rcv_bytes;
struct regpair rcv_pkts;
};
/* Slowpath Element (SPQE) */
struct slow_path_element {
struct ramrod_header hdr;
struct regpair data_ptr;
@@ -1197,6 +1276,7 @@ struct vf_stop_ramrod_data {
__le32 reserved2;
};
/* VF zone size mode */
enum vf_zone_size_mode {
VF_ZONE_SIZE_MODE_DEFAULT,
VF_ZONE_SIZE_MODE_DOUBLE,
@@ -1204,6 +1284,7 @@ enum vf_zone_size_mode {
MAX_VF_ZONE_SIZE_MODE
};
/* Attentions status block */
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
@@ -1212,12 +1293,6 @@ struct atten_status_block {
__le32 reserved1;
};
enum command_type_bit {
IGU_COMMAND_TYPE_NOP = 0,
IGU_COMMAND_TYPE_SET = 1,
MAX_COMMAND_TYPE_BIT
};
/* DMAE command */
struct dmae_cmd {
__le32 opcode;
@@ -1841,7 +1916,7 @@ struct dbg_attn_block_result {
struct dbg_attn_reg_result reg_results[15];
};
-/* mode header */
+/* Mode header */
struct dbg_mode_hdr {
__le16 data;
#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
@@ -1863,25 +1938,28 @@ struct dbg_attn_reg {
__le32 mask_address;
};
-/* attention types */
+/* Attention types */
enum dbg_attn_type {
ATTN_TYPE_INTERRUPT,
ATTN_TYPE_PARITY,
MAX_DBG_ATTN_TYPE
};
/* Debug Bus block data */
struct dbg_bus_block {
u8 num_of_lines;
u8 has_latency_events;
__le16 lines_offset;
};
/* Debug Bus block user data */
struct dbg_bus_block_user_data {
u8 num_of_lines;
u8 has_latency_events;
__le16 names_offset;
};
/* Block Debug line data */
struct dbg_bus_line {
u8 data;
#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
@@ -1893,14 +1971,14 @@ struct dbg_bus_line {
u8 group_sizes;
};
-/* condition header for registers dump */
+/* Condition header for registers dump */
struct dbg_dump_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
u8 block_id; /* block ID */
u8 data_size; /* size in dwords of the data following this header */
};
-/* memory data for registers dump */
+/* Memory data for registers dump */
struct dbg_dump_mem {
__le32 dword0;
#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
@@ -1916,18 +1994,18 @@ struct dbg_dump_mem {
#define DBG_DUMP_MEM_RESERVED_SHIFT 25
};
-/* register data for registers dump */
+/* Register data for registers dump */
struct dbg_dump_reg {
__le32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF
#define DBG_DUMP_REG_LENGTH_SHIFT 24
};
-/* split header for registers dump */
+/* Split header for registers dump */
struct dbg_dump_split_hdr {
__le32 hdr;
#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
@@ -1936,7 +2014,7 @@ struct dbg_dump_split_hdr {
#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
};
-/* condition header for idle check */
+/* Condition header for idle check */
struct dbg_idle_chk_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
__le16 data_size; /* size in dwords of the data following this header */
@@ -2022,7 +2100,7 @@ struct dbg_idle_chk_rule_parsing_data {
#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
};
-/* idle check severity types */
+/* Idle check severity types */
enum dbg_idle_chk_severity_types {
/* idle check failure should cause an error */
IDLE_CHK_SEVERITY_ERROR,
@@ -2072,6 +2150,7 @@ enum dbg_bus_clients {
MAX_DBG_BUS_CLIENTS
};
/* Debug Bus constraint operation types */
enum dbg_bus_constraint_ops {
DBG_BUS_CONSTRAINT_OP_EQ,
DBG_BUS_CONSTRAINT_OP_NE,
@@ -2086,6 +2165,7 @@ enum dbg_bus_constraint_ops {
MAX_DBG_BUS_CONSTRAINT_OPS
};
/* Debug Bus trigger state data */
struct dbg_bus_trigger_state_data {
u8 data;
#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
@@ -2165,6 +2245,7 @@ struct dbg_bus_data {
struct dbg_bus_storm_data storms[6];
};
/* Debug bus filter types */
enum dbg_bus_filter_types {
DBG_BUS_FILTER_TYPE_OFF,
DBG_BUS_FILTER_TYPE_PRE,
@@ -2181,6 +2262,7 @@ enum dbg_bus_frame_modes {
MAX_DBG_BUS_FRAME_MODES
};
/* Debug bus other engine mode */
enum dbg_bus_other_engine_modes {
DBG_BUS_OTHER_ENGINE_MODE_NONE,
DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
@@ -2190,12 +2272,14 @@ enum dbg_bus_other_engine_modes {
MAX_DBG_BUS_OTHER_ENGINE_MODES
};
/* Debug bus post-trigger recording types */
enum dbg_bus_post_trigger_types {
DBG_BUS_POST_TRIGGER_RECORD,
DBG_BUS_POST_TRIGGER_DROP,
MAX_DBG_BUS_POST_TRIGGER_TYPES
};
/* Debug bus pre-trigger recording types */
enum dbg_bus_pre_trigger_types {
DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
@@ -2203,11 +2287,10 @@ enum dbg_bus_pre_trigger_types {
MAX_DBG_BUS_PRE_TRIGGER_TYPES
};
/* Debug bus SEMI frame modes */
enum dbg_bus_semi_frame_modes {
-DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST =
-	0,
-DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST =
-	3,
+DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
MAX_DBG_BUS_SEMI_FRAME_MODES
};
@@ -2220,6 +2303,7 @@ enum dbg_bus_states {
MAX_DBG_BUS_STATES
};
/* Debug Bus Storm modes */
enum dbg_bus_storm_modes {
DBG_BUS_STORM_MODE_PRINTF,
DBG_BUS_STORM_MODE_PRAM_ADDR,
@@ -2406,6 +2490,7 @@ struct dbg_tools_data {
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
/* BRB RAM init requirements */
struct init_brb_ram_req {
__le32 guranteed_per_tc;
__le32 headroom_per_tc;
@@ -2414,17 +2499,20 @@ struct init_brb_ram_req {
u8 num_active_tcs[MAX_NUM_PORTS];
};
/* ETS per-TC init requirements */
struct init_ets_tc_req {
u8 use_sp;
u8 use_wfq;
__le16 weight;
};
/* ETS init requirements */
struct init_ets_req {
__le32 mtu;
struct init_ets_tc_req tc_req[NUM_OF_TCS];
};
/* NIG LB RL init requirements */
struct init_nig_lb_rl_req {
__le16 lb_mac_rate;
__le16 lb_rate;
@@ -2432,15 +2520,18 @@ struct init_nig_lb_rl_req {
__le16 tc_rate[NUM_OF_PHYS_TCS];
};
/* NIG TC mapping for each priority */
struct init_nig_pri_tc_map_entry {
u8 tc_id;
u8 valid;
};
/* NIG priority to TC map init requirements */
struct init_nig_pri_tc_map_req {
struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
};
/* QM per-port init parameters */
struct init_qm_port_params {
u8 active;
u8 active_phys_tcs;
@@ -2563,7 +2654,7 @@ struct bin_buffer_hdr {
__le32 length;
};
-/* binary init buffer types */
+/* Binary init buffer types */
enum bin_init_buffer_type {
BIN_BUF_INIT_FW_VER_INFO,
BIN_BUF_INIT_CMD,
@@ -2793,6 +2884,7 @@ struct iro {
};
/***************************** Public Functions *******************************/
/**
 * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
 * arrays.
@@ -3119,6 +3211,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
#define MAX_NAME_LEN 16
/***************************** Public Functions *******************************/
/**
 * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
 * debug arrays.
@@ -3820,115 +3913,194 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 pf_id, bool tcp, bool udp,
bool ipv4, bool ipv6);
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
/* Tstorm port statistics */
#define TSTORM_PORT_STAT_OFFSET(port_id) \
(IRO[1].base + ((port_id) * IRO[1].m1))
#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
/* Tstorm ll2 port statistics */
#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
(IRO[2].base + ((port_id) * IRO[2].m1))
#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
/* Ustorm VF-PF Channel ready flag */
#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
(IRO[3].base + ((vf_id) * IRO[3].m1))
#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
/* Ustorm Final flr cleanup ack */
#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
-(IRO[4].base + (pf_id) * IRO[4].m1)
+(IRO[4].base + ((pf_id) * IRO[4].m1))
#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
/* Ustorm Event ring consumer */
#define USTORM_EQE_CONS_OFFSET(pf_id) \
(IRO[5].base + ((pf_id) * IRO[5].m1))
#define USTORM_EQE_CONS_SIZE (IRO[5].size)
/* Ustorm eth queue zone */
#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
(IRO[6].base + ((queue_zone_id) * IRO[6].m1))
#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
/* Ustorm Common Queue ring consumer */
#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
/* Tstorm producers */
#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
(IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
/* Tstorm LightL2 queue statistics */
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
(IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
/* Ustorm LiteL2 queue statistics */
#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
(IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
/* Pstorm LiteL2 queue statistics */
#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
(IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17]. size)
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
/* Mstorm queue statistics */
#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[18].base + ((stat_counter_id) * IRO[18].m1))
#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
/* Mstorm ETH PF queues producers */
#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
(IRO[19].base + ((queue_id) * IRO[19].m1))
#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
 * mode.
 */
#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
(IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
/* TPA agregation timeout in us resolution (on ASIC) */
#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
/* Mstorm pf statistics */
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
(IRO[22].base + ((pf_id) * IRO[22].m1))
#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
/* Ustorm queue statistics */
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[23].base + ((stat_counter_id) * IRO[23].m1))
#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
/* Ustorm pf statistics */
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id)\
(IRO[24].base + ((pf_id) * IRO[24].m1))
#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
/* Pstorm queue statistics */
#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ #define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[25].base + ((stat_counter_id) * IRO[25].m1)) (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size) #define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
/* Pstorm pf statistics */
#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ #define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
(IRO[26].base + ((pf_id) * IRO[26].m1)) (IRO[26].base + ((pf_id) * IRO[26].m1))
#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size) #define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
(IRO[27].base + ((ethtype) * IRO[27].m1)) /* Control frame's EthType configuration for TX control frame security */
#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
(IRO[27].base + ((eth_type_id) * IRO[27].m1))
#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size) #define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
/* Tstorm last parser message */
#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base) #define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size) #define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
/* Tstorm Eth Rx rate limit */
#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ #define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
(IRO[29].base + ((pf_id) * IRO[29].m1)) (IRO[29].base + ((pf_id) * IRO[29].m1))
#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size) #define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
/* Xstorm queue zone */
#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ #define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
(IRO[30].base + ((queue_id) * IRO[30].m1)) (IRO[30].base + ((queue_id) * IRO[30].m1))
#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) #define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
/* Tstorm cmdq-cons of given command queue-id */
#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
(IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) #define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
* BDqueue-id.
*/
#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
(IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) #define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
(IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) #define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
/* Tstorm iSCSI RX stats */
#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
(IRO[37].base + ((pf_id) * IRO[37].m1)) (IRO[37].base + ((pf_id) * IRO[37].m1))
#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) #define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
/* Mstorm iSCSI RX stats */
#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
(IRO[38].base + ((pf_id) * IRO[38].m1)) (IRO[38].base + ((pf_id) * IRO[38].m1))
#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) #define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
/* Ustorm iSCSI RX stats */
#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
(IRO[39].base + ((pf_id) * IRO[39].m1)) (IRO[39].base + ((pf_id) * IRO[39].m1))
#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) #define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
/* Xstorm iSCSI TX stats */
#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
(IRO[40].base + ((pf_id) * IRO[40].m1)) (IRO[40].base + ((pf_id) * IRO[40].m1))
#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) #define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
/* Ystorm iSCSI TX stats */
#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
(IRO[41].base + ((pf_id) * IRO[41].m1)) (IRO[41].base + ((pf_id) * IRO[41].m1))
#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) #define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
/* Pstorm iSCSI TX stats */
#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
(IRO[42].base + ((pf_id) * IRO[42].m1)) (IRO[42].base + ((pf_id) * IRO[42].m1))
#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) #define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
/* Tstorm FCoE RX stats */
#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
(IRO[43].base + ((pf_id) * IRO[43].m1))
/* Pstorm FCoE TX stats */
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
(IRO[44].base + ((pf_id) * IRO[44].m1))
/* Pstorm RDMA queue statistics */
#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ #define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
(IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) #define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
/* Tstorm RDMA queue statistics */
#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ #define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
(IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
(IRO[43].base + ((pf_id) * IRO[43].m1))
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
(IRO[44].base + ((pf_id) * IRO[44].m1))
static const struct iro iro_arr[49] = { static const struct iro iro_arr[49] = {
{0x0, 0x0, 0x0, 0x0, 0x8}, {0x0, 0x0, 0x0, 0x0, 0x8},
...@@ -4946,6 +5118,7 @@ struct eth_conn_context { ...@@ -4946,6 +5118,7 @@ struct eth_conn_context {
struct mstorm_eth_conn_st_ctx mstorm_st_context; struct mstorm_eth_conn_st_ctx mstorm_st_context;
}; };
/* Error codes for Ethernet ramrods and filter operations */
enum eth_error_code { enum eth_error_code {
ETH_OK = 0x00, ETH_OK = 0x00,
ETH_FILTERS_MAC_ADD_FAIL_FULL, ETH_FILTERS_MAC_ADD_FAIL_FULL,
...@@ -4972,6 +5145,7 @@ enum eth_error_code { ...@@ -4972,6 +5145,7 @@ enum eth_error_code {
MAX_ETH_ERROR_CODE MAX_ETH_ERROR_CODE
}; };
/* Opcodes for the event ring */
enum eth_event_opcode { enum eth_event_opcode {
ETH_EVENT_UNUSED, ETH_EVENT_UNUSED,
ETH_EVENT_VPORT_START, ETH_EVENT_VPORT_START,
...@@ -5039,6 +5213,7 @@ enum eth_filter_type { ...@@ -5039,6 +5213,7 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE MAX_ETH_FILTER_TYPE
}; };
/* Eth IPv4 Fragment Type */
enum eth_ipv4_frag_type { enum eth_ipv4_frag_type {
ETH_IPV4_NOT_FRAG, ETH_IPV4_NOT_FRAG,
ETH_IPV4_FIRST_FRAG, ETH_IPV4_FIRST_FRAG,
...@@ -5046,12 +5221,14 @@ enum eth_ipv4_frag_type { ...@@ -5046,12 +5221,14 @@ enum eth_ipv4_frag_type {
MAX_ETH_IPV4_FRAG_TYPE MAX_ETH_IPV4_FRAG_TYPE
}; };
/* Eth IP type */
enum eth_ip_type { enum eth_ip_type {
ETH_IPV4, ETH_IPV4,
ETH_IPV6, ETH_IPV6,
MAX_ETH_IP_TYPE MAX_ETH_IP_TYPE
}; };
/* Ethernet Ramrod Command IDs */
enum eth_ramrod_cmd_id { enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED, ETH_RAMROD_UNUSED,
ETH_RAMROD_VPORT_START, ETH_RAMROD_VPORT_START,
...@@ -5073,7 +5250,7 @@ enum eth_ramrod_cmd_id { ...@@ -5073,7 +5250,7 @@ enum eth_ramrod_cmd_id {
MAX_ETH_RAMROD_CMD_ID MAX_ETH_RAMROD_CMD_ID
}; };
/* return code from eth sp ramrods */ /* Return code from eth sp ramrods */
struct eth_return_code { struct eth_return_code {
u8 value; u8 value;
#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F #define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
...@@ -5209,6 +5386,7 @@ struct eth_vport_tx_mode { ...@@ -5209,6 +5386,7 @@ struct eth_vport_tx_mode {
__le16 reserved2[3]; __le16 reserved2[3];
}; };
/* GFT filter update action type */
enum gft_filter_update_action { enum gft_filter_update_action {
GFT_ADD_FILTER, GFT_ADD_FILTER,
GFT_DELETE_FILTER, GFT_DELETE_FILTER,
...@@ -5221,6 +5399,7 @@ enum gft_logic_filter_type { ...@@ -5221,6 +5399,7 @@ enum gft_logic_filter_type {
MAX_GFT_LOGIC_FILTER_TYPE MAX_GFT_LOGIC_FILTER_TYPE
}; };
/* Ramrod data for rx add openflow filter */
struct rx_add_openflow_filter_data { struct rx_add_openflow_filter_data {
__le16 action_icid; __le16 action_icid;
u8 priority; u8 priority;
...@@ -5244,11 +5423,13 @@ struct rx_add_openflow_filter_data { ...@@ -5244,11 +5423,13 @@ struct rx_add_openflow_filter_data {
__le16 l4_src_port; __le16 l4_src_port;
}; };
/* Ramrod data for rx create gft action */
struct rx_create_gft_action_data { struct rx_create_gft_action_data {
u8 vport_id; u8 vport_id;
u8 reserved[7]; u8 reserved[7];
}; };
/* Ramrod data for rx create openflow action */
struct rx_create_openflow_action_data { struct rx_create_openflow_action_data {
u8 vport_id; u8 vport_id;
u8 reserved[7]; u8 reserved[7];
...@@ -5286,7 +5467,7 @@ struct rx_queue_start_ramrod_data { ...@@ -5286,7 +5467,7 @@ struct rx_queue_start_ramrod_data {
struct regpair reserved2; struct regpair reserved2;
}; };
/* Ramrod data for rx queue start ramrod */ /* Ramrod data for rx queue stop ramrod */
struct rx_queue_stop_ramrod_data { struct rx_queue_stop_ramrod_data {
__le16 rx_queue_id; __le16 rx_queue_id;
u8 complete_cqe_flg; u8 complete_cqe_flg;
...@@ -5324,6 +5505,9 @@ struct rx_udp_filter_data { ...@@ -5324,6 +5505,9 @@ struct rx_udp_filter_data {
__le32 tenant_id; __le32 tenant_id;
}; };
/* Add or delete a GFT filter - the filter is specified as a packet header of
 * the packet type that should pass a certain FW flow.
 */
struct rx_update_gft_filter_data { struct rx_update_gft_filter_data {
struct regpair pkt_hdr_addr; struct regpair pkt_hdr_addr;
__le16 pkt_hdr_length; __le16 pkt_hdr_length;
...@@ -5963,6 +6147,7 @@ struct xstorm_eth_hw_conn_ag_ctx { ...@@ -5963,6 +6147,7 @@ struct xstorm_eth_hw_conn_ag_ctx {
__le16 conn_dpi; __le16 conn_dpi;
}; };
/* GFT CAM line struct */
struct gft_cam_line { struct gft_cam_line {
__le32 camline; __le32 camline;
#define GFT_CAM_LINE_VALID_MASK 0x1 #define GFT_CAM_LINE_VALID_MASK 0x1
...@@ -5975,6 +6160,7 @@ struct gft_cam_line { ...@@ -5975,6 +6160,7 @@ struct gft_cam_line {
#define GFT_CAM_LINE_RESERVED1_SHIFT 29 #define GFT_CAM_LINE_RESERVED1_SHIFT 29
}; };
/* GFT CAM line struct with fields breakout */
struct gft_cam_line_mapped { struct gft_cam_line_mapped {
__le32 camline; __le32 camline;
#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1 #define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
...@@ -6008,12 +6194,14 @@ union gft_cam_line_union { ...@@ -6008,12 +6194,14 @@ union gft_cam_line_union {
struct gft_cam_line_mapped cam_line_mapped; struct gft_cam_line_mapped cam_line_mapped;
}; };
/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version { enum gft_profile_ip_version {
GFT_PROFILE_IPV4 = 0, GFT_PROFILE_IPV4 = 0,
GFT_PROFILE_IPV6 = 1, GFT_PROFILE_IPV6 = 1,
MAX_GFT_PROFILE_IP_VERSION MAX_GFT_PROFILE_IP_VERSION
}; };
/* Profile key struct for GFT logic in Prs */
struct gft_profile_key { struct gft_profile_key {
__le16 profile_key; __le16 profile_key;
#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1 #define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
...@@ -6030,6 +6218,7 @@ struct gft_profile_key { ...@@ -6030,6 +6218,7 @@ struct gft_profile_key {
#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14 #define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
}; };
/* Used in gft_profile_key: Indication for tunnel type */
enum gft_profile_tunnel_type { enum gft_profile_tunnel_type {
GFT_PROFILE_NO_TUNNEL = 0, GFT_PROFILE_NO_TUNNEL = 0,
GFT_PROFILE_VXLAN_TUNNEL = 1, GFT_PROFILE_VXLAN_TUNNEL = 1,
...@@ -6040,6 +6229,7 @@ enum gft_profile_tunnel_type { ...@@ -6040,6 +6229,7 @@ enum gft_profile_tunnel_type {
MAX_GFT_PROFILE_TUNNEL_TYPE MAX_GFT_PROFILE_TUNNEL_TYPE
}; };
/* Used in gft_profile_key: Indication for protocol type */
enum gft_profile_upper_protocol_type { enum gft_profile_upper_protocol_type {
GFT_PROFILE_ROCE_PROTOCOL = 0, GFT_PROFILE_ROCE_PROTOCOL = 0,
GFT_PROFILE_RROCE_PROTOCOL = 1, GFT_PROFILE_RROCE_PROTOCOL = 1,
...@@ -6060,6 +6250,7 @@ enum gft_profile_upper_protocol_type { ...@@ -6060,6 +6250,7 @@ enum gft_profile_upper_protocol_type {
MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
}; };
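/* Illustrative sketch (not part of this patch): the gft_profile_key above
 * packs several small fields into one 16-bit value through its mask/shift
 * pairs. SET_FIELD() is the generic mask/shift helper from the common HSI
 * headers; the TUNNEL_TYPE and UPPER_PROTOCOL_TYPE field names are assumed
 * from the portion elided by the hunk above, and the function name is
 * hypothetical.
 */
static u16 example_build_gft_profile_key(void)
{
	u16 key = 0;

	SET_FIELD(key, GFT_PROFILE_KEY_IP_VERSION, GFT_PROFILE_IPV4);
	SET_FIELD(key, GFT_PROFILE_KEY_TUNNEL_TYPE, GFT_PROFILE_NO_TUNNEL);
	SET_FIELD(key, GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE,
		  GFT_PROFILE_ROCE_PROTOCOL);

	return key;
}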
/* GFT RAM line struct */
struct gft_ram_line { struct gft_ram_line {
__le32 lo; __le32 lo;
#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3 #define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
...@@ -6149,6 +6340,7 @@ struct gft_ram_line { ...@@ -6149,6 +6340,7 @@ struct gft_ram_line {
#define GFT_RAM_LINE_RESERVED1_SHIFT 10 #define GFT_RAM_LINE_RESERVED1_SHIFT 10
}; };
/* Used in the first 2 bits for gft_ram_line: Indication for vlan mask */
enum gft_vlan_select { enum gft_vlan_select {
INNER_PROVIDER_VLAN = 0, INNER_PROVIDER_VLAN = 0,
INNER_VLAN = 1, INNER_VLAN = 1,
...@@ -6157,10 +6349,205 @@ enum gft_vlan_select { ...@@ -6157,10 +6349,205 @@ enum gft_vlan_select {
MAX_GFT_VLAN_SELECT MAX_GFT_VLAN_SELECT
}; };
struct mstorm_rdma_task_st_ctx { /* The rdma task context of Ystorm */
struct regpair temp[4]; struct ystorm_rdma_task_st_ctx {
struct regpair temp[4];
};
struct ystorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 msem_ctx_upd_seq;
u8 flags0;
#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt;
u8 ref_cnt_seq;
u8 ctx_upd_seq;
__le16 dif_flags;
__le16 tx_ref_count;
__le16 last_used_ltid;
__le16 parent_mr_lo;
__le16 parent_mr_hi;
__le32 fbo_lo;
__le32 fbo_hi;
};
struct mstorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt;
u8 ref_cnt_seq;
u8 ctx_upd_seq;
__le16 dif_flags;
__le16 tx_ref_count;
__le16 last_used_ltid;
__le16 parent_mr_lo;
__le16 parent_mr_hi;
__le32 fbo_lo;
__le32 fbo_hi;
};
/* The roce task context of Mstorm */
struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
/* The roce task context of Ustorm */
struct ustorm_rdma_task_st_ctx {
struct regpair temp[2];
};
struct ustorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 reg2;
__le32 dif_runt_value;
__le32 reg4;
__le32 reg5;
};
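/* Illustrative sketch (not part of this patch): DIF protection results are
 * reported through the Ustorm task aggregative context above. GET_FIELD()
 * is the generic mask/shift accessor from the common HSI headers; the
 * function name and calling context are hypothetical.
 */
static u8 example_dif_error_type(struct ustorm_rdma_task_ag_ctx *ctx,
				 u32 *first_bad_interval)
{
	*first_bad_interval = le32_to_cpu(ctx->dif_error_1st_interval);

	return GET_FIELD(ctx->flags3, USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE);
}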
/* RDMA task context */
struct rdma_task_context {
struct ystorm_rdma_task_st_ctx ystorm_st_context;
struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
struct tdif_task_context tdif_context;
struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
struct mstorm_rdma_task_st_ctx mstorm_st_context;
struct rdif_task_context rdif_context;
struct ustorm_rdma_task_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
}; };
/* rdma function close ramrod data */
struct rdma_close_func_ramrod_data { struct rdma_close_func_ramrod_data {
u8 cnq_start_offset; u8 cnq_start_offset;
u8 num_cnqs; u8 num_cnqs;
...@@ -6169,6 +6556,7 @@ struct rdma_close_func_ramrod_data { ...@@ -6169,6 +6556,7 @@ struct rdma_close_func_ramrod_data {
u8 reserved[4]; u8 reserved[4];
}; };
/* rdma function init CNQ parameters */
struct rdma_cnq_params { struct rdma_cnq_params {
__le16 sb_num; __le16 sb_num;
u8 sb_index; u8 sb_index;
...@@ -6179,6 +6567,7 @@ struct rdma_cnq_params { ...@@ -6179,6 +6567,7 @@ struct rdma_cnq_params {
u8 reserved1[6]; u8 reserved1[6];
}; };
/* rdma create cq ramrod data */
struct rdma_create_cq_ramrod_data { struct rdma_create_cq_ramrod_data {
struct regpair cq_handle; struct regpair cq_handle;
struct regpair pbl_addr; struct regpair pbl_addr;
...@@ -6193,21 +6582,25 @@ struct rdma_create_cq_ramrod_data { ...@@ -6193,21 +6582,25 @@ struct rdma_create_cq_ramrod_data {
__le16 reserved1; __le16 reserved1;
}; };
/* rdma deregister tid ramrod data */
struct rdma_deregister_tid_ramrod_data { struct rdma_deregister_tid_ramrod_data {
__le32 itid; __le32 itid;
__le32 reserved; __le32 reserved;
}; };
/* rdma destroy cq output params */
struct rdma_destroy_cq_output_params { struct rdma_destroy_cq_output_params {
__le16 cnq_num; __le16 cnq_num;
__le16 reserved0; __le16 reserved0;
__le32 reserved1; __le32 reserved1;
}; };
/* rdma destroy cq ramrod data */
struct rdma_destroy_cq_ramrod_data { struct rdma_destroy_cq_ramrod_data {
struct regpair output_params_addr; struct regpair output_params_addr;
}; };
/* RDMA slow path EQ cmd IDs */
enum rdma_event_opcode { enum rdma_event_opcode {
RDMA_EVENT_UNUSED, RDMA_EVENT_UNUSED,
RDMA_EVENT_FUNC_INIT, RDMA_EVENT_FUNC_INIT,
...@@ -6223,6 +6616,7 @@ enum rdma_event_opcode { ...@@ -6223,6 +6616,7 @@ enum rdma_event_opcode {
MAX_RDMA_EVENT_OPCODE MAX_RDMA_EVENT_OPCODE
}; };
/* RDMA FW return code for slow path ramrods */
enum rdma_fw_return_code { enum rdma_fw_return_code {
RDMA_RETURN_OK = 0, RDMA_RETURN_OK = 0,
RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR, RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
...@@ -6232,6 +6626,7 @@ enum rdma_fw_return_code { ...@@ -6232,6 +6626,7 @@ enum rdma_fw_return_code {
MAX_RDMA_FW_RETURN_CODE MAX_RDMA_FW_RETURN_CODE
}; };
/* rdma function init header */
struct rdma_init_func_hdr { struct rdma_init_func_hdr {
u8 cnq_start_offset; u8 cnq_start_offset;
u8 num_cnqs; u8 num_cnqs;
...@@ -6241,11 +6636,13 @@ struct rdma_init_func_hdr { ...@@ -6241,11 +6636,13 @@ struct rdma_init_func_hdr {
u8 reserved[3]; u8 reserved[3];
}; };
/* rdma function init ramrod data */
struct rdma_init_func_ramrod_data { struct rdma_init_func_ramrod_data {
struct rdma_init_func_hdr params_header; struct rdma_init_func_hdr params_header;
struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES]; struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
}; };
/* RDMA ramrod command IDs */
enum rdma_ramrod_cmd_id { enum rdma_ramrod_cmd_id {
RDMA_RAMROD_UNUSED, RDMA_RAMROD_UNUSED,
RDMA_RAMROD_FUNC_INIT, RDMA_RAMROD_FUNC_INIT,
...@@ -6261,6 +6658,7 @@ enum rdma_ramrod_cmd_id { ...@@ -6261,6 +6658,7 @@ enum rdma_ramrod_cmd_id {
MAX_RDMA_RAMROD_CMD_ID MAX_RDMA_RAMROD_CMD_ID
}; };
/* rdma register tid ramrod data */
struct rdma_register_tid_ramrod_data { struct rdma_register_tid_ramrod_data {
__le16 flags; __le16 flags;
#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F #define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
...@@ -6313,11 +6711,13 @@ struct rdma_register_tid_ramrod_data { ...@@ -6313,11 +6711,13 @@ struct rdma_register_tid_ramrod_data {
__le32 reserved4[2]; __le32 reserved4[2];
}; };
/* rdma resize cq output params */
struct rdma_resize_cq_output_params { struct rdma_resize_cq_output_params {
__le32 old_cq_cons; __le32 old_cq_cons;
__le32 old_cq_prod; __le32 old_cq_prod;
}; };
/* rdma resize cq ramrod data */
struct rdma_resize_cq_ramrod_data { struct rdma_resize_cq_ramrod_data {
u8 flags; u8 flags;
#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1 #define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
...@@ -6333,10 +6733,12 @@ struct rdma_resize_cq_ramrod_data { ...@@ -6333,10 +6733,12 @@ struct rdma_resize_cq_ramrod_data {
struct regpair output_params_addr; struct regpair output_params_addr;
}; };
/* rdma SRQ context */
struct rdma_srq_context { struct rdma_srq_context {
struct regpair temp[8]; struct regpair temp[8];
}; };
/* rdma create srq ramrod data */
struct rdma_srq_create_ramrod_data { struct rdma_srq_create_ramrod_data {
struct regpair pbl_base_addr; struct regpair pbl_base_addr;
__le16 pages_in_srq_pbl; __le16 pages_in_srq_pbl;
...@@ -6348,206 +6750,19 @@ struct rdma_srq_create_ramrod_data { ...@@ -6348,206 +6750,19 @@ struct rdma_srq_create_ramrod_data {
struct regpair producers_addr; struct regpair producers_addr;
}; };
/* rdma destroy srq ramrod data */
struct rdma_srq_destroy_ramrod_data { struct rdma_srq_destroy_ramrod_data {
struct rdma_srq_id srq_id; struct rdma_srq_id srq_id;
__le32 reserved; __le32 reserved;
}; };
/* rdma modify srq ramrod data */
struct rdma_srq_modify_ramrod_data { struct rdma_srq_modify_ramrod_data {
struct rdma_srq_id srq_id; struct rdma_srq_id srq_id;
__le32 wqe_limit; __le32 wqe_limit;
}; };
struct ystorm_rdma_task_st_ctx { /* RDMA Tid type enumeration (for register_tid ramrod) */
struct regpair temp[4];
};
struct ystorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 msem_ctx_upd_seq;
u8 flags0;
#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt;
u8 ref_cnt_seq;
u8 ctx_upd_seq;
__le16 dif_flags;
__le16 tx_ref_count;
__le16 last_used_ltid;
__le16 parent_mr_lo;
__le16 parent_mr_hi;
__le32 fbo_lo;
__le32 fbo_hi;
};
struct mstorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt;
u8 ref_cnt_seq;
u8 ctx_upd_seq;
__le16 dif_flags;
__le16 tx_ref_count;
__le16 last_used_ltid;
__le16 parent_mr_lo;
__le16 parent_mr_hi;
__le32 fbo_lo;
__le32 fbo_hi;
};
struct ustorm_rdma_task_st_ctx {
struct regpair temp[2];
};
struct ustorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 reg2;
__le32 dif_runt_value;
__le32 reg4;
__le32 reg5;
};
struct rdma_task_context {
struct ystorm_rdma_task_st_ctx ystorm_st_context;
struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
struct tdif_task_context tdif_context;
struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
struct mstorm_rdma_task_st_ctx mstorm_st_context;
struct rdif_task_context rdif_context;
struct ustorm_rdma_task_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
};
enum rdma_tid_type { enum rdma_tid_type {
RDMA_TID_REGISTERED_MR, RDMA_TID_REGISTERED_MR,
RDMA_TID_FMR, RDMA_TID_FMR,
...@@ -7345,30 +7560,37 @@ struct ystorm_rdma_conn_ag_ctx { ...@@ -7345,30 +7560,37 @@ struct ystorm_rdma_conn_ag_ctx {
__le32 reg3; __le32 reg3;
}; };
struct mstorm_roce_conn_st_ctx { /* The roce storm context of Ystorm */
struct regpair temp[6]; struct ystorm_roce_conn_st_ctx {
struct regpair temp[2];
}; };
/* The roce storm context of Pstorm */
struct pstorm_roce_conn_st_ctx { struct pstorm_roce_conn_st_ctx {
struct regpair temp[16]; struct regpair temp[16];
}; };
struct ystorm_roce_conn_st_ctx { /* The roce storm context of Xstorm */
struct regpair temp[2];
};
struct xstorm_roce_conn_st_ctx { struct xstorm_roce_conn_st_ctx {
struct regpair temp[24]; struct regpair temp[24];
}; };
/* The roce storm context of Tstorm */
struct tstorm_roce_conn_st_ctx { struct tstorm_roce_conn_st_ctx {
struct regpair temp[30]; struct regpair temp[30];
}; };
/* The roce storm context of Mstorm */
struct mstorm_roce_conn_st_ctx {
struct regpair temp[6];
};
/* The roce storm context of Ustorm */
struct ustorm_roce_conn_st_ctx { struct ustorm_roce_conn_st_ctx {
struct regpair temp[12]; struct regpair temp[12];
}; };
/* roce connection context */
struct roce_conn_context { struct roce_conn_context {
struct ystorm_roce_conn_st_ctx ystorm_st_context; struct ystorm_roce_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2]; struct regpair ystorm_st_padding[2];
...@@ -7385,6 +7607,7 @@ struct roce_conn_context { ...@@ -7385,6 +7607,7 @@ struct roce_conn_context {
struct regpair ustorm_st_padding[2]; struct regpair ustorm_st_padding[2];
}; };
/* roce create qp requester ramrod data */
struct roce_create_qp_req_ramrod_data { struct roce_create_qp_req_ramrod_data {
__le16 flags; __le16 flags;
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
...@@ -7431,6 +7654,7 @@ struct roce_create_qp_req_ramrod_data { ...@@ -7431,6 +7654,7 @@ struct roce_create_qp_req_ramrod_data {
__le16 dpi; __le16 dpi;
}; };
/* roce create qp responder ramrod data */
struct roce_create_qp_resp_ramrod_data { struct roce_create_qp_resp_ramrod_data {
__le16 flags; __le16 flags;
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
...@@ -7482,24 +7706,29 @@ struct roce_create_qp_resp_ramrod_data { ...@@ -7482,24 +7706,29 @@ struct roce_create_qp_resp_ramrod_data {
__le16 dpi; __le16 dpi;
}; };
/* RoCE destroy qp requester output params */
struct roce_destroy_qp_req_output_params { struct roce_destroy_qp_req_output_params {
__le32 num_bound_mw; __le32 num_bound_mw;
__le32 cq_prod; __le32 cq_prod;
}; };
/* RoCE destroy qp requester ramrod data */
struct roce_destroy_qp_req_ramrod_data { struct roce_destroy_qp_req_ramrod_data {
struct regpair output_params_addr; struct regpair output_params_addr;
}; };
/* RoCE destroy qp responder output params */
struct roce_destroy_qp_resp_output_params { struct roce_destroy_qp_resp_output_params {
__le32 num_invalidated_mw; __le32 num_invalidated_mw;
__le32 cq_prod; __le32 cq_prod;
}; };
/* RoCE destroy qp responder ramrod data */
struct roce_destroy_qp_resp_ramrod_data { struct roce_destroy_qp_resp_ramrod_data {
struct regpair output_params_addr; struct regpair output_params_addr;
}; };
/* roce special events statistics */
struct roce_events_stats { struct roce_events_stats {
__le16 silent_drops; __le16 silent_drops;
__le16 rnr_naks_sent; __le16 rnr_naks_sent;
...@@ -7508,6 +7737,7 @@ struct roce_events_stats { ...@@ -7508,6 +7737,7 @@ struct roce_events_stats {
__le32 reserved; __le32 reserved;
}; };
/* ROCE slow path EQ cmd IDs */
enum roce_event_opcode { enum roce_event_opcode {
ROCE_EVENT_CREATE_QP = 11, ROCE_EVENT_CREATE_QP = 11,
ROCE_EVENT_MODIFY_QP, ROCE_EVENT_MODIFY_QP,
...@@ -7518,6 +7748,7 @@ enum roce_event_opcode { ...@@ -7518,6 +7748,7 @@ enum roce_event_opcode {
MAX_ROCE_EVENT_OPCODE MAX_ROCE_EVENT_OPCODE
}; };
/* roce func init parameters */
struct roce_init_func_params { struct roce_init_func_params {
u8 ll2_queue_id; u8 ll2_queue_id;
u8 cnp_vlan_priority; u8 cnp_vlan_priority;
...@@ -7526,11 +7757,13 @@ struct roce_init_func_params { ...@@ -7526,11 +7757,13 @@ struct roce_init_func_params {
__le32 cnp_send_timeout; __le32 cnp_send_timeout;
}; };
/* roce func init ramrod data */
struct roce_init_func_ramrod_data { struct roce_init_func_ramrod_data {
struct rdma_init_func_ramrod_data rdma; struct rdma_init_func_ramrod_data rdma;
struct roce_init_func_params roce; struct roce_init_func_params roce;
}; };
/* roce modify qp requester ramrod data */
struct roce_modify_qp_req_ramrod_data { struct roce_modify_qp_req_ramrod_data {
__le16 flags; __le16 flags;
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
...@@ -7575,6 +7808,7 @@ struct roce_modify_qp_req_ramrod_data { ...@@ -7575,6 +7808,7 @@ struct roce_modify_qp_req_ramrod_data {
__le32 dst_gid[4]; __le32 dst_gid[4];
}; };
/* roce modify qp responder ramrod data */
struct roce_modify_qp_resp_ramrod_data { struct roce_modify_qp_resp_ramrod_data {
__le16 flags; __le16 flags;
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
...@@ -7615,6 +7849,7 @@ struct roce_modify_qp_resp_ramrod_data { ...@@ -7615,6 +7849,7 @@ struct roce_modify_qp_resp_ramrod_data {
__le32 dst_gid[4]; __le32 dst_gid[4];
}; };
/* RoCE query qp requester output params */
struct roce_query_qp_req_output_params { struct roce_query_qp_req_output_params {
__le32 psn; __le32 psn;
__le32 flags; __le32 flags;
...@@ -7626,10 +7861,12 @@ struct roce_query_qp_req_output_params { ...@@ -7626,10 +7861,12 @@ struct roce_query_qp_req_output_params {
#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2 #define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
}; };
/* RoCE query qp requester ramrod data */
struct roce_query_qp_req_ramrod_data { struct roce_query_qp_req_ramrod_data {
struct regpair output_params_addr; struct regpair output_params_addr;
}; };
/* RoCE query qp responder output params */
struct roce_query_qp_resp_output_params { struct roce_query_qp_resp_output_params {
__le32 psn; __le32 psn;
__le32 err_flag; __le32 err_flag;
...@@ -7639,10 +7876,12 @@ struct roce_query_qp_resp_output_params { ...@@ -7639,10 +7876,12 @@ struct roce_query_qp_resp_output_params {
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1 #define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
}; };
/* RoCE query qp responder ramrod data */
struct roce_query_qp_resp_ramrod_data { struct roce_query_qp_resp_ramrod_data {
struct regpair output_params_addr; struct regpair output_params_addr;
}; };
/* ROCE ramrod command IDs */
enum roce_ramrod_cmd_id { enum roce_ramrod_cmd_id {
ROCE_RAMROD_CREATE_QP = 11, ROCE_RAMROD_CREATE_QP = 11,
ROCE_RAMROD_MODIFY_QP, ROCE_RAMROD_MODIFY_QP,
...@@ -7733,10 +7972,10 @@ struct tstorm_roce_req_conn_ag_ctx { ...@@ -7733,10 +7972,10 @@ struct tstorm_roce_req_conn_ag_ctx {
u8 flags0; u8 flags0;
#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 #define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK 0x1 #define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT 1 #define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK 0x1 #define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT 2 #define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 #define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 #define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 #define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
...@@ -8464,10 +8703,10 @@ struct xstorm_roce_resp_conn_ag_ctx { ...@@ -8464,10 +8703,10 @@ struct xstorm_roce_resp_conn_ag_ctx {
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 #define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 #define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12; u8 flags12;
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 #define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1 #define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 #define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 #define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 #define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
...@@ -8621,6 +8860,7 @@ struct ystorm_roce_resp_conn_ag_ctx { ...@@ -8621,6 +8860,7 @@ struct ystorm_roce_resp_conn_ag_ctx {
__le32 reg3; __le32 reg3;
}; };
/* RoCE flavor */
enum roce_flavor { enum roce_flavor {
PLAIN_ROCE, PLAIN_ROCE,
RROCE_IPV4, RROCE_IPV4,
...@@ -8628,14 +8868,17 @@ enum roce_flavor { ...@@ -8628,14 +8868,17 @@ enum roce_flavor {
MAX_ROCE_FLAVOR MAX_ROCE_FLAVOR
}; };
/* The iwarp storm context of Ystorm */
struct ystorm_iwarp_conn_st_ctx { struct ystorm_iwarp_conn_st_ctx {
__le32 reserved[4]; __le32 reserved[4];
}; };
/* The iwarp storm context of Pstorm */
struct pstorm_iwarp_conn_st_ctx { struct pstorm_iwarp_conn_st_ctx {
__le32 reserved[36]; __le32 reserved[36];
}; };
/* The iwarp storm context of Xstorm */
struct xstorm_iwarp_conn_st_ctx { struct xstorm_iwarp_conn_st_ctx {
__le32 reserved[44]; __le32 reserved[44];
}; };
...@@ -9001,18 +9244,22 @@ struct tstorm_iwarp_conn_ag_ctx { ...@@ -9001,18 +9244,22 @@ struct tstorm_iwarp_conn_ag_ctx {
__le32 last_hq_sequence; __le32 last_hq_sequence;
}; };
/* The iwarp storm context of Tstorm */
struct tstorm_iwarp_conn_st_ctx { struct tstorm_iwarp_conn_st_ctx {
__le32 reserved[60]; __le32 reserved[60];
}; };
/* The iwarp storm context of Mstorm */
struct mstorm_iwarp_conn_st_ctx { struct mstorm_iwarp_conn_st_ctx {
__le32 reserved[32]; __le32 reserved[32];
}; };
/* The iwarp storm context of Ustorm */
struct ustorm_iwarp_conn_st_ctx { struct ustorm_iwarp_conn_st_ctx {
__le32 reserved[24]; __le32 reserved[24];
}; };
/* iwarp connection context */
struct iwarp_conn_context { struct iwarp_conn_context {
struct ystorm_iwarp_conn_st_ctx ystorm_st_context; struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2]; struct regpair ystorm_st_padding[2];
...@@ -9030,6 +9277,7 @@ struct iwarp_conn_context { ...@@ -9030,6 +9277,7 @@ struct iwarp_conn_context {
struct ustorm_iwarp_conn_st_ctx ustorm_st_context; struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
}; };
/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */
struct iwarp_create_qp_ramrod_data { struct iwarp_create_qp_ramrod_data {
u8 flags; u8 flags;
#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1 #define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
...@@ -9061,6 +9309,7 @@ struct iwarp_create_qp_ramrod_data { ...@@ -9061,6 +9309,7 @@ struct iwarp_create_qp_ramrod_data {
u8 reserved2[6]; u8 reserved2[6];
}; };
/* iWARP async EQE opcodes */
enum iwarp_eqe_async_opcode { enum iwarp_eqe_async_opcode {
IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE, IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE,
IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED, IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED,
...@@ -9083,6 +9332,7 @@ struct iwarp_eqe_data_tcp_async_completion { ...@@ -9083,6 +9332,7 @@ struct iwarp_eqe_data_tcp_async_completion {
u8 reserved[5]; u8 reserved[5];
}; };
/* iWARP sync EQE opcodes */
enum iwarp_eqe_sync_opcode { enum iwarp_eqe_sync_opcode {
IWARP_EVENT_TYPE_TCP_OFFLOAD = IWARP_EVENT_TYPE_TCP_OFFLOAD =
11, 11,
...@@ -9095,6 +9345,7 @@ enum iwarp_eqe_sync_opcode { ...@@ -9095,6 +9345,7 @@ enum iwarp_eqe_sync_opcode {
MAX_IWARP_EQE_SYNC_OPCODE MAX_IWARP_EQE_SYNC_OPCODE
}; };
/* iWARP EQE completion status */
enum iwarp_fw_return_code { enum iwarp_fw_return_code {
IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5, IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
IWARP_CONN_ERROR_TCP_CONNECTION_RST, IWARP_CONN_ERROR_TCP_CONNECTION_RST,
...@@ -9125,24 +9376,27 @@ enum iwarp_fw_return_code { ...@@ -9125,24 +9376,27 @@ enum iwarp_fw_return_code {
MAX_IWARP_FW_RETURN_CODE MAX_IWARP_FW_RETURN_CODE
}; };
/* iWARP function init parameters */
struct iwarp_init_func_params { struct iwarp_init_func_params {
u8 ll2_ooo_q_index; u8 ll2_ooo_q_index;
u8 reserved1[7]; u8 reserved1[7];
}; };
/* iwarp func init ramrod data */
struct iwarp_init_func_ramrod_data { struct iwarp_init_func_ramrod_data {
struct rdma_init_func_ramrod_data rdma; struct rdma_init_func_ramrod_data rdma;
struct tcp_init_params tcp; struct tcp_init_params tcp;
struct iwarp_init_func_params iwarp; struct iwarp_init_func_params iwarp;
}; };
/* iWARP QP - possible states to transition to */
enum iwarp_modify_qp_new_state_type { enum iwarp_modify_qp_new_state_type {
IWARP_MODIFY_QP_STATE_CLOSING = 1, IWARP_MODIFY_QP_STATE_CLOSING = 1,
IWARP_MODIFY_QP_STATE_ERROR = IWARP_MODIFY_QP_STATE_ERROR = 2,
2,
MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE
}; };
/* iwarp modify qp ramrod data */
struct iwarp_modify_qp_ramrod_data { struct iwarp_modify_qp_ramrod_data {
__le16 transition_to_state; __le16 transition_to_state;
__le16 flags; __le16 flags;
...@@ -9162,17 +9416,20 @@ struct iwarp_modify_qp_ramrod_data { ...@@ -9162,17 +9416,20 @@ struct iwarp_modify_qp_ramrod_data {
__le32 reserved4[8]; __le32 reserved4[8];
}; };
/* MPA params for Enhanced mode */
struct mpa_rq_params { struct mpa_rq_params {
__le32 ird; __le32 ird;
__le32 ord; __le32 ord;
}; };
/* MPA host Address-Len for private data */
struct mpa_ulp_buffer { struct mpa_ulp_buffer {
struct regpair addr; struct regpair addr;
__le16 len; __le16 len;
__le16 reserved[3]; __le16 reserved[3];
}; };
/* iWARP MPA offload params common to Basic and Enhanced modes */
struct mpa_outgoing_params { struct mpa_outgoing_params {
u8 crc_needed; u8 crc_needed;
u8 reject; u8 reject;
...@@ -9181,6 +9438,9 @@ struct mpa_outgoing_params { ...@@ -9181,6 +9438,9 @@ struct mpa_outgoing_params {
struct mpa_ulp_buffer outgoing_ulp_buffer; struct mpa_ulp_buffer outgoing_ulp_buffer;
}; };
/* iWARP MPA offload params passed by driver to FW in MPA Offload Request
* Ramrod.
*/
struct iwarp_mpa_offload_ramrod_data { struct iwarp_mpa_offload_ramrod_data {
struct mpa_outgoing_params common; struct mpa_outgoing_params common;
__le32 tcp_cid; __le32 tcp_cid;
...@@ -9200,6 +9460,7 @@ struct iwarp_mpa_offload_ramrod_data { ...@@ -9200,6 +9460,7 @@ struct iwarp_mpa_offload_ramrod_data {
u8 reserved3[15]; u8 reserved3[15];
}; };
/* iWARP TCP connection offload params passed by driver to FW */
struct iwarp_offload_params { struct iwarp_offload_params {
struct mpa_ulp_buffer incoming_ulp_buffer; struct mpa_ulp_buffer incoming_ulp_buffer;
struct regpair async_eqe_output_buf; struct regpair async_eqe_output_buf;
...@@ -9211,6 +9472,7 @@ struct iwarp_offload_params { ...@@ -9211,6 +9472,7 @@ struct iwarp_offload_params {
u8 reserved[10]; u8 reserved[10];
}; };
/* iWARP query QP output params */
struct iwarp_query_qp_output_params { struct iwarp_query_qp_output_params {
__le32 flags; __le32 flags;
#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1 #define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
...@@ -9220,13 +9482,14 @@ struct iwarp_query_qp_output_params { ...@@ -9220,13 +9482,14 @@ struct iwarp_query_qp_output_params {
u8 reserved1[4]; u8 reserved1[4];
}; };
/* iWARP query QP ramrod data */
struct iwarp_query_qp_ramrod_data { struct iwarp_query_qp_ramrod_data {
struct regpair output_params_addr; struct regpair output_params_addr;
}; };
/* iWARP Ramrod Command IDs */
enum iwarp_ramrod_cmd_id { enum iwarp_ramrod_cmd_id {
IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
11,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
IWARP_RAMROD_CMD_ID_CREATE_QP, IWARP_RAMROD_CMD_ID_CREATE_QP,
...@@ -9236,22 +9499,28 @@ enum iwarp_ramrod_cmd_id { ...@@ -9236,22 +9499,28 @@ enum iwarp_ramrod_cmd_id {
MAX_IWARP_RAMROD_CMD_ID MAX_IWARP_RAMROD_CMD_ID
}; };
/* Per PF iWARP retransmit path statistics */
struct iwarp_rxmit_stats_drv { struct iwarp_rxmit_stats_drv {
struct regpair tx_go_to_slow_start_event_cnt; struct regpair tx_go_to_slow_start_event_cnt;
struct regpair tx_fast_retransmit_event_cnt; struct regpair tx_fast_retransmit_event_cnt;
}; };
/* iWARP and TCP connection offload params passed by driver to FW in iWARP
* offload ramrod.
*/
struct iwarp_tcp_offload_ramrod_data { struct iwarp_tcp_offload_ramrod_data {
struct iwarp_offload_params iwarp; struct iwarp_offload_params iwarp;
struct tcp_offload_params_opt2 tcp; struct tcp_offload_params_opt2 tcp;
}; };
/* iWARP MPA negotiation types */
enum mpa_negotiation_mode { enum mpa_negotiation_mode {
MPA_NEGOTIATION_TYPE_BASIC = 1, MPA_NEGOTIATION_TYPE_BASIC = 1,
MPA_NEGOTIATION_TYPE_ENHANCED = 2, MPA_NEGOTIATION_TYPE_ENHANCED = 2,
MAX_MPA_NEGOTIATION_MODE MAX_MPA_NEGOTIATION_MODE
}; };
/* iWARP MPA Enhanced mode RTR types */
enum mpa_rtr_type { enum mpa_rtr_type {
MPA_RTR_TYPE_NONE = 0, MPA_RTR_TYPE_NONE = 0,
MPA_RTR_TYPE_ZERO_SEND = 1, MPA_RTR_TYPE_ZERO_SEND = 1,
...@@ -9264,6 +9533,7 @@ enum mpa_rtr_type { ...@@ -9264,6 +9533,7 @@ enum mpa_rtr_type {
MAX_MPA_RTR_TYPE MAX_MPA_RTR_TYPE
}; };
/* unaligned opaque data received from LL2 */
struct unaligned_opaque_data { struct unaligned_opaque_data {
__le16 first_mpa_offset; __le16 first_mpa_offset;
u8 tcp_payload_offset; u8 tcp_payload_offset;
...@@ -9427,6 +9697,7 @@ struct ystorm_iwarp_conn_ag_ctx { ...@@ -9427,6 +9697,7 @@ struct ystorm_iwarp_conn_ag_ctx {
__le32 reg3; __le32 reg3;
}; };
/* The fcoe storm context of Ystorm */
struct ystorm_fcoe_conn_st_ctx { struct ystorm_fcoe_conn_st_ctx {
u8 func_mode; u8 func_mode;
u8 cos; u8 cos;
...@@ -9461,6 +9732,7 @@ struct ystorm_fcoe_conn_st_ctx { ...@@ -9461,6 +9732,7 @@ struct ystorm_fcoe_conn_st_ctx {
u8 fcp_xfer_size; u8 fcp_xfer_size;
}; };
/* FCoE 16-bit VLAN structure */
struct fcoe_vlan_fields { struct fcoe_vlan_fields {
__le16 fields; __le16 fields;
#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF #define FCOE_VLAN_FIELDS_VID_MASK 0xFFF
...@@ -9471,16 +9743,19 @@ struct fcoe_vlan_fields { ...@@ -9471,16 +9743,19 @@ struct fcoe_vlan_fields {
#define FCOE_VLAN_FIELDS_PRI_SHIFT 13 #define FCOE_VLAN_FIELDS_PRI_SHIFT 13
}; };
/* FCoE 16-bit VLAN union */
union fcoe_vlan_field_union { union fcoe_vlan_field_union {
struct fcoe_vlan_fields fields; struct fcoe_vlan_fields fields;
__le16 val; __le16 val;
}; };
/* FCoE 16-bit VLAN, vif union */
union fcoe_vlan_vif_field_union { union fcoe_vlan_vif_field_union {
union fcoe_vlan_field_union vlan; union fcoe_vlan_field_union vlan;
__le16 vif; __le16 vif;
}; };
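/* Illustrative sketch (not part of this patch): packing an 802.1Q VLAN ID
 * and priority into the 16-bit FCoE VLAN field above. SET_FIELD() is the
 * generic mask/shift helper from the common HSI headers; the CFI bit and
 * the mask/shift halves elided by the hunk above are assumed, and the
 * function name is hypothetical.
 */
static __le16 example_fcoe_vlan(u16 vlan_id, u8 prio)
{
	u16 fields = 0;

	SET_FIELD(fields, FCOE_VLAN_FIELDS_VID, vlan_id & 0xFFF);
	SET_FIELD(fields, FCOE_VLAN_FIELDS_PRI, prio & 0x7);

	return cpu_to_le16(fields);
}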
/* Ethernet context section */
struct pstorm_fcoe_eth_context_section { struct pstorm_fcoe_eth_context_section {
u8 remote_addr_3; u8 remote_addr_3;
u8 remote_addr_2; u8 remote_addr_2;
...@@ -9500,6 +9775,7 @@ struct pstorm_fcoe_eth_context_section { ...@@ -9500,6 +9775,7 @@ struct pstorm_fcoe_eth_context_section {
__le16 inner_eth_type; __le16 inner_eth_type;
}; };
/* The fcoe storm context of Pstorm */
struct pstorm_fcoe_conn_st_ctx { struct pstorm_fcoe_conn_st_ctx {
u8 func_mode; u8 func_mode;
u8 cos; u8 cos;
...@@ -9532,6 +9808,7 @@ struct pstorm_fcoe_conn_st_ctx { ...@@ -9532,6 +9808,7 @@ struct pstorm_fcoe_conn_st_ctx {
u8 reserved1; u8 reserved1;
}; };
/* The fcoe storm context of Xstorm */
struct xstorm_fcoe_conn_st_ctx { struct xstorm_fcoe_conn_st_ctx {
u8 func_mode; u8 func_mode;
u8 src_mac_index; u8 src_mac_index;
...@@ -9831,6 +10108,7 @@ struct xstorm_fcoe_conn_ag_ctx { ...@@ -9831,6 +10108,7 @@ struct xstorm_fcoe_conn_ag_ctx {
__le32 reg8; __le32 reg8;
}; };
/* The fcoe storm context of Ustorm */
struct ustorm_fcoe_conn_st_ctx { struct ustorm_fcoe_conn_st_ctx {
struct regpair respq_pbl_addr; struct regpair respq_pbl_addr;
__le16 num_pages_in_pbl; __le16 num_pages_in_pbl;
...@@ -9996,6 +10274,7 @@ struct ustorm_fcoe_conn_ag_ctx { ...@@ -9996,6 +10274,7 @@ struct ustorm_fcoe_conn_ag_ctx {
__le16 word3; __le16 word3;
}; };
/* The fcoe storm context of Tstorm */
struct tstorm_fcoe_conn_st_ctx { struct tstorm_fcoe_conn_st_ctx {
__le16 stat_ram_addr; __le16 stat_ram_addr;
__le16 rx_max_fc_payload_len; __le16 rx_max_fc_payload_len;
...@@ -10009,15 +10288,15 @@ struct tstorm_fcoe_conn_st_ctx { ...@@ -10009,15 +10288,15 @@ struct tstorm_fcoe_conn_st_ctx {
#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2 #define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2
u8 timers_cleanup_invocation_cnt; u8 timers_cleanup_invocation_cnt;
__le32 reserved1[2]; __le32 reserved1[2];
__le32 dst_mac_address_bytes0to3; __le32 dst_mac_address_bytes_0_to_3;
__le16 dst_mac_address_bytes4to5; __le16 dst_mac_address_bytes_4_to_5;
__le16 ramrod_echo; __le16 ramrod_echo;
u8 flags1; u8 flags1;
#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3 #define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3
#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0 #define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0
#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F #define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F
#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2 #define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2
u8 q_relative_offset; u8 cq_relative_offset;
u8 bdq_resource_id; u8 bdq_resource_id;
u8 reserved0[5]; u8 reserved0[5];
}; };
...@@ -10059,6 +10338,7 @@ struct mstorm_fcoe_conn_ag_ctx { ...@@ -10059,6 +10338,7 @@ struct mstorm_fcoe_conn_ag_ctx {
__le32 reg1; __le32 reg1;
}; };
/* Fast path part of the fcoe storm context of Mstorm */
struct fcoe_mstorm_fcoe_conn_st_ctx_fp { struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
__le16 xfer_prod; __le16 xfer_prod;
__le16 reserved1; __le16 reserved1;
...@@ -10073,6 +10353,7 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_fp { ...@@ -10073,6 +10353,7 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
u8 reserved2[2]; u8 reserved2[2];
}; };
/* Non fast path part of the fcoe storm context of Mstorm */
struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp { struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
__le16 conn_id; __le16 conn_id;
__le16 stat_ram_addr; __le16 stat_ram_addr;
...@@ -10088,11 +10369,13 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp { ...@@ -10088,11 +10369,13 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
struct regpair reserved2[3]; struct regpair reserved2[3];
}; };
/* The fcoe storm context of Mstorm */
struct mstorm_fcoe_conn_st_ctx { struct mstorm_fcoe_conn_st_ctx {
struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp; struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp;
struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp; struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp;
}; };
/* fcoe connection context */
struct fcoe_conn_context { struct fcoe_conn_context {
struct ystorm_fcoe_conn_st_ctx ystorm_st_context; struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
struct pstorm_fcoe_conn_st_ctx pstorm_st_context; struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
...@@ -10111,14 +10394,21 @@ struct fcoe_conn_context { ...@@ -10111,14 +10394,21 @@ struct fcoe_conn_context {
struct mstorm_fcoe_conn_st_ctx mstorm_st_context; struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
}; };
/* FCoE connection offload params passed by driver to FW in FCoE offload
* ramrod.
*/
struct fcoe_conn_offload_ramrod_params { struct fcoe_conn_offload_ramrod_params {
struct fcoe_conn_offload_ramrod_data offload_ramrod_data; struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
}; };
/* FCoE connection terminate params passed by driver to FW in FCoE terminate
* conn ramrod.
*/
struct fcoe_conn_terminate_ramrod_params { struct fcoe_conn_terminate_ramrod_params {
struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data; struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
}; };
/* FCoE event type */
enum fcoe_event_type { enum fcoe_event_type {
FCOE_EVENT_INIT_FUNC, FCOE_EVENT_INIT_FUNC,
FCOE_EVENT_DESTROY_FUNC, FCOE_EVENT_DESTROY_FUNC,
...@@ -10129,10 +10419,12 @@ enum fcoe_event_type { ...@@ -10129,10 +10419,12 @@ enum fcoe_event_type {
MAX_FCOE_EVENT_TYPE MAX_FCOE_EVENT_TYPE
}; };
/* FCoE init params passed by driver to FW in FCoE init ramrod */
struct fcoe_init_ramrod_params { struct fcoe_init_ramrod_params {
struct fcoe_init_func_ramrod_data init_ramrod_data; struct fcoe_init_func_ramrod_data init_ramrod_data;
}; };
/* FCoE ramrod Command IDs */
enum fcoe_ramrod_cmd_id { enum fcoe_ramrod_cmd_id {
FCOE_RAMROD_CMD_ID_INIT_FUNC, FCOE_RAMROD_CMD_ID_INIT_FUNC,
FCOE_RAMROD_CMD_ID_DESTROY_FUNC, FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
...@@ -10142,6 +10434,9 @@ enum fcoe_ramrod_cmd_id { ...@@ -10142,6 +10434,9 @@ enum fcoe_ramrod_cmd_id {
MAX_FCOE_RAMROD_CMD_ID MAX_FCOE_RAMROD_CMD_ID
}; };
/* FCoE statistics params buffer passed by driver to FW in FCoE statistics
* ramrod.
*/
struct fcoe_stat_ramrod_params { struct fcoe_stat_ramrod_params {
struct fcoe_stat_ramrod_data stat_ramrod_data; struct fcoe_stat_ramrod_data stat_ramrod_data;
}; };
...@@ -10190,15 +10485,18 @@ struct ystorm_fcoe_conn_ag_ctx { ...@@ -10190,15 +10485,18 @@ struct ystorm_fcoe_conn_ag_ctx {
__le32 reg3; __le32 reg3;
}; };
/* The iscsi storm connection context of Ystorm */
struct ystorm_iscsi_conn_st_ctx { struct ystorm_iscsi_conn_st_ctx {
__le32 reserved[4]; __le32 reserved[4];
}; };
/* Combined iSCSI and TCP storm connection of Pstorm */
struct pstorm_iscsi_tcp_conn_st_ctx { struct pstorm_iscsi_tcp_conn_st_ctx {
__le32 tcp[32]; __le32 tcp[32];
__le32 iscsi[4]; __le32 iscsi[4];
}; };
/* The combined tcp and iscsi storm context of Xstorm */
struct xstorm_iscsi_tcp_conn_st_ctx { struct xstorm_iscsi_tcp_conn_st_ctx {
__le32 reserved_iscsi[40]; __le32 reserved_iscsi[40];
__le32 reserved_tcp[4]; __le32 reserved_tcp[4];
...@@ -10627,6 +10925,7 @@ struct ustorm_iscsi_conn_ag_ctx { ...@@ -10627,6 +10925,7 @@ struct ustorm_iscsi_conn_ag_ctx {
__le16 word3; __le16 word3;
}; };
/* The iscsi storm connection context of Tstorm */
struct tstorm_iscsi_conn_st_ctx { struct tstorm_iscsi_conn_st_ctx {
__le32 reserved[40]; __le32 reserved[40];
}; };
...@@ -10668,15 +10967,18 @@ struct mstorm_iscsi_conn_ag_ctx { ...@@ -10668,15 +10967,18 @@ struct mstorm_iscsi_conn_ag_ctx {
__le32 reg1; __le32 reg1;
}; };
/* Combined iSCSI and TCP storm connection of Mstorm */
struct mstorm_iscsi_tcp_conn_st_ctx { struct mstorm_iscsi_tcp_conn_st_ctx {
__le32 reserved_tcp[20]; __le32 reserved_tcp[20];
__le32 reserved_iscsi[8]; __le32 reserved_iscsi[8];
}; };
/* The iscsi storm context of Ustorm */
struct ustorm_iscsi_conn_st_ctx { struct ustorm_iscsi_conn_st_ctx {
__le32 reserved[52]; __le32 reserved[52];
}; };
/* iscsi connection context */
struct iscsi_conn_context { struct iscsi_conn_context {
struct ystorm_iscsi_conn_st_ctx ystorm_st_context; struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2]; struct regpair ystorm_st_padding[2];
...@@ -10698,6 +11000,7 @@ struct iscsi_conn_context { ...@@ -10698,6 +11000,7 @@ struct iscsi_conn_context {
struct ustorm_iscsi_conn_st_ctx ustorm_st_context; struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
}; };
/* iSCSI init params passed by driver to FW in iSCSI init ramrod */
struct iscsi_init_ramrod_params { struct iscsi_init_ramrod_params {
struct iscsi_spe_func_init iscsi_init_spe; struct iscsi_spe_func_init iscsi_init_spe;
struct tcp_init_params tcp_init; struct tcp_init_params tcp_init;
......
...@@ -47,50 +47,87 @@ ...@@ -47,50 +47,87 @@
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \ #define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
0x100) - 1 : 0) 0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff #define QM_INVALID_PQ_ID 0xffff
/* Feature enable */ /* Feature enable */
#define QM_BYPASS_EN 1 #define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1 #define QM_BYTE_CRD_EN 1
/* Other PQ constants */ /* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4 #define QM_OTHER_PQS_PER_PF 4
/* WFQ constants */ /* WFQ constants */
/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND 62500000 #define QM_WFQ_UPPER_BOUND 62500000
/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0 #define QM_WFQ_VP_PQ_VOQ_SHIFT 0
/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_SHIFT 5 #define QM_WFQ_VP_PQ_PF_SHIFT 5
/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000) #define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL 43750000 #define QM_WFQ_MAX_INC_VAL 43750000
/* RL constants */ /* RL constants */
#define QM_RL_UPPER_BOUND 62500000
#define QM_RL_PERIOD 5 /* in us */ /* Period in us */
#define QM_RL_PERIOD 5
/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
#define QM_RL_MAX_INC_VAL 43750000
/* RL increment value - rate is specified in mbps */
#define QM_RL_INC_VAL(rate) max_t(u32, \ #define QM_RL_INC_VAL(rate) max_t(u32, \
(u32)(((rate ? rate : \ (u32)(((rate ? rate : \
1000000) * \ 1000000) * \
QM_RL_PERIOD * \ QM_RL_PERIOD * \
101) / (8 * 100)), 1) 101) / (8 * 100)), 1)
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_RL_UPPER_BOUND 62500000
/* Max PF RL increment value is 0.7 * upper bound */
#define QM_RL_MAX_INC_VAL 43750000
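For reference, a minimal standalone sketch (not driver code; the ex_ names are made up here) of the arithmetic behind QM_WFQ_INC_VAL() and QM_RL_INC_VAL() above: the WFQ increment is simply weight * 0x9000 (4 * 9 * 1024), and the RL increment is the byte credit per 5 us period for a rate given in Mbps, padded by 1% (101/100) and floored at 1.

#include <stdio.h>

#define EX_QM_RL_PERIOD 5 /* us, mirrors QM_RL_PERIOD above */

/* WFQ increment: weight scaled by 0x9000 (= 4 * 9 * 1024) */
static unsigned int ex_qm_wfq_inc_val(unsigned int weight)
{
	return weight * 0x9000;
}

/* RL increment: bytes credited per period for a rate in Mbps, with a 1%
 * margin (101/100) and a floor of 1. A rate of 0 falls back to 1000000,
 * exactly as in QM_RL_INC_VAL() above.
 */
static unsigned int ex_qm_rl_inc_val(unsigned int rate)
{
	unsigned int r = rate ? rate : 1000000;
	unsigned int inc = (r * EX_QM_RL_PERIOD * 101) / (8 * 100);

	return inc ? inc : 1;
}

int main(void)
{
	printf("WFQ inc, weight 1:  %u\n", ex_qm_wfq_inc_val(1));    /* 36864 */
	printf("RL inc, 10000 Mbps: %u\n", ex_qm_rl_inc_val(10000)); /* 6312 */
	return 0;
}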
/* AFullOprtnstcCrdMask constants */ /* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1 #define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0 #define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1 #define QM_OPPOR_PQ_EMPTY_DEF 1
/* Command Queue constants */ /* Command Queue constants */
/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150 #define PBF_CMDQ_PURE_LB_LINES 150
#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \ #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
(ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET)) PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \ #define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
(ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET)) PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - \
4) * \ #define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
2) | QM_LINE_CRD_REG_SIGN_BIT) ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
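A small illustration (assumed offsets, not the real runtime-array layout) of the two macro families reworked above: the per-VOQ PBF offsets stride from the VOQ0 entry by the distance between the VOQ0 and VOQ1 entries, and QM_VOQ_LINE_CRD() turns a command-queue line count into a credit with the register sign bit set. The PURE_LB value of 150 lines comes from PBF_CMDQ_PURE_LB_LINES above; the EX_ offsets are hypothetical.

/* Hypothetical runtime offsets, for the illustration only */
#define EX_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET	100
#define EX_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET	108	/* stride of 8 entries */

#define EX_PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(EX_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (ext_voq) * (EX_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		      EX_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define EX_QM_LINE_CRD_REG_SIGN_BIT	(1 << 15) /* BIT(QM_LINE_CRD_REG_WIDTH - 1) */
#define EX_QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | EX_QM_LINE_CRD_REG_SIGN_BIT)

/* EX_PBF_CMDQ_LINES_RT_OFFSET(0) == 100
 * EX_PBF_CMDQ_LINES_RT_OFFSET(3) == 124
 * EX_QM_VOQ_LINE_CRD(150)        == ((150 - 4) * 2) | 0x8000 == 0x8124
 */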
/* BTB: blocks constants (block size = 256B) */ /* BTB: blocks constants (block size = 256B) */
/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS 38 #define BTB_JUMBO_PKT_BLOCKS 38
/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS #define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10 #define BTB_PURE_LB_FACTOR 10
/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO 7 #define BTB_PURE_LB_RATIO 7
/* QM stop command constants */ /* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32 #define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 2 #define QM_STOP_CMD_ADDR 2
...@@ -108,11 +145,9 @@ ...@@ -108,11 +145,9 @@
#define QM_STOP_CMD_POLL_PERIOD_US 500 #define QM_STOP_CMD_POLL_PERIOD_US 500
/* QM command macros */ /* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \ #define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE
_STRUCT_SIZE #define QM_CMD_SET_FIELD(var, cmd, field, value) \
#define QM_CMD_SET_FIELD(var, cmd, field, \ SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
value) SET_FIELD(var[cmd ## _ ## field ## \
_OFFSET], \
cmd ## _ ## field, \ cmd ## _ ## field, \
value) value)
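To make the token pasting in QM_CMD_SET_FIELD() concrete, here is a self-contained sketch with a hypothetical command (EX_CMD) and field (PAUSE); the local EX_SET_FIELD() follows the usual mask-and-shift shape but is not copied from the driver, and all EX_ names are assumptions for the example.

#include <stdio.h>

/* Clear the field, then OR in the new value at the field's shift */
#define EX_SET_FIELD(value, name, val)					\
	do {								\
		(value) &= ~((name ## _MASK) << (name ## _SHIFT));	\
		(value) |= ((val) & (name ## _MASK)) << (name ## _SHIFT); \
	} while (0)

/* Hypothetical command layout: a 4-bit PAUSE field at bits [7:4] of dword 0 */
#define EX_CMD_PAUSE_OFFSET	0
#define EX_CMD_PAUSE_MASK	0xF
#define EX_CMD_PAUSE_SHIFT	4

/* Same shape as QM_CMD_SET_FIELD(): paste cmd and field together to reach
 * the <cmd>_<field>_OFFSET / _MASK / _SHIFT defines.
 */
#define EX_CMD_SET_FIELD(var, cmd, field, value) \
	EX_SET_FIELD(var[cmd ## _ ## field ## _OFFSET], cmd ## _ ## field, value)

int main(void)
{
	unsigned int cmd[2] = { 0, 0 };

	EX_CMD_SET_FIELD(cmd, EX_CMD, PAUSE, 0x5);
	printf("cmd[0] = 0x%x\n", cmd[0]);	/* prints cmd[0] = 0x50 */
	return 0;
}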
/* QM: VOQ macros */ /* QM: VOQ macros */
...@@ -128,6 +163,7 @@ ...@@ -128,6 +163,7 @@
max_phy_tcs_pr_port) \ max_phy_tcs_pr_port) \
: LB_VOQ(port)) : LB_VOQ(port))
/******************** INTERNAL IMPLEMENTATION *********************/ /******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */ /* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{ {
......
...@@ -82,7 +82,7 @@ struct aeu_invert_reg_bit { ...@@ -82,7 +82,7 @@ struct aeu_invert_reg_bit {
#define ATTENTION_LENGTH_SHIFT (4) #define ATTENTION_LENGTH_SHIFT (4)
#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
ATTENTION_LENGTH_SHIFT) ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) #define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
ATTENTION_PARITY) ATTENTION_PARITY)
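As an aside on the BIT() conversion above: ATTENTION_SINGLE is just a length of 1 encoded into the flags word at ATTENTION_LENGTH_SHIFT, so ATTENTION_LENGTH() recovers 1 from it (and 2 from the PAR_INT case). A tiny sketch with an assumed 4-bit length mask and an assumed parity bit position, since neither value appears in this hunk:

#define EX_ATTENTION_PARITY		(1 << 0)	/* assumed bit position */
#define EX_ATTENTION_LENGTH_MASK	(0xF << 4)	/* assumed field width */
#define EX_ATTENTION_LENGTH_SHIFT	4
#define EX_ATTENTION_LENGTH(flags) \
	(((flags) & EX_ATTENTION_LENGTH_MASK) >> EX_ATTENTION_LENGTH_SHIFT)
#define EX_ATTENTION_SINGLE		(1 << EX_ATTENTION_LENGTH_SHIFT)
#define EX_ATTENTION_PAR		(EX_ATTENTION_SINGLE | EX_ATTENTION_PARITY)

/* EX_ATTENTION_LENGTH(EX_ATTENTION_SINGLE)                 == 1
 * EX_ATTENTION_LENGTH(2 << EX_ATTENTION_LENGTH_SHIFT)      == 2
 */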
......
...@@ -62,22 +62,6 @@ ...@@ -62,22 +62,6 @@
#include "qed_sriov.h" #include "qed_sriov.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
static int
qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code,
u16 echo, union event_ring_data *data, u8 fw_return_code)
{
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
return p_iscsi->event_cb(p_iscsi->event_context,
fw_event_code, data);
} else {
DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
return -EINVAL;
}
}
struct qed_iscsi_conn { struct qed_iscsi_conn {
struct list_head list_entry; struct list_head list_entry;
bool free_on_delete; bool free_on_delete;
...@@ -161,6 +145,22 @@ struct qed_iscsi_conn { ...@@ -161,6 +145,22 @@ struct qed_iscsi_conn {
u8 abortive_dsconnect; u8 abortive_dsconnect;
}; };
static int
qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code,
u16 echo, union event_ring_data *data, u8 fw_return_code)
{
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
return p_iscsi->event_cb(p_iscsi->event_context,
fw_event_code, data);
} else {
DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
return -EINVAL;
}
}
static int static int
qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode, enum spq_mode comp_mode,
...@@ -276,7 +276,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, ...@@ -276,7 +276,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer); p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
val = p_params->tx_sws_timer; val = p_params->tx_sws_timer;
p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val); p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt; p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt;
p_hwfn->p_iscsi_info->event_context = event_context; p_hwfn->p_iscsi_info->event_context = event_context;
p_hwfn->p_iscsi_info->event_cb = async_event_cb; p_hwfn->p_iscsi_info->event_cb = async_event_cb;
...@@ -304,8 +304,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, ...@@ -304,8 +304,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
int rc = 0; int rc = 0;
u32 dval; u32 dval;
u16 wval; u16 wval;
u8 i;
u16 *p; u16 *p;
u8 i;
/* Get SPQ entry */ /* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data)); memset(&init_data, 0, sizeof(init_data));
......
...@@ -342,56 +342,57 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context, ...@@ -342,56 +342,57 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
cpu_to_le16(dif_task_params->application_tag_mask); cpu_to_le16(dif_task_params->application_tag_mask);
SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED, SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
dif_task_params->crc_seed ? 1 : 0); dif_task_params->crc_seed ? 1 : 0);
SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE, SET_FIELD(rdif_context->flags0,
RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
dif_task_params->host_guard_type); dif_task_params->host_guard_type);
SET_FIELD(rdif_context->flags0, SET_FIELD(rdif_context->flags0,
RDIF_TASK_CONTEXT_PROTECTIONTYPE, RDIF_TASK_CONTEXT_PROTECTION_TYPE,
dif_task_params->protection_type); dif_task_params->protection_type);
SET_FIELD(rdif_context->flags0, SET_FIELD(rdif_context->flags0,
RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1); RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
SET_FIELD(rdif_context->flags0, SET_FIELD(rdif_context->flags0,
RDIF_TASK_CONTEXT_KEEPREFTAGCONST, RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
dif_task_params->keep_ref_tag_const ? 1 : 0); dif_task_params->keep_ref_tag_const ? 1 : 0);
SET_FIELD(rdif_context->flags1, SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_VALIDATEAPPTAG, RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
(dif_task_params->validate_app_tag && (dif_task_params->validate_app_tag &&
dif_task_params->dif_on_network) ? 1 : 0); dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1, SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_VALIDATEGUARD, RDIF_TASK_CONTEXT_VALIDATE_GUARD,
(dif_task_params->validate_guard && (dif_task_params->validate_guard &&
dif_task_params->dif_on_network) ? 1 : 0); dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1, SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_VALIDATEREFTAG, RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
(dif_task_params->validate_ref_tag && (dif_task_params->validate_ref_tag &&
dif_task_params->dif_on_network) ? 1 : 0); dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1, SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_HOSTINTERFACE, RDIF_TASK_CONTEXT_HOST_INTERFACE,
dif_task_params->dif_on_host ? 1 : 0); dif_task_params->dif_on_host ? 1 : 0);
SET_FIELD(rdif_context->flags1, SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_NETWORKINTERFACE, RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
dif_task_params->dif_on_network ? 1 : 0); dif_task_params->dif_on_network ? 1 : 0);
SET_FIELD(rdif_context->flags1, SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDGUARD, RDIF_TASK_CONTEXT_FORWARD_GUARD,
dif_task_params->forward_guard ? 1 : 0); dif_task_params->forward_guard ? 1 : 0);
SET_FIELD(rdif_context->flags1, SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDAPPTAG, RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
dif_task_params->forward_app_tag ? 1 : 0); dif_task_params->forward_app_tag ? 1 : 0);
SET_FIELD(rdif_context->flags1, SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDREFTAG, RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
dif_task_params->forward_ref_tag ? 1 : 0); dif_task_params->forward_ref_tag ? 1 : 0);
SET_FIELD(rdif_context->flags1, SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK, RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
dif_task_params->forward_app_tag_with_mask ? 1 : 0); dif_task_params->forward_app_tag_with_mask ? 1 : 0);
SET_FIELD(rdif_context->flags1, SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK, RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
dif_task_params->forward_ref_tag_with_mask ? 1 : 0); dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
SET_FIELD(rdif_context->flags1, SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_INTERVALSIZE, RDIF_TASK_CONTEXT_INTERVAL_SIZE,
dif_task_params->dif_block_size_log - 9); dif_task_params->dif_block_size_log - 9);
SET_FIELD(rdif_context->state, SET_FIELD(rdif_context->state,
RDIF_TASK_CONTEXT_REFTAGMASK, RDIF_TASK_CONTEXT_REF_TAG_MASK,
dif_task_params->ref_tag_mask); dif_task_params->ref_tag_mask);
SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG, SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
dif_task_params->ignore_app_tag); dif_task_params->ignore_app_tag);
} }
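One detail worth calling out in the hunk above: the DIF interval size field is stored as log2 of the block size minus 9, i.e. relative to a 512-byte interval. A hedged sketch of that encoding (the helper name is made up here):

/* 512B block  -> log2 = 9  -> field value 0
 * 4096B block -> log2 = 12 -> field value 3
 * The 3-bit field (mask 0x7) therefore covers 512B..64KB intervals.
 */
static unsigned char ex_dif_interval_size(unsigned char dif_block_size_log)
{
	return dif_block_size_log - 9;
}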
...@@ -399,7 +400,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context, ...@@ -399,7 +400,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) { task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
tdif_context->app_tag_value = tdif_context->app_tag_value =
cpu_to_le16(dif_task_params->application_tag); cpu_to_le16(dif_task_params->application_tag);
tdif_context->partial_crc_valueB = tdif_context->partial_crc_value_b =
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000); cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
tdif_context->partial_crc_value_a = tdif_context->partial_crc_value_a =
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000); cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
...@@ -407,59 +408,63 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context, ...@@ -407,59 +408,63 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
dif_task_params->crc_seed ? 1 : 0); dif_task_params->crc_seed ? 1 : 0);
SET_FIELD(tdif_context->flags0, SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_SETERRORWITHEOP, TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
dif_task_params->tx_dif_conn_err_en ? 1 : 0); dif_task_params->tx_dif_conn_err_en ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD, SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
dif_task_params->forward_guard ? 1 : 0); dif_task_params->forward_guard ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG, SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
dif_task_params->forward_app_tag ? 1 : 0); dif_task_params->forward_app_tag ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG, SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
dif_task_params->forward_ref_tag ? 1 : 0); dif_task_params->forward_ref_tag ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE, SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
dif_task_params->dif_block_size_log - 9); dif_task_params->dif_block_size_log - 9);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE, SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_HOST_INTERFACE,
dif_task_params->dif_on_host ? 1 : 0); dif_task_params->dif_on_host ? 1 : 0);
SET_FIELD(tdif_context->flags1, SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_NETWORKINTERFACE, TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
dif_task_params->dif_on_network ? 1 : 0); dif_task_params->dif_on_network ? 1 : 0);
val = cpu_to_le32(dif_task_params->initial_ref_tag); val = cpu_to_le32(dif_task_params->initial_ref_tag);
tdif_context->initial_ref_tag = val; tdif_context->initial_ref_tag = val;
tdif_context->app_tag_mask = tdif_context->app_tag_mask =
cpu_to_le16(dif_task_params->application_tag_mask); cpu_to_le16(dif_task_params->application_tag_mask);
SET_FIELD(tdif_context->flags0, SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_HOSTGUARDTYPE, TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
dif_task_params->host_guard_type); dif_task_params->host_guard_type);
SET_FIELD(tdif_context->flags0, SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_PROTECTIONTYPE, TDIF_TASK_CONTEXT_PROTECTION_TYPE,
dif_task_params->protection_type); dif_task_params->protection_type);
SET_FIELD(tdif_context->flags0, SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_INITIALREFTAGVALID, TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
dif_task_params->initial_ref_tag_is_valid ? 1 : 0); dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
SET_FIELD(tdif_context->flags0, SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_KEEPREFTAGCONST, TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
dif_task_params->keep_ref_tag_const ? 1 : 0); dif_task_params->keep_ref_tag_const ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD, SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_VALIDATE_GUARD,
(dif_task_params->validate_guard && (dif_task_params->validate_guard &&
dif_task_params->dif_on_host) ? 1 : 0); dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1, SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_VALIDATEAPPTAG, TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
(dif_task_params->validate_app_tag && (dif_task_params->validate_app_tag &&
dif_task_params->dif_on_host) ? 1 : 0); dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1, SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_VALIDATEREFTAG, TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
(dif_task_params->validate_ref_tag && (dif_task_params->validate_ref_tag &&
dif_task_params->dif_on_host) ? 1 : 0); dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1, SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK, TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
dif_task_params->forward_app_tag_with_mask ? 1 : 0); dif_task_params->forward_app_tag_with_mask ? 1 : 0);
SET_FIELD(tdif_context->flags1, SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK, TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
dif_task_params->forward_ref_tag_with_mask ? 1 : 0); dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
SET_FIELD(tdif_context->flags1, SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_REFTAGMASK, TDIF_TASK_CONTEXT_REF_TAG_MASK,
dif_task_params->ref_tag_mask); dif_task_params->ref_tag_mask);
SET_FIELD(tdif_context->flags0, SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_IGNOREAPPTAG, TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
dif_task_params->ignore_app_tag ? 1 : 0); dif_task_params->ignore_app_tag ? 1 : 0);
} }
} }
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#ifndef _COMMON_HSI_H #ifndef _COMMON_HSI_H
#define _COMMON_HSI_H #define _COMMON_HSI_H
#include <linux/types.h> #include <linux/types.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <linux/bitops.h> #include <linux/bitops.h>
...@@ -48,13 +49,19 @@ ...@@ -48,13 +49,19 @@
} while (0) } while (0)
#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) #define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) #define HILO_64(hi, lo) \
#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64)
#define HILO_64_REGPAIR(regpair) ({ \
typeof(regpair) __regpair = (regpair); \
HILO_64(__regpair.hi, __regpair.lo); })
#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) #define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
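The reworked HILO_64_REGPAIR() above switches to a GCC statement expression so the regpair argument is evaluated only once; the older form expanded it twice (once for .hi, once for .lo). A standalone sketch of the difference, using a local regpair type and no endianness conversion (all EX_ names are assumptions):

#include <stdio.h>
#include <stdint.h>

struct ex_regpair { uint32_t lo; uint32_t hi; };

#define EX_HILO_64(hi, lo)	((((uint64_t)(hi)) << 32) + (lo))

/* Old style: 'regpair' appears twice, so side effects would run twice */
#define EX_HILO_64_REGPAIR_OLD(regpair)	EX_HILO_64((regpair).hi, (regpair).lo)

/* New style: evaluate the argument once into a local copy */
#define EX_HILO_64_REGPAIR(regpair) ({		\
	typeof(regpair) __rp = (regpair);	\
	EX_HILO_64(__rp.hi, __rp.lo); })

int main(void)
{
	struct ex_regpair pairs[2] = { { 0x1, 0x2 }, { 0x3, 0x4 } };
	int i = 0;

	/* With the old macro, pairs[i++] would advance i twice */
	uint64_t v = EX_HILO_64_REGPAIR(pairs[i++]);

	printf("v = 0x%llx, i = %d\n", (unsigned long long)v, i); /* i == 1 */
	return 0;
}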
#ifndef __COMMON_HSI__ #ifndef __COMMON_HSI__
#define __COMMON_HSI__ #define __COMMON_HSI__
/********************************/
/* PROTOCOL COMMON FW CONSTANTS */
/********************************/
#define X_FINAL_CLEANUP_AGG_INT 1 #define X_FINAL_CLEANUP_AGG_INT 1
...@@ -147,9 +154,6 @@ ...@@ -147,9 +154,6 @@
#define LB_TC (NUM_OF_PHYS_TCS) #define LB_TC (NUM_OF_PHYS_TCS)
/* Num of possible traffic priority values */
#define NUM_OF_PRIO (8)
#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) #define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) #define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) #define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
...@@ -160,11 +164,6 @@ ...@@ -160,11 +164,6 @@
#define NUM_OF_LCIDS (320) #define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320) #define NUM_OF_LTIDS (320)
/* Clock values */
#define MASTER_CLK_FREQ_E4 (375e6)
#define STORM_CLK_FREQ_E4 (1000e6)
#define CLK25M_CLK_FREQ_E4 (25e6)
/* Global PXP windows (GTT) */ /* Global PXP windows (GTT) */
#define NUM_OF_GTT 19 #define NUM_OF_GTT 19
#define GTT_DWORD_SIZE_BITS 10 #define GTT_DWORD_SIZE_BITS 10
...@@ -201,7 +200,7 @@ ...@@ -201,7 +200,7 @@
#define DQ_DEMS_TOE_LOCAL_ADV_WND 4 #define DQ_DEMS_TOE_LOCAL_ADV_WND 4
#define DQ_DEMS_ROCE_CQ_CONS 7 #define DQ_DEMS_ROCE_CQ_CONS 7
/* XCM agg val selection */ /* XCM agg val selection (HW) */
#define DQ_XCM_AGG_VAL_SEL_WORD2 0 #define DQ_XCM_AGG_VAL_SEL_WORD2 0
#define DQ_XCM_AGG_VAL_SEL_WORD3 1 #define DQ_XCM_AGG_VAL_SEL_WORD3 1
#define DQ_XCM_AGG_VAL_SEL_WORD4 2 #define DQ_XCM_AGG_VAL_SEL_WORD4 2
...@@ -211,7 +210,7 @@ ...@@ -211,7 +210,7 @@
#define DQ_XCM_AGG_VAL_SEL_REG5 6 #define DQ_XCM_AGG_VAL_SEL_REG5 6
#define DQ_XCM_AGG_VAL_SEL_REG6 7 #define DQ_XCM_AGG_VAL_SEL_REG6 7
/* XCM agg val selection */ /* XCM agg val selection (FW) */
#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 #define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
...@@ -263,7 +262,7 @@ ...@@ -263,7 +262,7 @@
#define DQ_TCM_ROCE_RQ_PROD_CMD \ #define DQ_TCM_ROCE_RQ_PROD_CMD \
DQ_TCM_AGG_VAL_SEL_WORD0 DQ_TCM_AGG_VAL_SEL_WORD0
/* XCM agg counter flag selection */ /* XCM agg counter flag selection (HW) */
#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 #define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 #define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
#define DQ_XCM_AGG_FLG_SHIFT_CF12 2 #define DQ_XCM_AGG_FLG_SHIFT_CF12 2
...@@ -273,7 +272,7 @@ ...@@ -273,7 +272,7 @@
#define DQ_XCM_AGG_FLG_SHIFT_CF22 6 #define DQ_XCM_AGG_FLG_SHIFT_CF22 6
#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 #define DQ_XCM_AGG_FLG_SHIFT_CF23 7
/* XCM agg counter flag selection */ /* XCM agg counter flag selection (FW) */
#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) #define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) #define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
...@@ -347,6 +346,7 @@ ...@@ -347,6 +346,7 @@
#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS) #define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1) #define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3) #define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
#define DQ_REGION_SHIFT (12) #define DQ_REGION_SHIFT (12)
/* DPM */ /* DPM */
...@@ -359,29 +359,30 @@ ...@@ -359,29 +359,30 @@
/* QM CONSTANTS */ /* QM CONSTANTS */
/*****************/ /*****************/
/* number of TX queues in the QM */ /* Number of TX queues in the QM */
#define MAX_QM_TX_QUEUES_K2 512 #define MAX_QM_TX_QUEUES_K2 512
#define MAX_QM_TX_QUEUES_BB 448 #define MAX_QM_TX_QUEUES_BB 448
#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 #define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
/* number of Other queues in the QM */ /* Number of Other queues in the QM */
#define MAX_QM_OTHER_QUEUES_BB 64 #define MAX_QM_OTHER_QUEUES_BB 64
#define MAX_QM_OTHER_QUEUES_K2 128 #define MAX_QM_OTHER_QUEUES_K2 128
#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 #define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
/* number of queues in a PF queue group */ /* Number of queues in a PF queue group */
#define QM_PF_QUEUE_GROUP_SIZE 8 #define QM_PF_QUEUE_GROUP_SIZE 8
/* the size of a single queue element in bytes */ /* The size of a single queue element in bytes */
#define QM_PQ_ELEMENT_SIZE 4 #define QM_PQ_ELEMENT_SIZE 4
/* base number of Tx PQs in the CM PQ representation. /* Base number of Tx PQs in the CM PQ representation.
* should be used when storing PQ IDs in CM PQ registers and context * Should be used when storing PQ IDs in CM PQ registers and context.
*/ */
#define CM_TX_PQ_BASE 0x200 #define CM_TX_PQ_BASE 0x200
/* number of global Vport/QCN rate limiters */ /* Number of global Vport/QCN rate limiters */
#define MAX_QM_GLOBAL_RLS 256 #define MAX_QM_GLOBAL_RLS 256
/* QM registers data */ /* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16 #define QM_LINE_CRD_REG_WIDTH 16
#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1)) #define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1))
...@@ -432,8 +433,7 @@ ...@@ -432,8 +433,7 @@
#define IGU_CMD_INT_ACK_BASE 0x0400 #define IGU_CMD_INT_ACK_BASE 0x0400
#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ #define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
MAX_TOT_SB_PER_PATH - \ MAX_TOT_SB_PER_PATH - 1)
1)
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff #define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 #define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
...@@ -447,8 +447,7 @@ ...@@ -447,8 +447,7 @@
#define IGU_CMD_PROD_UPD_BASE 0x0600 #define IGU_CMD_PROD_UPD_BASE 0x0600
#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ #define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
MAX_TOT_SB_PER_PATH - \ MAX_TOT_SB_PER_PATH - 1)
1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff #define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
/*****************/ /*****************/
...@@ -555,11 +554,6 @@ ...@@ -555,11 +554,6 @@
/* VF BAR */ /* VF BAR */
#define PXP_VF_BAR0 0 #define PXP_VF_BAR0 0
#define PXP_VF_BAR0_START_GRC 0x3E00
#define PXP_VF_BAR0_GRC_LENGTH 0x200
#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
PXP_VF_BAR0_GRC_LENGTH - 1)
#define PXP_VF_BAR0_START_IGU 0 #define PXP_VF_BAR0_START_IGU 0
#define PXP_VF_BAR0_IGU_LENGTH 0x3000 #define PXP_VF_BAR0_IGU_LENGTH 0x3000
#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ #define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
...@@ -577,40 +571,33 @@ ...@@ -577,40 +571,33 @@
#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 #define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 #define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \ #define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \
+ \ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 #define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \ #define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \
+ \ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 #define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \ #define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \
+ \ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 #define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \ #define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \
+ \ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 #define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \ #define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \
+ \ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 #define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \ #define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \
+ \ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1) #define PXP_VF_BAR0_START_GRC 0x3E00
#define PXP_VF_BAR0_GRC_LENGTH 0x200
#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
PXP_VF_BAR0_GRC_LENGTH - 1)
#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 #define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 #define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
...@@ -624,11 +611,15 @@ ...@@ -624,11 +611,15 @@
#define PXP_NUM_ILT_RECORDS_BB 7600 #define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000 #define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
/* Host Interface */
#define PXP_QUEUES_ZONE_MAX_NUM 320 #define PXP_QUEUES_ZONE_MAX_NUM 320
/*****************/ /*****************/
/* PRM CONSTANTS */ /* PRM CONSTANTS */
/*****************/ /*****************/
#define PRM_DMA_PAD_BYTES_NUM 2 #define PRM_DMA_PAD_BYTES_NUM 2
/*****************/ /*****************/
/* SDMs CONSTANTS */ /* SDMs CONSTANTS */
/*****************/ /*****************/
...@@ -656,7 +647,7 @@ ...@@ -656,7 +647,7 @@
#define SDM_COMP_TYPE_INC_ORDER_CNT 9 #define SDM_COMP_TYPE_INC_ORDER_CNT 9
/*****************/ /*****************/
/* PBF Constants */ /* PBF CONSTANTS */
/*****************/ /*****************/
/* Number of PBF command queue lines. Each line is 32B. */ /* Number of PBF command queue lines. Each line is 32B. */
...@@ -679,6 +670,7 @@ struct async_data { ...@@ -679,6 +670,7 @@ struct async_data {
u8 fw_debug_param; u8 fw_debug_param;
}; };
/* Interrupt coalescing TimeSet */
struct coalescing_timeset { struct coalescing_timeset {
u8 value; u8 value;
#define COALESCING_TIMESET_TIMESET_MASK 0x7F #define COALESCING_TIMESET_TIMESET_MASK 0x7F
...@@ -692,20 +684,12 @@ struct common_queue_zone { ...@@ -692,20 +684,12 @@ struct common_queue_zone {
__le16 reserved; __le16 reserved;
}; };
/* ETH Rx producers data */
struct eth_rx_prod_data { struct eth_rx_prod_data {
__le16 bd_prod; __le16 bd_prod;
__le16 cqe_prod; __le16 cqe_prod;
}; };
struct regpair {
__le32 lo;
__le32 hi;
};
struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
struct iscsi_eqe_data { struct iscsi_eqe_data {
__le32 cid; __le32 cid;
__le16 conn_id; __le16 conn_id;
...@@ -719,52 +703,6 @@ struct iscsi_eqe_data { ...@@ -719,52 +703,6 @@ struct iscsi_eqe_data {
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 #define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
}; };
struct rdma_eqe_destroy_qp {
__le32 cid;
u8 reserved[4];
};
union rdma_eqe_data {
struct regpair async_handle;
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
};
struct malicious_vf_eqe_data {
u8 vf_id;
u8 err_id;
__le16 reserved[3];
};
struct initial_cleanup_eqe_data {
u8 vf_id;
u8 reserved[7];
};
/* Event Data Union */
union event_ring_data {
u8 bytes[8];
struct vf_pf_channel_eqe_data vf_pf_channel;
struct iscsi_eqe_data iscsi_info;
union rdma_eqe_data rdma_data;
struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
};
/* Event Ring Entry */
struct event_ring_entry {
u8 protocol_id;
u8 opcode;
__le16 reserved0;
__le16 echo;
u8 fw_return_code;
u8 flags;
#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
union event_ring_data data;
};
/* Multi function mode */ /* Multi function mode */
enum mf_mode { enum mf_mode {
ERROR_MODE /* Unsupported mode */, ERROR_MODE /* Unsupported mode */,
...@@ -781,13 +719,31 @@ enum protocol_type { ...@@ -781,13 +719,31 @@ enum protocol_type {
PROTOCOLID_CORE, PROTOCOLID_CORE,
PROTOCOLID_ETH, PROTOCOLID_ETH,
PROTOCOLID_IWARP, PROTOCOLID_IWARP,
PROTOCOLID_RESERVED5, PROTOCOLID_RESERVED0,
PROTOCOLID_PREROCE, PROTOCOLID_PREROCE,
PROTOCOLID_COMMON, PROTOCOLID_COMMON,
PROTOCOLID_RESERVED6, PROTOCOLID_RESERVED1,
MAX_PROTOCOL_TYPE MAX_PROTOCOL_TYPE
}; };
struct regpair {
__le32 lo;
__le32 hi;
};
/* RoCE Destroy Event Data */
struct rdma_eqe_destroy_qp {
__le32 cid;
u8 reserved[4];
};
/* RDMA Event Data Union */
union rdma_eqe_data {
struct regpair async_handle;
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
};
/* Ustorm Queue Zone */
struct ustorm_eth_queue_zone { struct ustorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset; struct coalescing_timeset int_coalescing_timeset;
u8 reserved[3]; u8 reserved[3];
...@@ -798,7 +754,7 @@ struct ustorm_queue_zone { ...@@ -798,7 +754,7 @@ struct ustorm_queue_zone {
struct common_queue_zone common; struct common_queue_zone common;
}; };
/* status block structure */ /* Status block structure */
struct cau_pi_entry { struct cau_pi_entry {
u32 prod; u32 prod;
#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF #define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
...@@ -811,7 +767,7 @@ struct cau_pi_entry { ...@@ -811,7 +767,7 @@ struct cau_pi_entry {
#define CAU_PI_ENTRY_RESERVED_SHIFT 24 #define CAU_PI_ENTRY_RESERVED_SHIFT 24
}; };
/* status block structure */ /* Status block structure */
struct cau_sb_entry { struct cau_sb_entry {
u32 data; u32 data;
#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF #define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
...@@ -839,7 +795,16 @@ struct cau_sb_entry { ...@@ -839,7 +795,16 @@ struct cau_sb_entry {
#define CAU_SB_ENTRY_TPH_SHIFT 31 #define CAU_SB_ENTRY_TPH_SHIFT 31
}; };
/* core doorbell data */ /* Igu cleanup bit values to distinguish between clean or producer consumer
* update.
*/
enum command_type_bit {
IGU_COMMAND_TYPE_NOP = 0,
IGU_COMMAND_TYPE_SET = 1,
MAX_COMMAND_TYPE_BIT
};
/* Core doorbell data */
struct core_db_data { struct core_db_data {
u8 params; u8 params;
#define CORE_DB_DATA_DEST_MASK 0x3 #define CORE_DB_DATA_DEST_MASK 0x3
...@@ -946,7 +911,7 @@ struct db_pwm_addr { ...@@ -946,7 +911,7 @@ struct db_pwm_addr {
#define DB_PWM_ADDR_RESERVED1_SHIFT 28 #define DB_PWM_ADDR_RESERVED1_SHIFT 28
}; };
/* Parameters to RoCE firmware, passed in EDPM doorbell */ /* Parameters to RDMA firmware, passed in EDPM doorbell */
struct db_rdma_dpm_params { struct db_rdma_dpm_params {
__le32 params; __le32 params;
#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F #define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
...@@ -969,7 +934,9 @@ struct db_rdma_dpm_params { ...@@ -969,7 +934,9 @@ struct db_rdma_dpm_params {
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
}; };
/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */ /* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
* DPM burst.
*/
struct db_rdma_dpm_data { struct db_rdma_dpm_data {
__le16 icid; __le16 icid;
__le16 prod_val; __le16 prod_val;
...@@ -1044,6 +1011,7 @@ struct parsing_and_err_flags { ...@@ -1044,6 +1011,7 @@ struct parsing_and_err_flags {
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
}; };
/* Parsing error flags bitmap */
struct parsing_err_flags { struct parsing_err_flags {
__le16 flags; __le16 flags;
#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 #define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
...@@ -1080,10 +1048,12 @@ struct parsing_err_flags { ...@@ -1080,10 +1048,12 @@ struct parsing_err_flags {
#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 #define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
}; };
/* Pb context */
struct pb_context { struct pb_context {
__le32 crc[4]; __le32 crc[4];
}; };
/* Concrete Function ID */
struct pxp_concrete_fid { struct pxp_concrete_fid {
__le16 fid; __le16 fid;
#define PXP_CONCRETE_FID_PFID_MASK 0xF #define PXP_CONCRETE_FID_PFID_MASK 0xF
...@@ -1098,6 +1068,7 @@ struct pxp_concrete_fid { ...@@ -1098,6 +1068,7 @@ struct pxp_concrete_fid {
#define PXP_CONCRETE_FID_VFID_SHIFT 8 #define PXP_CONCRETE_FID_VFID_SHIFT 8
}; };
/* Concrete Function ID */
struct pxp_pretend_concrete_fid { struct pxp_pretend_concrete_fid {
__le16 fid; __le16 fid;
#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF #define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
...@@ -1110,12 +1081,13 @@ struct pxp_pretend_concrete_fid { ...@@ -1110,12 +1081,13 @@ struct pxp_pretend_concrete_fid {
#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 #define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
}; };
/* Function ID */
union pxp_pretend_fid { union pxp_pretend_fid {
struct pxp_pretend_concrete_fid concrete_fid; struct pxp_pretend_concrete_fid concrete_fid;
__le16 opaque_fid; __le16 opaque_fid;
}; };
/* Pxp Pretend Command Register. */ /* Pxp Pretend Command Register */
struct pxp_pretend_cmd { struct pxp_pretend_cmd {
union pxp_pretend_fid fid; union pxp_pretend_fid fid;
__le16 control; __le16 control;
...@@ -1139,7 +1111,7 @@ struct pxp_pretend_cmd { ...@@ -1139,7 +1111,7 @@ struct pxp_pretend_cmd {
#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 #define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
}; };
/* PTT Record in PXP Admin Window. */ /* PTT Record in PXP Admin Window */
struct pxp_ptt_entry { struct pxp_ptt_entry {
__le32 offset; __le32 offset;
#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF #define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
...@@ -1149,7 +1121,7 @@ struct pxp_ptt_entry { ...@@ -1149,7 +1121,7 @@ struct pxp_ptt_entry {
struct pxp_pretend_cmd pretend; struct pxp_pretend_cmd pretend;
}; };
/* VF Zone A Permission Register. */ /* VF Zone A Permission Register */
struct pxp_vf_zone_a_permission { struct pxp_vf_zone_a_permission {
__le32 control; __le32 control;
#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF #define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
...@@ -1162,86 +1134,74 @@ struct pxp_vf_zone_a_permission { ...@@ -1162,86 +1134,74 @@ struct pxp_vf_zone_a_permission {
#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 #define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
}; };
/* RSS hash type */ /* Rdif context */
struct rdif_task_context { struct rdif_task_context {
__le32 initial_ref_tag; __le32 initial_ref_tag;
__le16 app_tag_value; __le16 app_tag_value;
__le16 app_tag_mask; __le16 app_tag_mask;
u8 flags0; u8 flags0;
#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 #define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 #define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 #define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 #define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 #define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 #define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 #define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 #define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 #define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 #define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 #define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 #define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 #define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7 #define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7
u8 partial_dif_data[7]; u8 partial_dif_data[7];
__le16 partial_crc_value; __le16 partial_crc_value;
__le16 partial_checksum_value; __le16 partial_checksum_value;
__le32 offset_in_io; __le32 offset_in_io;
__le16 flags1; __le16 flags1;
#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 #define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 #define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 #define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 #define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 #define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 #define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 #define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 #define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 #define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 #define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 #define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 #define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 #define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 #define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 #define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 #define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 #define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 #define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 #define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 #define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 #define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 #define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 #define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14 #define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 #define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15 #define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
__le16 state; __le16 state;
#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF #define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF
#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0 #define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0
#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF #define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF
#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4 #define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1 #define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1
#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8 #define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8
#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 #define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1
#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 #define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9
#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF #define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10 #define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10
#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 #define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 #define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
__le32 reserved2; __le32 reserved2;
}; };
/* RSS hash type */ /* Status block structure */
enum rss_hash_type {
RSS_HASH_TYPE_DEFAULT = 0,
RSS_HASH_TYPE_IPV4 = 1,
RSS_HASH_TYPE_TCP_IPV4 = 2,
RSS_HASH_TYPE_IPV6 = 3,
RSS_HASH_TYPE_TCP_IPV6 = 4,
RSS_HASH_TYPE_UDP_IPV4 = 5,
RSS_HASH_TYPE_UDP_IPV6 = 6,
MAX_RSS_HASH_TYPE
};
/* status block structure */
struct status_block { struct status_block {
__le16 pi_array[PIS_PER_SB]; __le16 pi_array[PIS_PER_SB];
__le32 sb_num; __le32 sb_num;
...@@ -1258,88 +1218,90 @@ struct status_block { ...@@ -1258,88 +1218,90 @@ struct status_block {
#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 #define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
}; };
/* Tdif context */
struct tdif_task_context { struct tdif_task_context {
__le32 initial_ref_tag; __le32 initial_ref_tag;
__le16 app_tag_value; __le16 app_tag_value;
__le16 app_tag_mask; __le16 app_tag_mask;
__le16 partial_crc_valueB; __le16 partial_crc_value_b;
__le16 partial_checksum_valueB; __le16 partial_checksum_value_b;
__le16 stateB; __le16 stateB;
#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF #define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF
#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0 #define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0
#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF #define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF
#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4 #define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1 #define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1
#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8 #define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8
#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 #define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1
#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 #define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9
#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F #define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 #define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
u8 reserved1; u8 reserved1;
u8 flags0; u8 flags0;
#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 #define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 #define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 #define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 #define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 #define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 #define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 #define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 #define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 #define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 #define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 #define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 #define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 #define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 #define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
__le32 flags1; __le32 flags1;
#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 #define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 #define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 #define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 #define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 #define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 #define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 #define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 #define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 #define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 #define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 #define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 #define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 #define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 #define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 #define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 #define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 #define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 #define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 #define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 #define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 #define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 #define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF #define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF
#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14 #define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14
#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF #define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF
#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18 #define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1 #define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1
#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22 #define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22
#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1 #define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1
#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23 #define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23
#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF #define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24 #define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24
#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 #define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28 #define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28
#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 #define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29 #define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29
#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 #define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30 #define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30
#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 #define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 #define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
__le32 offset_in_iob; __le32 offset_in_io_b;
__le16 partial_crc_value_a; __le16 partial_crc_value_a;
__le16 partial_checksum_valuea_; __le16 partial_checksum_value_a;
__le32 offset_in_ioa; __le32 offset_in_io_a;
u8 partial_dif_data_a[8]; u8 partial_dif_data_a[8];
u8 partial_dif_data_b[8]; u8 partial_dif_data_b[8];
}; };
/* Timers context */
struct timers_context { struct timers_context {
__le32 logical_client_0; __le32 logical_client_0;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
...@@ -1385,6 +1347,7 @@ struct timers_context { ...@@ -1385,6 +1347,7 @@ struct timers_context {
#define TIMERS_CONTEXT_RESERVED7_SHIFT 29 #define TIMERS_CONTEXT_RESERVED7_SHIFT 29
}; };
/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */
enum tunnel_next_protocol { enum tunnel_next_protocol {
e_unknown = 0, e_unknown = 0,
e_l2 = 1, e_l2 = 1,
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
/********************/ /********************/
/* ETH FW CONSTANTS */ /* ETH FW CONSTANTS */
/********************/ /********************/
#define ETH_HSI_VER_MAJOR 3 #define ETH_HSI_VER_MAJOR 3
#define ETH_HSI_VER_MINOR 10 #define ETH_HSI_VER_MINOR 10
...@@ -78,16 +79,16 @@ ...@@ -78,16 +79,16 @@
#define ETH_RX_MAX_BUFF_PER_PKT 5 #define ETH_RX_MAX_BUFF_PER_PKT 5
#define ETH_RX_BD_THRESHOLD 12 #define ETH_RX_BD_THRESHOLD 12
/* num of MAC/VLAN filters */ /* Num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512 #define ETH_NUM_MAC_FILTERS 512
#define ETH_NUM_VLAN_FILTERS 512 #define ETH_NUM_VLAN_FILTERS 512
/* approx. multicast constants */ /* Approx. multicast constants */
#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 #define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
#define ETH_MULTICAST_MAC_BINS 256 #define ETH_MULTICAST_MAC_BINS 256
#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) #define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
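A side note on the approximate multicast constants just above: 256 bins packed 32 to a 32-bit register gives ETH_MULTICAST_MAC_BINS_IN_REGS = 8. The sketch below shows only the bin-to-register/bit mapping; the hash that turns a MAC address into a bin (seeded by ETH_MULTICAST_BIN_FROM_MAC_SEED) is assumed to come from the driver and is not shown, and the bin value used is a made-up example.

#include <stdint.h>
#include <stdio.h>

#define ETH_MULTICAST_MAC_BINS 256
#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)

int main(void)
{
	uint32_t bins[ETH_MULTICAST_MAC_BINS_IN_REGS] = { 0 };
	unsigned int bin = 77; /* example bin index produced by the MAC hash */

	bins[bin / 32] |= 1u << (bin % 32); /* mark the bin as accepted */
	printf("bin %u -> register %u, bit %u\n", bin, bin / 32, bin % 32);
	return 0;
}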
/* ethernet vport update constants */ /* Ethernet vport update constants */
#define ETH_FILTER_RULES_COUNT 10 #define ETH_FILTER_RULES_COUNT 10
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 #define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
#define ETH_RSS_KEY_SIZE_REGS 10 #define ETH_RSS_KEY_SIZE_REGS 10
...@@ -123,7 +124,7 @@ struct eth_tx_1st_bd_flags { ...@@ -123,7 +124,7 @@ struct eth_tx_1st_bd_flags {
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 #define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
}; };
/* The parsing information data for the first tx bd of a given packet. */ /* The parsing information data for the first tx bd of a given packet */
struct eth_tx_data_1st_bd { struct eth_tx_data_1st_bd {
__le16 vlan; __le16 vlan;
u8 nbds; u8 nbds;
...@@ -137,7 +138,7 @@ struct eth_tx_data_1st_bd { ...@@ -137,7 +138,7 @@ struct eth_tx_data_1st_bd {
#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 #define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
}; };
/* The parsing information data for the second tx bd of a given packet. */ /* The parsing information data for the second tx bd of a given packet */
struct eth_tx_data_2nd_bd { struct eth_tx_data_2nd_bd {
__le16 tunn_ip_size; __le16 tunn_ip_size;
__le16 bitfields1; __le16 bitfields1;
...@@ -168,7 +169,7 @@ struct eth_tx_data_2nd_bd { ...@@ -168,7 +169,7 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 #define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
}; };
/* Firmware data for L2-EDPM packet. */ /* Firmware data for L2-EDPM packet */
struct eth_edpm_fw_data { struct eth_edpm_fw_data {
struct eth_tx_data_1st_bd data_1st_bd; struct eth_tx_data_1st_bd data_1st_bd;
struct eth_tx_data_2nd_bd data_2nd_bd; struct eth_tx_data_2nd_bd data_2nd_bd;
...@@ -179,7 +180,7 @@ struct eth_fast_path_cqe_fw_debug { ...@@ -179,7 +180,7 @@ struct eth_fast_path_cqe_fw_debug {
__le16 reserved2; __le16 reserved2;
}; };
/* tunneling parsing flags */ /* Tunneling parsing flags */
struct eth_tunnel_parsing_flags { struct eth_tunnel_parsing_flags {
u8 flags; u8 flags;
#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 #define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
...@@ -207,7 +208,7 @@ struct eth_pmd_flow_flags { ...@@ -207,7 +208,7 @@ struct eth_pmd_flow_flags {
#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 #define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
}; };
/* Regular ETH Rx FP CQE. */ /* Regular ETH Rx FP CQE */
struct eth_fast_path_rx_reg_cqe { struct eth_fast_path_rx_reg_cqe {
u8 type; u8 type;
u8 bitfields; u8 bitfields;
...@@ -231,7 +232,7 @@ struct eth_fast_path_rx_reg_cqe { ...@@ -231,7 +232,7 @@ struct eth_fast_path_rx_reg_cqe {
struct eth_pmd_flow_flags pmd_flags; struct eth_pmd_flow_flags pmd_flags;
}; };
/* TPA-continue ETH Rx FP CQE. */ /* TPA-continue ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_cont_cqe { struct eth_fast_path_rx_tpa_cont_cqe {
u8 type; u8 type;
u8 tpa_agg_index; u8 tpa_agg_index;
...@@ -243,7 +244,7 @@ struct eth_fast_path_rx_tpa_cont_cqe { ...@@ -243,7 +244,7 @@ struct eth_fast_path_rx_tpa_cont_cqe {
struct eth_pmd_flow_flags pmd_flags; struct eth_pmd_flow_flags pmd_flags;
}; };
/* TPA-end ETH Rx FP CQE. */ /* TPA-end ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_end_cqe { struct eth_fast_path_rx_tpa_end_cqe {
u8 type; u8 type;
u8 tpa_agg_index; u8 tpa_agg_index;
...@@ -259,7 +260,7 @@ struct eth_fast_path_rx_tpa_end_cqe { ...@@ -259,7 +260,7 @@ struct eth_fast_path_rx_tpa_end_cqe {
struct eth_pmd_flow_flags pmd_flags; struct eth_pmd_flow_flags pmd_flags;
}; };
/* TPA-start ETH Rx FP CQE. */ /* TPA-start ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_start_cqe { struct eth_fast_path_rx_tpa_start_cqe {
u8 type; u8 type;
u8 bitfields; u8 bitfields;
...@@ -295,7 +296,7 @@ struct eth_rx_bd { ...@@ -295,7 +296,7 @@ struct eth_rx_bd {
struct regpair addr; struct regpair addr;
}; };
/* regular ETH Rx SP CQE */ /* Regular ETH Rx SP CQE */
struct eth_slow_path_rx_cqe { struct eth_slow_path_rx_cqe {
u8 type; u8 type;
u8 ramrod_cmd_id; u8 ramrod_cmd_id;
...@@ -306,7 +307,7 @@ struct eth_slow_path_rx_cqe { ...@@ -306,7 +307,7 @@ struct eth_slow_path_rx_cqe {
struct eth_pmd_flow_flags pmd_flags; struct eth_pmd_flow_flags pmd_flags;
}; };
/* union for all ETH Rx CQE types */ /* Union for all ETH Rx CQE types */
union eth_rx_cqe { union eth_rx_cqe {
struct eth_fast_path_rx_reg_cqe fast_path_regular; struct eth_fast_path_rx_reg_cqe fast_path_regular;
struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
...@@ -366,7 +367,7 @@ struct eth_tx_2nd_bd { ...@@ -366,7 +367,7 @@ struct eth_tx_2nd_bd {
struct eth_tx_data_2nd_bd data; struct eth_tx_data_2nd_bd data;
}; };
/* The parsing information data for the third tx bd of a given packet. */ /* The parsing information data for the third tx bd of a given packet */
struct eth_tx_data_3rd_bd { struct eth_tx_data_3rd_bd {
__le16 lso_mss; __le16 lso_mss;
__le16 bitfields; __le16 bitfields;
...@@ -389,7 +390,7 @@ struct eth_tx_3rd_bd { ...@@ -389,7 +390,7 @@ struct eth_tx_3rd_bd {
struct eth_tx_data_3rd_bd data; struct eth_tx_data_3rd_bd data;
}; };
/* Complementary information for the regular tx bd of a given packet. */ /* Complementary information for the regular tx bd of a given packet */
struct eth_tx_data_bd { struct eth_tx_data_bd {
__le16 reserved0; __le16 reserved0;
__le16 bitfields; __le16 bitfields;
...@@ -448,4 +449,16 @@ struct eth_db_data { ...@@ -448,4 +449,16 @@ struct eth_db_data {
__le16 bd_prod; __le16 bd_prod;
}; };
/* RSS hash type */
enum rss_hash_type {
RSS_HASH_TYPE_DEFAULT = 0,
RSS_HASH_TYPE_IPV4 = 1,
RSS_HASH_TYPE_TCP_IPV4 = 2,
RSS_HASH_TYPE_IPV6 = 3,
RSS_HASH_TYPE_TCP_IPV6 = 4,
RSS_HASH_TYPE_UDP_IPV4 = 5,
RSS_HASH_TYPE_UDP_IPV6 = 6,
MAX_RSS_HASH_TYPE
};
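To illustrate how the RSS constants and hash types fit together: the hardware computes a hash of the selected type per packet, and the driver-programmed indirection table of ETH_RSS_IND_TABLE_ENTRIES_NUM entries folds that hash into an Rx queue. The sketch below shows only the table lookup; the fill pattern and the hash value are made-up examples.

#include <stdint.h>
#include <stdio.h>

#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128

int main(void)
{
	uint8_t ind_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
	uint32_t hash = 0xdeadbeef; /* example hash reported with the CQE */
	unsigned int i;

	for (i = 0; i < ETH_RSS_IND_TABLE_ENTRIES_NUM; i++)
		ind_table[i] = i % 4; /* spread over 4 Rx queues, round-robin */

	printf("hash 0x%08x -> rx queue %u\n", hash,
	       (unsigned int)ind_table[hash % ETH_RSS_IND_TABLE_ENTRIES_NUM]);
	return 0;
}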
#endif /* __ETH_COMMON__ */ #endif /* __ETH_COMMON__ */
...@@ -8,172 +8,14 @@ ...@@ -8,172 +8,14 @@
#ifndef __FCOE_COMMON__ #ifndef __FCOE_COMMON__
#define __FCOE_COMMON__ #define __FCOE_COMMON__
/*********************/ /*********************/
/* FCOE FW CONSTANTS */ /* FCOE FW CONSTANTS */
/*********************/ /*********************/
#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
struct fcoe_abts_pkt { /* The fcoe storm task context protection-information of Ystorm */
__le32 abts_rsp_fc_payload_lo;
__le16 abts_rsp_rx_id;
u8 abts_rsp_rctl;
u8 reserved2;
};
/* FCoE additional WQE (Sq/XferQ) information */
union fcoe_additional_info_union {
__le32 previous_tid;
__le32 parent_tid;
__le32 burst_length;
__le32 seq_rec_updated_offset;
};
struct fcoe_exp_ro {
__le32 data_offset;
__le32 reserved;
};
union fcoe_cleanup_addr_exp_ro_union {
struct regpair abts_rsp_fc_payload_hi;
struct fcoe_exp_ro exp_ro;
};
/* FCoE Ramrod Command IDs */
enum fcoe_completion_status {
FCOE_COMPLETION_STATUS_SUCCESS,
FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
MAX_FCOE_COMPLETION_STATUS
};
struct fc_addr_nw {
u8 addr_lo;
u8 addr_mid;
u8 addr_hi;
};
/* FCoE connection offload */
struct fcoe_conn_offload_ramrod_data {
struct regpair sq_pbl_addr;
struct regpair sq_curr_page_addr;
struct regpair sq_next_page_addr;
struct regpair xferq_pbl_addr;
struct regpair xferq_curr_page_addr;
struct regpair xferq_next_page_addr;
struct regpair respq_pbl_addr;
struct regpair respq_curr_page_addr;
struct regpair respq_next_page_addr;
__le16 dst_mac_addr_lo;
__le16 dst_mac_addr_mid;
__le16 dst_mac_addr_hi;
__le16 src_mac_addr_lo;
__le16 src_mac_addr_mid;
__le16 src_mac_addr_hi;
__le16 tx_max_fc_pay_len;
__le16 e_d_tov_timer_val;
__le16 rx_max_fc_pay_len;
__le16 vlan_tag;
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
__le16 physical_q0;
__le16 rec_rr_tov_timer_val;
struct fc_addr_nw s_id;
u8 max_conc_seqs_c3;
struct fc_addr_nw d_id;
u8 flags;
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
__le16 conn_id;
u8 def_q_idx;
u8 reserved[5];
};
/* FCoE terminate connection request */
struct fcoe_conn_terminate_ramrod_data {
struct regpair terminate_params_addr;
};
struct fcoe_slow_sgl_ctx {
struct regpair base_sgl_addr;
__le16 curr_sge_off;
__le16 remainder_num_sges;
__le16 curr_sgl_index;
__le16 reserved;
};
union fcoe_dix_desc_ctx {
struct fcoe_slow_sgl_ctx dix_sgl;
struct scsi_sge cached_dix_sge;
};
struct fcoe_fast_sgl_ctx {
struct regpair sgl_start_addr;
__le32 sgl_byte_offset;
__le16 task_reuse_cnt;
__le16 init_offset_in_first_sge;
};
struct fcoe_fcp_cmd_payload {
__le32 opaque[8];
};
struct fcoe_fcp_rsp_payload {
__le32 opaque[6];
};
struct fcoe_fcp_xfer_payload {
__le32 opaque[3];
};
/* FCoE firmware function init */
struct fcoe_init_func_ramrod_data {
struct scsi_init_func_params func_params;
struct scsi_init_func_queues q_params;
__le16 mtu;
__le16 sq_num_pages_in_pbl;
__le32 reserved;
};
/* FCoE: Mode of the connection: Target or Initiator or both */
enum fcoe_mode_type {
FCOE_INITIATOR_MODE = 0x0,
FCOE_TARGET_MODE = 0x1,
FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
MAX_FCOE_MODE_TYPE
};
struct fcoe_rx_stat {
struct regpair fcoe_rx_byte_cnt;
struct regpair fcoe_rx_data_pkt_cnt;
struct regpair fcoe_rx_xfer_pkt_cnt;
struct regpair fcoe_rx_other_pkt_cnt;
__le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
__le32 fcoe_silent_drop_pkt_rq_full_cnt;
__le32 fcoe_silent_drop_pkt_crc_error_cnt;
__le32 fcoe_silent_drop_pkt_task_invalid_cnt;
__le32 fcoe_silent_drop_total_pkt_cnt;
__le32 rsrv;
};
struct fcoe_stat_ramrod_data {
struct regpair stat_params_addr;
};
struct protection_info_ctx { struct protection_info_ctx {
__le16 flags; __le16 flags;
#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 #define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
...@@ -192,21 +34,40 @@ struct protection_info_ctx { ...@@ -192,21 +34,40 @@ struct protection_info_ctx {
u8 dst_size; u8 dst_size;
}; };
/* The fcoe storm task context protection-information of Ystorm */
union protection_info_union_ctx { union protection_info_union_ctx {
struct protection_info_ctx info; struct protection_info_ctx info;
__le32 value; __le32 value;
}; };
/* FCP CMD payload */
struct fcoe_fcp_cmd_payload {
__le32 opaque[8];
};
/* FCP RSP payload */
struct fcoe_fcp_rsp_payload {
__le32 opaque[6];
};
/* FCP RSP payload */
struct fcp_rsp_payload_padded { struct fcp_rsp_payload_padded {
struct fcoe_fcp_rsp_payload rsp_payload; struct fcoe_fcp_rsp_payload rsp_payload;
__le32 reserved[2]; __le32 reserved[2];
}; };
/* FCP XFER payload */
struct fcoe_fcp_xfer_payload {
__le32 opaque[3];
};
/* FCP XFER payload */
struct fcp_xfer_payload_padded { struct fcp_xfer_payload_padded {
struct fcoe_fcp_xfer_payload xfer_payload; struct fcoe_fcp_xfer_payload xfer_payload;
__le32 reserved[5]; __le32 reserved[5];
}; };
/* Task params */
struct fcoe_tx_data_params { struct fcoe_tx_data_params {
__le32 data_offset; __le32 data_offset;
__le32 offset_in_io; __le32 offset_in_io;
...@@ -227,6 +88,7 @@ struct fcoe_tx_data_params { ...@@ -227,6 +88,7 @@ struct fcoe_tx_data_params {
__le16 reserved3; __le16 reserved3;
}; };
/* Middle path parameters: FC header fields provided by the driver */
struct fcoe_tx_mid_path_params { struct fcoe_tx_mid_path_params {
__le32 parameter; __le32 parameter;
u8 r_ctl; u8 r_ctl;
...@@ -237,11 +99,13 @@ struct fcoe_tx_mid_path_params { ...@@ -237,11 +99,13 @@ struct fcoe_tx_mid_path_params {
__le16 ox_id; __le16 ox_id;
}; };
/* Task params */
struct fcoe_tx_params { struct fcoe_tx_params {
struct fcoe_tx_data_params data; struct fcoe_tx_data_params data;
struct fcoe_tx_mid_path_params mid_path; struct fcoe_tx_mid_path_params mid_path;
}; };
/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */
union fcoe_tx_info_union_ctx { union fcoe_tx_info_union_ctx {
struct fcoe_fcp_cmd_payload fcp_cmd_payload; struct fcoe_fcp_cmd_payload fcp_cmd_payload;
struct fcp_rsp_payload_padded fcp_rsp_payload; struct fcp_rsp_payload_padded fcp_rsp_payload;
...@@ -249,6 +113,22 @@ union fcoe_tx_info_union_ctx { ...@@ -249,6 +113,22 @@ union fcoe_tx_info_union_ctx {
struct fcoe_tx_params tx_params; struct fcoe_tx_params tx_params;
}; };
/* Data sgl */
struct fcoe_slow_sgl_ctx {
struct regpair base_sgl_addr;
__le16 curr_sge_off;
__le16 remainder_num_sges;
__le16 curr_sgl_index;
__le16 reserved;
};
/* Union of DIX SGL \ cached DIX sges */
union fcoe_dix_desc_ctx {
struct fcoe_slow_sgl_ctx dix_sgl;
struct scsi_sge cached_dix_sge;
};
/* The fcoe storm task context of Ystorm */
struct ystorm_fcoe_task_st_ctx { struct ystorm_fcoe_task_st_ctx {
u8 task_type; u8 task_type;
u8 sgl_mode; u8 sgl_mode;
...@@ -407,6 +287,27 @@ struct tstorm_fcoe_task_ag_ctx { ...@@ -407,6 +287,27 @@ struct tstorm_fcoe_task_ag_ctx {
__le32 data_offset_next; __le32 data_offset_next;
}; };
/* Expected relative offset */
struct fcoe_exp_ro {
__le32 data_offset;
__le32 reserved;
};
/* Union of Cleanup address \ expected relative offsets */
union fcoe_cleanup_addr_exp_ro_union {
struct regpair abts_rsp_fc_payload_hi;
struct fcoe_exp_ro exp_ro;
};
/* Fields copied from ABTS response packet */
struct fcoe_abts_pkt {
__le32 abts_rsp_fc_payload_lo;
__le16 abts_rsp_rx_id;
u8 abts_rsp_rctl;
u8 reserved2;
};
/* FW read-write (modifiable) part of the fcoe task storm context of Tstorm */
struct fcoe_tstorm_fcoe_task_st_ctx_read_write { struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
__le16 flags; __le16 flags;
...@@ -436,6 +337,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write { ...@@ -436,6 +337,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
__le16 reserved1; __le16 reserved1;
}; };
/* FW read only part The fcoe task storm context of Tstorm */
struct fcoe_tstorm_fcoe_task_st_ctx_read_only { struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
u8 task_type; u8 task_type;
u8 dev_type; u8 dev_type;
...@@ -446,6 +348,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_only { ...@@ -446,6 +348,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
__le32 rsrv; __le32 rsrv;
}; };
/* The fcoe task storm context of Tstorm */
struct tstorm_fcoe_task_st_ctx { struct tstorm_fcoe_task_st_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write; struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
...@@ -507,6 +410,7 @@ struct mstorm_fcoe_task_ag_ctx { ...@@ -507,6 +410,7 @@ struct mstorm_fcoe_task_ag_ctx {
__le32 reg2; __le32 reg2;
}; };
/* The fcoe task storm context of Mstorm */
struct mstorm_fcoe_task_st_ctx { struct mstorm_fcoe_task_st_ctx {
struct regpair rsp_buf_addr; struct regpair rsp_buf_addr;
__le32 rsrv[2]; __le32 rsrv[2];
...@@ -596,6 +500,7 @@ struct ustorm_fcoe_task_ag_ctx { ...@@ -596,6 +500,7 @@ struct ustorm_fcoe_task_ag_ctx {
__le32 reg5; __le32 reg5;
}; };
/* FCoE task context */
struct fcoe_task_context { struct fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context; struct ystorm_fcoe_task_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2]; struct regpair ystorm_st_padding[2];
...@@ -611,6 +516,129 @@ struct fcoe_task_context { ...@@ -611,6 +516,129 @@ struct fcoe_task_context {
struct rdif_task_context rdif_context; struct rdif_task_context rdif_context;
}; };
/* FCoE additional WQE (Sq/XferQ) information */
union fcoe_additional_info_union {
__le32 previous_tid;
__le32 parent_tid;
__le32 burst_length;
__le32 seq_rec_updated_offset;
};
/* FCoE Ramrod Command IDs */
enum fcoe_completion_status {
FCOE_COMPLETION_STATUS_SUCCESS,
FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
MAX_FCOE_COMPLETION_STATUS
};
/* FC address (SID/DID) network presentation */
struct fc_addr_nw {
u8 addr_lo;
u8 addr_mid;
u8 addr_hi;
};
/* FCoE connection offload */
struct fcoe_conn_offload_ramrod_data {
struct regpair sq_pbl_addr;
struct regpair sq_curr_page_addr;
struct regpair sq_next_page_addr;
struct regpair xferq_pbl_addr;
struct regpair xferq_curr_page_addr;
struct regpair xferq_next_page_addr;
struct regpair respq_pbl_addr;
struct regpair respq_curr_page_addr;
struct regpair respq_next_page_addr;
__le16 dst_mac_addr_lo;
__le16 dst_mac_addr_mid;
__le16 dst_mac_addr_hi;
__le16 src_mac_addr_lo;
__le16 src_mac_addr_mid;
__le16 src_mac_addr_hi;
__le16 tx_max_fc_pay_len;
__le16 e_d_tov_timer_val;
__le16 rx_max_fc_pay_len;
__le16 vlan_tag;
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
__le16 physical_q0;
__le16 rec_rr_tov_timer_val;
struct fc_addr_nw s_id;
u8 max_conc_seqs_c3;
struct fc_addr_nw d_id;
u8 flags;
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
__le16 conn_id;
u8 def_q_idx;
u8 reserved[5];
};
/* FCoE terminate connection request */
struct fcoe_conn_terminate_ramrod_data {
struct regpair terminate_params_addr;
};
/* Data sgl */
struct fcoe_fast_sgl_ctx {
struct regpair sgl_start_addr;
__le32 sgl_byte_offset;
__le16 task_reuse_cnt;
__le16 init_offset_in_first_sge;
};
/* FCoE firmware function init */
struct fcoe_init_func_ramrod_data {
struct scsi_init_func_params func_params;
struct scsi_init_func_queues q_params;
__le16 mtu;
__le16 sq_num_pages_in_pbl;
__le32 reserved;
};
/* FCoE: Mode of the connection: Target or Initiator or both */
enum fcoe_mode_type {
FCOE_INITIATOR_MODE = 0x0,
FCOE_TARGET_MODE = 0x1,
FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
MAX_FCOE_MODE_TYPE
};
/* Per PF FCoE receive path statistics - tStorm RAM structure */
struct fcoe_rx_stat {
struct regpair fcoe_rx_byte_cnt;
struct regpair fcoe_rx_data_pkt_cnt;
struct regpair fcoe_rx_xfer_pkt_cnt;
struct regpair fcoe_rx_other_pkt_cnt;
__le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
__le32 fcoe_silent_drop_pkt_rq_full_cnt;
__le32 fcoe_silent_drop_pkt_crc_error_cnt;
__le32 fcoe_silent_drop_pkt_task_invalid_cnt;
__le32 fcoe_silent_drop_total_pkt_cnt;
__le32 rsrv;
};
/* FCoE statistics request */
struct fcoe_stat_ramrod_data {
struct regpair stat_params_addr;
};
/* Per PF FCoE transmit path statistics - pStorm RAM structure */
struct fcoe_tx_stat { struct fcoe_tx_stat {
struct regpair fcoe_tx_byte_cnt; struct regpair fcoe_tx_byte_cnt;
struct regpair fcoe_tx_data_pkt_cnt; struct regpair fcoe_tx_data_pkt_cnt;
...@@ -618,6 +646,7 @@ struct fcoe_tx_stat { ...@@ -618,6 +646,7 @@ struct fcoe_tx_stat {
struct regpair fcoe_tx_other_pkt_cnt; struct regpair fcoe_tx_other_pkt_cnt;
}; };
/* FCoE SQ/XferQ element */
struct fcoe_wqe { struct fcoe_wqe {
__le16 task_id; __le16 task_id;
__le16 flags; __le16 flags;
...@@ -638,6 +667,7 @@ struct fcoe_wqe { ...@@ -638,6 +667,7 @@ struct fcoe_wqe {
union fcoe_additional_info_union additional_info_union; union fcoe_additional_info_union additional_info_union;
}; };
/* FCoE XFRQ element */
struct xfrqe_prot_flags { struct xfrqe_prot_flags {
u8 flags; u8 flags;
#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF #define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
...@@ -650,6 +680,7 @@ struct xfrqe_prot_flags { ...@@ -650,6 +680,7 @@ struct xfrqe_prot_flags {
#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 #define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
}; };
/* FCoE doorbell data */
struct fcoe_db_data { struct fcoe_db_data {
u8 params; u8 params;
#define FCOE_DB_DATA_DEST_MASK 0x3 #define FCOE_DB_DATA_DEST_MASK 0x3
...@@ -665,4 +696,5 @@ struct fcoe_db_data { ...@@ -665,4 +696,5 @@ struct fcoe_db_data {
u8 agg_flags; u8 agg_flags;
__le16 sq_prod; __le16 sq_prod;
}; };
#endif /* __FCOE_COMMON__ */ #endif /* __FCOE_COMMON__ */
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#ifndef __ISCSI_COMMON__ #ifndef __ISCSI_COMMON__
#define __ISCSI_COMMON__ #define __ISCSI_COMMON__
/**********************/ /**********************/
/* ISCSI FW CONSTANTS */ /* ISCSI FW CONSTANTS */
/**********************/ /**********************/
...@@ -105,6 +106,7 @@ ...@@ -105,6 +106,7 @@
#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) #define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) #define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
/* iSCSI CQE error bitmap */
struct cqe_error_bitmap { struct cqe_error_bitmap {
u8 cqe_error_status_bits; u8 cqe_error_status_bits;
#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 #define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
...@@ -126,37 +128,82 @@ union cqe_error_status { ...@@ -126,37 +128,82 @@ union cqe_error_status {
struct cqe_error_bitmap error_bits; struct cqe_error_bitmap error_bits;
}; };
/* iSCSI Login Response PDU header */
struct data_hdr { struct data_hdr {
__le32 data[12]; __le32 data[12];
}; };
struct iscsi_async_msg_hdr { /* Union of data/r2t sequence number */
__le16 reserved0; union iscsi_seq_num {
u8 flags_attr; __le16 data_sn;
#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F __le16 r2t_sn;
#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 };
#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 /* iSCSI DIF flags */
u8 opcode; struct iscsi_dif_flags {
u8 flags;
#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
};
/* The iscsi storm task context of Ystorm */
struct ystorm_iscsi_task_state {
struct scsi_cached_sges data_desc;
struct scsi_sgl_params sgl_params;
__le32 exp_r2t_sn;
__le32 buffer_offset;
union iscsi_seq_num seq_num;
struct iscsi_dif_flags dif_flags;
u8 flags;
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
};
/* The iscsi storm task context of Ystorm */
struct ystorm_iscsi_task_rxmit_opt {
__le32 fast_rxmit_sge_offset;
__le32 scan_start_buffer_offset;
__le32 fast_rxmit_buffer_offset;
u8 scan_start_sgl_index;
u8 fast_rxmit_sgl_index;
__le16 reserved;
};
/* iSCSI Common PDU header */
struct iscsi_common_hdr {
u8 hdr_status;
u8 hdr_response;
u8 hdr_flags;
u8 hdr_first_byte;
#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
#define ISCSI_COMMON_HDR_IMM_MASK 0x1
#define ISCSI_COMMON_HDR_IMM_SHIFT 6
#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
__le32 hdr_second_dword; __le32 hdr_second_dword;
#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun; struct regpair lun_reserved;
__le32 all_ones; __le32 itt;
__le32 reserved1; __le32 ttt;
__le32 stat_sn; __le32 cmdstat_sn;
__le32 exp_cmd_sn; __le32 exp_statcmd_sn;
__le32 max_cmd_sn; __le32 max_cmd_sn;
__le16 param1_rsrv; __le32 data[3];
u8 async_vcode;
u8 async_event;
__le16 param3_rsrv;
__le16 param2_rsrv;
__le32 reserved7;
}; };
/* iSCSI Command PDU header */
struct iscsi_cmd_hdr { struct iscsi_cmd_hdr {
__le16 reserved1; __le16 reserved1;
u8 flags_attr; u8 flags_attr;
...@@ -190,92 +237,7 @@ struct iscsi_cmd_hdr { ...@@ -190,92 +237,7 @@ struct iscsi_cmd_hdr {
__le32 cdb[4]; __le32 cdb[4];
}; };
struct iscsi_common_hdr { /* iSCSI Command PDU header with Extended CDB (Initiator Mode) */
u8 hdr_status;
u8 hdr_response;
u8 hdr_flags;
u8 hdr_first_byte;
#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
#define ISCSI_COMMON_HDR_IMM_MASK 0x1
#define ISCSI_COMMON_HDR_IMM_SHIFT 6
#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
__le32 hdr_second_dword;
#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun_reserved;
__le32 itt;
__le32 ttt;
__le32 cmdstat_sn;
__le32 exp_statcmd_sn;
__le32 max_cmd_sn;
__le32 data[3];
};
struct iscsi_conn_offload_params {
struct regpair sq_pbl_addr;
struct regpair r2tq_pbl_addr;
struct regpair xhq_pbl_addr;
struct regpair uhq_pbl_addr;
__le32 initial_ack;
__le16 physical_q0;
__le16 physical_q1;
u8 flags;
#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
u8 pbl_page_size_log;
u8 pbe_page_size_log;
u8 default_cq;
__le32 stat_sn;
};
struct iscsi_slow_path_hdr {
u8 op_code;
u8 flags;
#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
};
struct iscsi_conn_update_ramrod_params {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
__le32 fw_cid;
u8 flags;
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6
u8 reserved0[3];
__le32 max_seq_size;
__le32 max_send_pdu_length;
__le32 max_recv_pdu_length;
__le32 first_seq_length;
__le32 exp_stat_sn;
};
struct iscsi_ext_cdb_cmd_hdr { struct iscsi_ext_cdb_cmd_hdr {
__le16 reserved1; __le16 reserved1;
u8 flags_attr; u8 flags_attr;
...@@ -303,6 +265,7 @@ struct iscsi_ext_cdb_cmd_hdr { ...@@ -303,6 +265,7 @@ struct iscsi_ext_cdb_cmd_hdr {
struct scsi_sge cdb_sge; struct scsi_sge cdb_sge;
}; };
/* iSCSI login request PDU header */
struct iscsi_login_req_hdr { struct iscsi_login_req_hdr {
u8 version_min; u8 version_min;
u8 version_max; u8 version_max;
...@@ -334,6 +297,7 @@ struct iscsi_login_req_hdr { ...@@ -334,6 +297,7 @@ struct iscsi_login_req_hdr {
__le32 reserved2[4]; __le32 reserved2[4];
}; };
/* iSCSI logout request PDU header */
struct iscsi_logout_req_hdr { struct iscsi_logout_req_hdr {
__le16 reserved0; __le16 reserved0;
u8 reason_code; u8 reason_code;
...@@ -348,6 +312,7 @@ struct iscsi_logout_req_hdr { ...@@ -348,6 +312,7 @@ struct iscsi_logout_req_hdr {
__le32 reserved4[4]; __le32 reserved4[4];
}; };
/* iSCSI Data-out PDU header */
struct iscsi_data_out_hdr { struct iscsi_data_out_hdr {
__le16 reserved1; __le16 reserved1;
u8 flags_attr; u8 flags_attr;
...@@ -368,6 +333,7 @@ struct iscsi_data_out_hdr { ...@@ -368,6 +333,7 @@ struct iscsi_data_out_hdr {
__le32 reserved5; __le32 reserved5;
}; };
/* iSCSI Data-in PDU header */
struct iscsi_data_in_hdr { struct iscsi_data_in_hdr {
u8 status_rsvd; u8 status_rsvd;
u8 reserved1; u8 reserved1;
...@@ -397,6 +363,7 @@ struct iscsi_data_in_hdr { ...@@ -397,6 +363,7 @@ struct iscsi_data_in_hdr {
__le32 residual_count; __le32 residual_count;
}; };
/* iSCSI R2T PDU header */
struct iscsi_r2t_hdr { struct iscsi_r2t_hdr {
u8 reserved0[3]; u8 reserved0[3];
u8 opcode; u8 opcode;
...@@ -412,6 +379,7 @@ struct iscsi_r2t_hdr { ...@@ -412,6 +379,7 @@ struct iscsi_r2t_hdr {
__le32 desired_data_trns_len; __le32 desired_data_trns_len;
}; };
/* iSCSI NOP-out PDU header */
struct iscsi_nop_out_hdr { struct iscsi_nop_out_hdr {
__le16 reserved1; __le16 reserved1;
u8 flags_attr; u8 flags_attr;
...@@ -432,6 +400,7 @@ struct iscsi_nop_out_hdr { ...@@ -432,6 +400,7 @@ struct iscsi_nop_out_hdr {
__le32 reserved6; __le32 reserved6;
}; };
/* iSCSI NOP-in PDU header */
struct iscsi_nop_in_hdr { struct iscsi_nop_in_hdr {
__le16 reserved0; __le16 reserved0;
u8 flags_attr; u8 flags_attr;
...@@ -456,6 +425,7 @@ struct iscsi_nop_in_hdr { ...@@ -456,6 +425,7 @@ struct iscsi_nop_in_hdr {
__le32 reserved7; __le32 reserved7;
}; };
/* iSCSI Login Response PDU header */
struct iscsi_login_response_hdr { struct iscsi_login_response_hdr {
u8 version_active; u8 version_active;
u8 version_max; u8 version_max;
...@@ -490,6 +460,7 @@ struct iscsi_login_response_hdr { ...@@ -490,6 +460,7 @@ struct iscsi_login_response_hdr {
__le32 reserved4[2]; __le32 reserved4[2];
}; };
/* iSCSI Logout Response PDU header */
struct iscsi_logout_response_hdr { struct iscsi_logout_response_hdr {
u8 reserved1; u8 reserved1;
u8 response; u8 response;
...@@ -512,6 +483,7 @@ struct iscsi_logout_response_hdr { ...@@ -512,6 +483,7 @@ struct iscsi_logout_response_hdr {
__le32 reserved5[1]; __le32 reserved5[1];
}; };
/* iSCSI Text Request PDU header */
struct iscsi_text_request_hdr { struct iscsi_text_request_hdr {
__le16 reserved0; __le16 reserved0;
u8 flags_attr; u8 flags_attr;
...@@ -535,6 +507,7 @@ struct iscsi_text_request_hdr { ...@@ -535,6 +507,7 @@ struct iscsi_text_request_hdr {
__le32 reserved4[4]; __le32 reserved4[4];
}; };
/* iSCSI Text Response PDU header */
struct iscsi_text_response_hdr { struct iscsi_text_response_hdr {
__le16 reserved1; __le16 reserved1;
u8 flags; u8 flags;
...@@ -559,6 +532,7 @@ struct iscsi_text_response_hdr { ...@@ -559,6 +532,7 @@ struct iscsi_text_response_hdr {
__le32 reserved4[3]; __le32 reserved4[3];
}; };
/* iSCSI TMF Request PDU header */
struct iscsi_tmf_request_hdr { struct iscsi_tmf_request_hdr {
__le16 reserved0; __le16 reserved0;
u8 function; u8 function;
...@@ -597,6 +571,7 @@ struct iscsi_tmf_response_hdr { ...@@ -597,6 +571,7 @@ struct iscsi_tmf_response_hdr {
__le32 reserved4[3]; __le32 reserved4[3];
}; };
/* iSCSI Response PDU header */
struct iscsi_response_hdr { struct iscsi_response_hdr {
u8 hdr_status; u8 hdr_status;
u8 hdr_response; u8 hdr_response;
...@@ -618,6 +593,7 @@ struct iscsi_response_hdr { ...@@ -618,6 +593,7 @@ struct iscsi_response_hdr {
__le32 residual_count; __le32 residual_count;
}; };
/* iSCSI Reject PDU header */
struct iscsi_reject_hdr { struct iscsi_reject_hdr {
u8 reserved4; u8 reserved4;
u8 hdr_reason; u8 hdr_reason;
...@@ -638,6 +614,35 @@ struct iscsi_reject_hdr { ...@@ -638,6 +614,35 @@ struct iscsi_reject_hdr {
__le32 reserved3[2]; __le32 reserved3[2];
}; };
/* iSCSI Asynchronous Message PDU header */
struct iscsi_async_msg_hdr {
__le16 reserved0;
u8 flags_attr;
#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F
#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0
#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 all_ones;
__le32 reserved1;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le16 param1_rsrv;
u8 async_vcode;
u8 async_event;
__le16 param3_rsrv;
__le16 param2_rsrv;
__le32 reserved7;
};
/* PDU header part of Ystorm task context */
union iscsi_task_hdr { union iscsi_task_hdr {
struct iscsi_common_hdr common; struct iscsi_common_hdr common;
struct data_hdr data; struct data_hdr data;
...@@ -661,282 +666,11 @@ union iscsi_task_hdr { ...@@ -661,282 +666,11 @@ union iscsi_task_hdr {
struct iscsi_async_msg_hdr async_msg; struct iscsi_async_msg_hdr async_msg;
}; };
struct iscsi_cqe_common { /* The iscsi storm task context of Ystorm */
__le16 conn_id; struct ystorm_iscsi_task_st_ctx {
u8 cqe_type; struct ystorm_iscsi_task_state state;
union cqe_error_status error_bitmap; struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
__le32 reserved[3]; union iscsi_task_hdr pdu_hdr;
union iscsi_task_hdr iscsi_hdr;
};
struct iscsi_cqe_solicited {
__le16 conn_id;
u8 cqe_type;
union cqe_error_status error_bitmap;
__le16 itid;
u8 task_type;
u8 fw_dbg_field;
u8 caused_conn_err;
u8 reserved0[3];
__le32 reserved1[1];
union iscsi_task_hdr iscsi_hdr;
};
struct iscsi_cqe_unsolicited {
__le16 conn_id;
u8 cqe_type;
union cqe_error_status error_bitmap;
__le16 reserved0;
u8 reserved1;
u8 unsol_cqe_type;
struct regpair rqe_opaque;
union iscsi_task_hdr iscsi_hdr;
};
union iscsi_cqe {
struct iscsi_cqe_common cqe_common;
struct iscsi_cqe_solicited cqe_solicited;
struct iscsi_cqe_unsolicited cqe_unsolicited;
};
enum iscsi_cqes_type {
ISCSI_CQE_TYPE_SOLICITED = 1,
ISCSI_CQE_TYPE_UNSOLICITED,
ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
ISCSI_CQE_TYPE_TASK_CLEANUP,
ISCSI_CQE_TYPE_DUMMY,
MAX_ISCSI_CQES_TYPE
};
enum iscsi_cqe_unsolicited_type {
ISCSI_CQE_UNSOLICITED_NONE,
ISCSI_CQE_UNSOLICITED_SINGLE,
ISCSI_CQE_UNSOLICITED_FIRST,
ISCSI_CQE_UNSOLICITED_MIDDLE,
ISCSI_CQE_UNSOLICITED_LAST,
MAX_ISCSI_CQE_UNSOLICITED_TYPE
};
struct iscsi_debug_modes {
u8 flags;
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7
};
struct iscsi_dif_flags {
u8 flags;
#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
};
enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_INIT_FUNC = 0,
ISCSI_EVENT_TYPE_DESTROY_FUNC,
ISCSI_EVENT_TYPE_OFFLOAD_CONN,
ISCSI_EVENT_TYPE_UPDATE_CONN,
ISCSI_EVENT_TYPE_CLEAR_SQ,
ISCSI_EVENT_TYPE_TERMINATE_CONN,
ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
RESERVED9,
ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
ISCSI_EVENT_TYPE_ASYN_SYN_RCVD,
ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME,
ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT,
ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT,
ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
MAX_ISCSI_EQE_OPCODE
};
enum iscsi_error_types {
ISCSI_STATUS_NONE = 0,
ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
ISCSI_CONN_ERROR_TASK_NOT_VALID,
ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
ISCSI_CONN_ERROR_DATA_OVERRUN,
ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
ISCSI_CONN_ERROR_IP_OPTIONS_ERROR,
ISCSI_CONN_ERROR_PRS_ERRORS,
ISCSI_CONN_ERROR_CONNECT_INVALID_TCP_OPTION,
ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_TTT_OUT_OF_RANGE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_OUT_ITT,
ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO_S_BIT_ONE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
ISCSI_CONN_ERROR_INVALID_ITT,
ISCSI_ERROR_UNKNOWN,
MAX_ISCSI_ERROR_TYPES
};
enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UNUSED = 0,
ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
ISCSI_RAMROD_CMD_ID_DESTROY_FUNC = 2,
ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
MAX_ISCSI_RAMROD_CMD_ID
};
struct iscsi_reg1 {
__le32 reg1_map;
#define ISCSI_REG1_NUM_SGES_MASK 0xF
#define ISCSI_REG1_NUM_SGES_SHIFT 0
#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
#define ISCSI_REG1_RESERVED1_SHIFT 4
};
union iscsi_seq_num {
__le16 data_sn;
__le16 r2t_sn;
};
struct iscsi_spe_conn_mac_update {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
__le32 fw_cid;
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
u8 reserved0[2];
};
struct iscsi_spe_conn_offload {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
__le32 fw_cid;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params tcp;
};
struct iscsi_spe_conn_offload_option2 {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
__le32 fw_cid;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params_opt2 tcp;
};
struct iscsi_spe_conn_termination {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
__le32 fw_cid;
u8 abortive;
u8 reserved0[7];
struct regpair queue_cnts_addr;
struct regpair query_params_addr;
};
struct iscsi_spe_func_dstry {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le32 reserved1;
};
struct iscsi_spe_func_init {
struct iscsi_slow_path_hdr hdr;
__le16 half_way_close_timeout;
u8 num_sq_pages_in_ring;
u8 num_r2tq_pages_in_ring;
u8 num_uhq_pages_in_ring;
u8 ll2_rx_queue_id;
u8 ooo_enable;
struct iscsi_debug_modes debug_mode;
__le16 reserved1;
__le32 reserved2;
__le32 reserved3;
__le32 reserved4;
struct scsi_init_func_params func_params;
struct scsi_init_func_queues q_params;
};
struct ystorm_iscsi_task_state {
struct scsi_cached_sges data_desc;
struct scsi_sgl_params sgl_params;
__le32 exp_r2t_sn;
__le32 buffer_offset;
union iscsi_seq_num seq_num;
struct iscsi_dif_flags dif_flags;
u8 flags;
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
};
struct ystorm_iscsi_task_rxmit_opt {
__le32 fast_rxmit_sge_offset;
__le32 scan_start_buffer_offset;
__le32 fast_rxmit_buffer_offset;
u8 scan_start_sgl_index;
u8 fast_rxmit_sgl_index;
__le16 reserved;
};
struct ystorm_iscsi_task_st_ctx {
struct ystorm_iscsi_task_state state;
struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
union iscsi_task_hdr pdu_hdr;
}; };
struct ystorm_iscsi_task_ag_ctx { struct ystorm_iscsi_task_ag_ctx {
...@@ -1104,6 +838,7 @@ struct ustorm_iscsi_task_ag_ctx { ...@@ -1104,6 +838,7 @@ struct ustorm_iscsi_task_ag_ctx {
__le32 exp_r2t_sn; __le32 exp_r2t_sn;
}; };
/* The iscsi storm task context of Mstorm */
struct mstorm_iscsi_task_st_ctx { struct mstorm_iscsi_task_st_ctx {
struct scsi_cached_sges data_desc; struct scsi_cached_sges data_desc;
struct scsi_sgl_params sgl_params; struct scsi_sgl_params sgl_params;
...@@ -1117,6 +852,15 @@ struct mstorm_iscsi_task_st_ctx { ...@@ -1117,6 +852,15 @@ struct mstorm_iscsi_task_st_ctx {
__le32 reserved1; __le32 reserved1;
}; };
struct iscsi_reg1 {
__le32 reg1_map;
#define ISCSI_REG1_NUM_SGES_MASK 0xF
#define ISCSI_REG1_NUM_SGES_SHIFT 0
#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
#define ISCSI_REG1_RESERVED1_SHIFT 4
};
/* The iscsi storm task context of Ustorm */
struct ustorm_iscsi_task_st_ctx { struct ustorm_iscsi_task_st_ctx {
__le32 rem_rcv_len; __le32 rem_rcv_len;
__le32 exp_data_transfer_len; __le32 exp_data_transfer_len;
...@@ -1162,6 +906,7 @@ struct ustorm_iscsi_task_st_ctx { ...@@ -1162,6 +906,7 @@ struct ustorm_iscsi_task_st_ctx {
u8 cq_rss_number; u8 cq_rss_number;
}; };
/* iscsi task context */
struct iscsi_task_context { struct iscsi_task_context {
struct ystorm_iscsi_task_st_ctx ystorm_st_context; struct ystorm_iscsi_task_st_ctx ystorm_st_context;
struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
...@@ -1175,32 +920,345 @@ struct iscsi_task_context { ...@@ -1175,32 +920,345 @@ struct iscsi_task_context {
struct rdif_task_context rdif_context; struct rdif_task_context rdif_context;
}; };
enum iscsi_task_type { /* iSCSI connection offload params passed by driver to FW in ISCSI offload
ISCSI_TASK_TYPE_INITIATOR_WRITE, * ramrod.
ISCSI_TASK_TYPE_INITIATOR_READ, */
ISCSI_TASK_TYPE_MIDPATH, struct iscsi_conn_offload_params {
ISCSI_TASK_TYPE_UNSOLIC, struct regpair sq_pbl_addr;
ISCSI_TASK_TYPE_EXCHCLEANUP, struct regpair r2tq_pbl_addr;
ISCSI_TASK_TYPE_IRRELEVANT, struct regpair xhq_pbl_addr;
ISCSI_TASK_TYPE_TARGET_WRITE, struct regpair uhq_pbl_addr;
ISCSI_TASK_TYPE_TARGET_READ, __le32 initial_ack;
ISCSI_TASK_TYPE_TARGET_RESPONSE, __le16 physical_q0;
ISCSI_TASK_TYPE_LOGIN_RESPONSE, __le16 physical_q1;
MAX_ISCSI_TASK_TYPE u8 flags;
}; #define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
union iscsi_ttt_txlen_union { #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
__le32 desired_tx_len; #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
__le32 ttt; #define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
}; #define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
struct iscsi_uhqe { #define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
__le32 reg1; u8 pbl_page_size_log;
#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF u8 pbe_page_size_log;
#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 u8 default_cq;
#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 __le32 stat_sn;
#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 };
#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
/* spe message header */
struct iscsi_slow_path_hdr {
u8 op_code;
u8 flags;
#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
};
/* iSCSI connection update params passed by driver to FW in ISCSI update
* ramrod.
*/
struct iscsi_conn_update_ramrod_params {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
__le32 fw_cid;
u8 flags;
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6
u8 reserved0[3];
__le32 max_seq_size;
__le32 max_send_pdu_length;
__le32 max_recv_pdu_length;
__le32 first_seq_length;
__le32 exp_stat_sn;
};
/* iSCSI CQ element */
struct iscsi_cqe_common {
__le16 conn_id;
u8 cqe_type;
union cqe_error_status error_bitmap;
__le32 reserved[3];
union iscsi_task_hdr iscsi_hdr;
};
/* iSCSI CQ element */
struct iscsi_cqe_solicited {
__le16 conn_id;
u8 cqe_type;
union cqe_error_status error_bitmap;
__le16 itid;
u8 task_type;
u8 fw_dbg_field;
u8 caused_conn_err;
u8 reserved0[3];
__le32 reserved1[1];
union iscsi_task_hdr iscsi_hdr;
};
/* iSCSI CQ element */
struct iscsi_cqe_unsolicited {
__le16 conn_id;
u8 cqe_type;
union cqe_error_status error_bitmap;
__le16 reserved0;
u8 reserved1;
u8 unsol_cqe_type;
struct regpair rqe_opaque;
union iscsi_task_hdr iscsi_hdr;
};
/* iSCSI CQ element */
union iscsi_cqe {
struct iscsi_cqe_common cqe_common;
struct iscsi_cqe_solicited cqe_solicited;
struct iscsi_cqe_unsolicited cqe_unsolicited;
};
/* iSCSI CQE type */
enum iscsi_cqes_type {
ISCSI_CQE_TYPE_SOLICITED = 1,
ISCSI_CQE_TYPE_UNSOLICITED,
ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
ISCSI_CQE_TYPE_TASK_CLEANUP,
ISCSI_CQE_TYPE_DUMMY,
MAX_ISCSI_CQES_TYPE
};
/* iSCSI CQE type */
enum iscsi_cqe_unsolicited_type {
ISCSI_CQE_UNSOLICITED_NONE,
ISCSI_CQE_UNSOLICITED_SINGLE,
ISCSI_CQE_UNSOLICITED_FIRST,
ISCSI_CQE_UNSOLICITED_MIDDLE,
ISCSI_CQE_UNSOLICITED_LAST,
MAX_ISCSI_CQE_UNSOLICITED_TYPE
};
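The two CQE-type enums above are what a completion handler switches on: cqe_type selects between solicited, unsolicited and cleanup completions, while unsol_cqe_type describes fragmentation for unsolicited ones. A minimal demultiplexing sketch follows; the enum values are copied from the definitions above, and the printf bodies stand in for real handlers.

#include <stdint.h>
#include <stdio.h>

enum iscsi_cqes_type {
	ISCSI_CQE_TYPE_SOLICITED = 1,
	ISCSI_CQE_TYPE_UNSOLICITED,
	ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
	ISCSI_CQE_TYPE_TASK_CLEANUP,
	ISCSI_CQE_TYPE_DUMMY,
};

static void demux_cqe(uint8_t cqe_type)
{
	switch (cqe_type) {
	case ISCSI_CQE_TYPE_SOLICITED:
	case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
		printf("completion for a posted task (itid is valid)\n");
		break;
	case ISCSI_CQE_TYPE_UNSOLICITED:
		printf("unsolicited PDU, consult unsol_cqe_type\n");
		break;
	case ISCSI_CQE_TYPE_TASK_CLEANUP:
		printf("task cleanup completed\n");
		break;
	default:
		printf("dummy or unknown CQE\n");
		break;
	}
}

int main(void)
{
	demux_cqe(ISCSI_CQE_TYPE_SOLICITED);
	return 0;
}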
/* iscsi debug modes */
struct iscsi_debug_modes {
u8 flags;
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7
};
/* iSCSI kernel completion queue IDs */
enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_INIT_FUNC = 0,
ISCSI_EVENT_TYPE_DESTROY_FUNC,
ISCSI_EVENT_TYPE_OFFLOAD_CONN,
ISCSI_EVENT_TYPE_UPDATE_CONN,
ISCSI_EVENT_TYPE_CLEAR_SQ,
ISCSI_EVENT_TYPE_TERMINATE_CONN,
ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
RESERVED9,
ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
ISCSI_EVENT_TYPE_ASYN_SYN_RCVD,
ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME,
ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT,
ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT,
ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
MAX_ISCSI_EQE_OPCODE
};
/* iSCSI EQE and CQE completion status */
enum iscsi_error_types {
ISCSI_STATUS_NONE = 0,
ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
ISCSI_CONN_ERROR_TASK_NOT_VALID,
ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
ISCSI_CONN_ERROR_DATA_OVERRUN,
ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
ISCSI_CONN_ERROR_IP_OPTIONS_ERROR,
ISCSI_CONN_ERROR_PRS_ERRORS,
ISCSI_CONN_ERROR_CONNECT_INVALID_TCP_OPTION,
ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_TTT_OUT_OF_RANGE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_OUT_ITT,
ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO_S_BIT_ONE,
ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
ISCSI_CONN_ERROR_INVALID_ITT,
ISCSI_ERROR_UNKNOWN,
MAX_ISCSI_ERROR_TYPES
};
/* iSCSI Ramrod Command IDs */
enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UNUSED = 0,
ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
ISCSI_RAMROD_CMD_ID_DESTROY_FUNC = 2,
ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
MAX_ISCSI_RAMROD_CMD_ID
};
/* iSCSI connection remote MAC update request */
struct iscsi_spe_conn_mac_update {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
__le32 fw_cid;
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
u8 reserved0[2];
};
/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
* iSCSI offload ramrod.
*/
struct iscsi_spe_conn_offload {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
__le32 fw_cid;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params tcp;
};
/* iSCSI and TCP connection (Option 2) offload params passed by driver to FW in
* iSCSI offload ramrod.
*/
struct iscsi_spe_conn_offload_option2 {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
__le32 fw_cid;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params_opt2 tcp;
};
/* iSCSI connection termination request */
struct iscsi_spe_conn_termination {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
__le32 fw_cid;
u8 abortive;
u8 reserved0[7];
struct regpair queue_cnts_addr;
struct regpair query_params_addr;
};
/* iSCSI firmware function destroy parameters */
struct iscsi_spe_func_dstry {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le32 reserved1;
};
/* iSCSI firmware function init parameters */
struct iscsi_spe_func_init {
struct iscsi_slow_path_hdr hdr;
__le16 half_way_close_timeout;
u8 num_sq_pages_in_ring;
u8 num_r2tq_pages_in_ring;
u8 num_uhq_pages_in_ring;
u8 ll2_rx_queue_id;
u8 ooo_enable;
struct iscsi_debug_modes debug_mode;
__le16 reserved1;
__le32 reserved2;
__le32 reserved3;
__le32 reserved4;
struct scsi_init_func_params func_params;
struct scsi_init_func_queues q_params;
};
/* iSCSI task type */
enum iscsi_task_type {
ISCSI_TASK_TYPE_INITIATOR_WRITE,
ISCSI_TASK_TYPE_INITIATOR_READ,
ISCSI_TASK_TYPE_MIDPATH,
ISCSI_TASK_TYPE_UNSOLIC,
ISCSI_TASK_TYPE_EXCHCLEANUP,
ISCSI_TASK_TYPE_IRRELEVANT,
ISCSI_TASK_TYPE_TARGET_WRITE,
ISCSI_TASK_TYPE_TARGET_READ,
ISCSI_TASK_TYPE_TARGET_RESPONSE,
ISCSI_TASK_TYPE_LOGIN_RESPONSE,
MAX_ISCSI_TASK_TYPE
};
/* iSCSI DesiredDataTransferLength/ttt union */
union iscsi_ttt_txlen_union {
__le32 desired_tx_len;
__le32 ttt;
};
/* iSCSI uHQ element */
struct iscsi_uhqe {
__le32 reg1;
#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF
#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0
#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1
#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20
#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 #define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21
#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 #define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1
#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 #define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22
...@@ -1215,7 +1273,7 @@ struct iscsi_uhqe { ...@@ -1215,7 +1273,7 @@ struct iscsi_uhqe {
#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 #define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
}; };
/* iSCSI WQ element */
struct iscsi_wqe { struct iscsi_wqe {
__le16 task_id; __le16 task_id;
u8 flags; u8 flags;
...@@ -1233,6 +1291,7 @@ struct iscsi_wqe { ...@@ -1233,6 +1291,7 @@ struct iscsi_wqe {
#define ISCSI_WQE_CDB_SIZE_SHIFT 24 #define ISCSI_WQE_CDB_SIZE_SHIFT 24
}; };
/* iSCSI wqe type */
enum iscsi_wqe_type { enum iscsi_wqe_type {
ISCSI_WQE_TYPE_NORMAL, ISCSI_WQE_TYPE_NORMAL,
ISCSI_WQE_TYPE_TASK_CLEANUP, ISCSI_WQE_TYPE_TASK_CLEANUP,
...@@ -1244,6 +1303,7 @@ enum iscsi_wqe_type { ...@@ -1244,6 +1303,7 @@ enum iscsi_wqe_type {
MAX_ISCSI_WQE_TYPE MAX_ISCSI_WQE_TYPE
}; };
/* iSCSI xHQ element */
struct iscsi_xhqe { struct iscsi_xhqe {
union iscsi_ttt_txlen_union ttt_or_txlen; union iscsi_ttt_txlen_union ttt_or_txlen;
__le32 exp_stat_sn; __le32 exp_stat_sn;
...@@ -1263,15 +1323,18 @@ struct iscsi_xhqe { ...@@ -1263,15 +1323,18 @@ struct iscsi_xhqe {
__le16 reserved1; __le16 reserved1;
}; };
/* Per PF iSCSI receive path statistics - mStorm RAM structure */
struct mstorm_iscsi_stats_drv { struct mstorm_iscsi_stats_drv {
struct regpair iscsi_rx_dropped_pdus_task_not_valid; struct regpair iscsi_rx_dropped_pdus_task_not_valid;
}; };
/* Per PF iSCSI transmit path statistics - pStorm RAM structure */
struct pstorm_iscsi_stats_drv { struct pstorm_iscsi_stats_drv {
struct regpair iscsi_tx_bytes_cnt; struct regpair iscsi_tx_bytes_cnt;
struct regpair iscsi_tx_packet_cnt; struct regpair iscsi_tx_packet_cnt;
}; };
/* Per PF iSCSI receive path statistics - tStorm RAM structure */
struct tstorm_iscsi_stats_drv {
struct regpair iscsi_rx_bytes_cnt;
struct regpair iscsi_rx_packet_cnt;
...@@ -1281,17 +1344,20 @@ struct tstorm_iscsi_stats_drv {
__le32 iscsi_immq_threshold_cnt;
};
/* Per PF iSCSI receive path statistics - uStorm RAM structure */
struct ustorm_iscsi_stats_drv {
struct regpair iscsi_rx_data_pdu_cnt;
struct regpair iscsi_rx_r2t_pdu_cnt;
struct regpair iscsi_rx_total_pdu_cnt;
};
/* Per PF iSCSI transmit path statistics - xStorm RAM structure */
struct xstorm_iscsi_stats_drv {
struct regpair iscsi_tx_go_to_slow_start_event_cnt;
struct regpair iscsi_tx_fast_retransmit_event_cnt;
};
/* Per PF iSCSI transmit path statistics - yStorm RAM structure */
struct ystorm_iscsi_stats_drv {
struct regpair iscsi_tx_data_pdu_cnt;
struct regpair iscsi_tx_r2t_pdu_cnt;
...@@ -1376,6 +1442,8 @@ struct tstorm_iscsi_task_ag_ctx {
__le32 reg1;
__le32 reg2;
};
/* iSCSI doorbell data */
struct iscsi_db_data {
u8 params;
#define ISCSI_DB_DATA_DEST_MASK 0x3
...
...@@ -29,9 +29,12 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __IWARP_COMMON__
#define __IWARP_COMMON__
#include <linux/qed/rdma_common.h>
/************************/
/* IWARP FW CONSTANTS */
/************************/
...
...@@ -32,6 +32,7 @@
#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__
/************************/
/* RDMA FW CONSTANTS */
/************************/
...
...@@ -33,6 +33,10 @@
#ifndef __ROCE_COMMON__
#define __ROCE_COMMON__
/************************/
/* ROCE FW CONSTANTS */
/************************/
#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
...@@ -40,6 +44,7 @@
#define ROCE_DCQCN_NP_MAX_QPS (64)
#define ROCE_DCQCN_RP_MAX_QPS (64)
/* Affiliated asynchronous events / errors enumeration */
enum roce_async_events_type {
ROCE_ASYNC_EVENT_NONE = 0,
ROCE_ASYNC_EVENT_COMM_EST = 1,
...
...@@ -33,6 +33,10 @@
#ifndef __STORAGE_COMMON__
#define __STORAGE_COMMON__
/*********************/
/* SCSI CONSTANTS */
/*********************/
#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
#define BDQ_NUM_RESOURCES (4)
...@@ -42,34 +46,40 @@
#define SCSI_NUM_SGES_SLOW_SGL_THR 8
#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15)
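The ring-size define above only swaps (1 << 15) for the BIT() macro; BIT(n) expands to an unsigned 1 shifted left by n, so the value is unchanged, matching the commit's no-functional-impact intent. A stand-alone check with a local stand-in for BIT():

/* BIT_SKETCH mirrors the kernel's BIT(): an unsigned left shift. */
#define BIT_SKETCH(nr) (1UL << (nr))
_Static_assert(BIT_SKETCH(15) == (1 << 15), "BDQ external ring size unchanged");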
/* SCSI buffer descriptor */
struct scsi_bd {
struct regpair address;
struct regpair opaque;
};
/* Scsi Drv BDQ struct */
struct scsi_bdq_ram_drv_data {
__le16 external_producer;
__le16 reserved0[3];
};
/* SCSI SGE entry */
struct scsi_sge {
struct regpair sge_addr;
__le32 sge_len;
__le32 reserved;
};
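A minimal stand-alone sketch of filling a small SGE table in the layout above (uint32_t/uint64_t in place of the __le32/regpair kernel types; the buffer addresses and lengths are made-up examples):

#include <stdint.h>
#include <stddef.h>

/* Host-endian mirror of struct scsi_sge for illustration only. */
struct scsi_sge_sketch {
	uint32_t addr_lo;	/* sge_addr.lo */
	uint32_t addr_hi;	/* sge_addr.hi */
	uint32_t len;		/* sge_len */
	uint32_t reserved;
};

static void fill_sges(struct scsi_sge_sketch *sge, const uint64_t *addr,
		      const uint32_t *len, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++) {
		sge[i].addr_lo = (uint32_t)addr[i];
		sge[i].addr_hi = (uint32_t)(addr[i] >> 32);
		sge[i].len = len[i];
		sge[i].reserved = 0;
	}
}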
/* Cached SGEs section */
struct scsi_cached_sges {
struct scsi_sge sge[4];
};
/* Scsi Drv CMDQ struct */
struct scsi_drv_cmdq {
__le16 cmdq_cons;
__le16 reserved0;
__le32 reserved1;
};
/* Common SCSI init params passed by driver to FW in function init ramrod */
struct scsi_init_func_params {
__le16 num_tasks;
u8 log_page_size;
...@@ -77,6 +87,7 @@ struct scsi_init_func_params {
u8 reserved2[12];
};
/* SCSI RQ/CQ/CMDQ firmware function init parameters */
struct scsi_init_func_queues {
struct regpair glbl_q_params_addr;
__le16 rq_buffer_size;
...@@ -107,16 +118,19 @@ struct scsi_init_func_queues {
__le32 reserved1;
};
/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */
struct scsi_ram_per_bdq_resource_drv_data {
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
};
/* SCSI SGL types */
enum scsi_sgl_mode {
SCSI_TX_SLOW_SGL,
SCSI_FAST_SGL,
MAX_SCSI_SGL_MODE
};
/* SCSI SGL parameters */
struct scsi_sgl_params {
struct regpair sgl_addr;
__le32 sgl_total_length;
...@@ -126,6 +140,7 @@ struct scsi_sgl_params {
u8 reserved;
};
/* SCSI terminate connection params */
struct scsi_terminate_extra_params {
__le16 unsolicited_cq_count;
__le16 cmdq_count;
...
...@@ -33,8 +33,13 @@
#ifndef __TCP_COMMON__
#define __TCP_COMMON__
/********************/
/* TCP FW CONSTANTS */
/********************/
#define TCP_INVALID_TIMEOUT_VAL -1
/* OOO opaque data received from LL2 */
struct ooo_opaque {
__le32 cid;
u8 drop_isle;
...@@ -43,25 +48,29 @@ struct ooo_opaque {
u8 ooo_isle;
};
/* tcp connect mode enum */
enum tcp_connect_mode {
TCP_CONNECT_ACTIVE,
TCP_CONNECT_PASSIVE,
MAX_TCP_CONNECT_MODE
};
/* tcp function init parameters */
struct tcp_init_params {
__le32 two_msl_timer;
__le16 tx_sws_timer;
u8 max_fin_rt;
u8 reserved[9];
};
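A minimal sketch of populating this TCP function-init block (host-endian integer types in place of __le32/__le16; the timer and FIN-retransmit values below are illustrative, not defaults taken from the driver):

#include <stdint.h>
#include <string.h>

/* Host-endian mirror of struct tcp_init_params for illustration only. */
struct tcp_init_params_sketch {
	uint32_t two_msl_timer;	/* 2*MSL timer, units assumed */
	uint16_t tx_sws_timer;	/* Tx SWS timer, units assumed */
	uint8_t max_fin_rt;	/* max FIN retransmissions */
	uint8_t reserved[9];
};

static void tcp_init_example(struct tcp_init_params_sketch *p)
{
	memset(p, 0, sizeof(*p));
	p->two_msl_timer = 4000;	/* example value */
	p->tx_sws_timer = 10;		/* example value */
	p->max_fin_rt = 2;		/* example value */
}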
/* tcp IPv4/IPv6 enum */
enum tcp_ip_version {
TCP_IPV4,
TCP_IPV6,
MAX_TCP_IP_VERSION
};
/* tcp offload parameters */
struct tcp_offload_params {
__le16 local_mac_addr_lo;
__le16 local_mac_addr_mid;
...@@ -132,6 +141,7 @@ struct tcp_offload_params {
__le32 reserved3[2];
};
/* tcp offload parameters */
struct tcp_offload_params_opt2 {
__le16 local_mac_addr_lo;
__le16 local_mac_addr_mid;
...@@ -166,6 +176,7 @@ struct tcp_offload_params_opt2 {
__le32 reserved1[22];
};
/* tcp segment placement event */
enum tcp_seg_placement_event {
TCP_EVENT_ADD_PEN,
TCP_EVENT_ADD_NEW_ISLE,
...@@ -177,6 +188,7 @@ enum tcp_seg_placement_event {
MAX_TCP_SEG_PLACEMENT_EVENT
};
/* tcp update parameters */
struct tcp_update_params {
__le16 flags;
#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
...@@ -226,6 +238,7 @@ struct tcp_update_params {
u8 reserved1[7];
};
/* toe upload parameters */
struct tcp_upload_params {
__le32 rcv_next;
__le32 snd_una;
...