Commit cee4d264 authored by Manish Chopra, committed by David S. Miller

qed: Add slowpath L2 support

This patch adds support in qed for configuring various L2 elements,
such as channels and basic filtering conditions.
It also extends qed's public API so that qede can later utilize this
functionality.
Signed-off-by: Manish Chopra <Manish.Chopra@qlogic.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: Ariel Elior <Ariel.Elior@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent e712d52b
@@ -799,6 +799,60 @@ int qed_hw_stop(struct qed_dev *cdev)
	return rc;
}
void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
	int i, j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFDOWN,
			   "Shutting down the fastpath\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
		for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
			if ((!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
			    (!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_TASK)))
				break;

			usleep_range(1000, 2000);
		}
		if (i == QED_HW_STOP_RETRY_LIMIT)
			DP_NOTICE(p_hwfn,
				  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_CONN),
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_TASK));

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}
}
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
	/* Re-open incoming traffic */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
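These two helpers are meant to be used as a pair: qed_hw_stop_fastpath() gates RX traffic and stops the parser searches while slowpath stays up, and qed_hw_start_fastpath() re-opens the gate per hw-function. A minimal usage sketch (the surrounding function and the reconfiguration step are illustrative assumptions, not part of this patch):

```c
/* Illustrative only: pause fastpath traffic while keeping slowpath
 * alive, reconfigure, then re-open the RX gate on every hw-function.
 */
static void example_requeue(struct qed_dev *cdev)
{
	int i;

	qed_hw_stop_fastpath(cdev);	/* gates RX, halts parser searches */

	/* ... tear down / restart L2 queues via the eth ops here ... */

	for_each_hwfn(cdev, i)
		qed_hw_start_fastpath(&cdev->hwfns[i]);
}
```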
static int qed_reg_assert(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt, u32 reg,
			  bool expected)
@@ -1337,3 +1391,63 @@ void qed_chain_free(struct qed_dev *cdev,
		  p_chain->p_virt_addr,
		  p_chain->p_phys_addr);
}
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
		    u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
		u16 min, max;

		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
		DP_NOTICE(p_hwfn,
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

	return 0;
}

int qed_fw_vport(struct qed_hwfn *p_hwfn,
		 u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_VPORT);
		max = min + RESC_NUM(p_hwfn, QED_VPORT);
		DP_NOTICE(p_hwfn,
			  "vport id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

	return 0;
}

int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
		   u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
		DP_NOTICE(p_hwfn,
			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

	return 0;
}
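All three translators follow the same pattern: validate the function-relative index against RESC_NUM() and offset it by RESC_START() to obtain the absolute, per-engine ID. A hedged caller sketch (`params` and the surrounding function are assumptions for illustration):

```c
/* Illustrative only: translate caller-relative IDs before programming
 * firmware; error handling mirrors the helpers' -EINVAL contract.
 */
u16 abs_rx_q;
u8 abs_vport;
int rc;

rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q);
if (rc)
	return rc;

rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport);
if (rc)
	return rc;
```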
@@ -86,6 +86,25 @@ int qed_hw_init(struct qed_dev *cdev,
 */
int qed_hw_stop(struct qed_dev *cdev);
/**
 * @brief qed_hw_stop_fastpath - should be called in case
 *        slowpath is still required for the device,
 *        but fastpath is not.
 *
 * @param cdev
 *
 */
void qed_hw_stop_fastpath(struct qed_dev *cdev);
/**
 * @brief qed_hw_start_fastpath - restart fastpath traffic,
 *        only if hw_stop_fastpath was called
 *
 * @param p_hwfn
 *
 */
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
/**
 * @brief qed_hw_reset -
 *
@@ -206,6 +225,45 @@ qed_chain_alloc(struct qed_dev *cdev,
void qed_chain_free(struct qed_dev *cdev,
		    struct qed_chain *p_chain);
/**
 * @brief qed_fw_l2_queue - Get absolute L2 queue ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return int
 */
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
		    u16 src_id,
		    u16 *dst_id);
/**
 * @brief qed_fw_vport - Get absolute vport ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return int
 */
int qed_fw_vport(struct qed_hwfn *p_hwfn,
		 u8 src_id,
		 u8 *dst_id);
/**
 * @brief qed_fw_rss_eng - Get absolute RSS engine ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return int
 */
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
		   u8 src_id,
		   u8 *dst_id);
/**
 * @brief Cleanup of previous driver remains prior to load
 *
...
@@ -2561,6 +2561,300 @@ struct eth_conn_context {
	struct ustorm_eth_conn_st_ctx ustorm_st_context;
};
enum eth_filter_action {
	ETH_FILTER_ACTION_REMOVE,
	ETH_FILTER_ACTION_ADD,
	ETH_FILTER_ACTION_REPLACE,
	MAX_ETH_FILTER_ACTION
};

struct eth_filter_cmd {
	u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
	u8 vport_id /* the vport id */;
	u8 action /* filter command action: add/remove/replace */;
	u8 reserved0;
	__le32 vni;
	__le16 mac_lsb;
	__le16 mac_mid;
	__le16 mac_msb;
	__le16 vlan_id;
};

struct eth_filter_cmd_header {
	u8 rx;
	u8 tx;
	u8 cmd_cnt;
	u8 assert_on_error;
	u8 reserved1[4];
};

enum eth_filter_type {
	ETH_FILTER_TYPE_MAC,
	ETH_FILTER_TYPE_VLAN,
	ETH_FILTER_TYPE_PAIR,
	ETH_FILTER_TYPE_INNER_MAC,
	ETH_FILTER_TYPE_INNER_VLAN,
	ETH_FILTER_TYPE_INNER_PAIR,
	ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
	ETH_FILTER_TYPE_MAC_VNI_PAIR,
	ETH_FILTER_TYPE_VNI,
	MAX_ETH_FILTER_TYPE
};
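eth_filter_cmd carries the MAC split across three little-endian 16-bit words. A sketch of filling a MAC-add command, assuming a big-endian byte order within each word (the helper name and the exact packing are illustrative, not taken from this patch):

```c
/* Illustrative only: fill a MAC-add filter command. The byte order
 * within mac_msb/mid/lsb follows firmware convention; the
 * big-endian-per-word packing here is an assumption for the sketch.
 */
static void example_fill_mac_add(struct eth_filter_cmd *cmd,
				 u8 vport_id, const u8 *mac)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->type = ETH_FILTER_TYPE_MAC;
	cmd->action = ETH_FILTER_ACTION_ADD;
	cmd->vport_id = vport_id;
	cmd->mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]);
	cmd->mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]);
	cmd->mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]);
}
```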
enum eth_ramrod_cmd_id {
	ETH_RAMROD_UNUSED,
	ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
	ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
	ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
	ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
	ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
	ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
	ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
	ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
	ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
	ETH_RAMROD_RESERVED,
	ETH_RAMROD_RESERVED2,
	ETH_RAMROD_RESERVED3,
	ETH_RAMROD_RESERVED4,
	ETH_RAMROD_RESERVED5,
	ETH_RAMROD_RESERVED6,
	ETH_RAMROD_RESERVED7,
	ETH_RAMROD_RESERVED8,
	MAX_ETH_RAMROD_CMD_ID
};

struct eth_vport_rss_config {
	__le16 capabilities;
#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT 7
#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT 8
#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x7F
#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 9
	u8 rss_id;
	u8 rss_mode;
	u8 update_rss_key;
	u8 update_rss_ind_table;
	u8 update_rss_capabilities;
	u8 tbl_size;
	__le32 reserved2[2];
	__le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
	__le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
	__le32 reserved3[2];
};
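The capabilities word is a packed bitfield driven by the MASK/SHIFT pairs above. A minimal sketch enabling IPv4/IPv6 TCP hashing (`rss_cfg` is an assumed pointer to this struct; the mode value comes from the enum defined next):

```c
/* Illustrative only: enable IPv4 + IPv6 TCP RSS hashing. */
u16 caps = 0;

caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT;
caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT;
caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT;
caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT;

rss_cfg->capabilities = cpu_to_le16(caps);
rss_cfg->rss_mode = ETH_VPORT_RSS_MODE_REGULAR;
```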
enum eth_vport_rss_mode {
	ETH_VPORT_RSS_MODE_DISABLED,
	ETH_VPORT_RSS_MODE_REGULAR,
	MAX_ETH_VPORT_RSS_MODE
};

struct eth_vport_rx_mode {
	__le16 state;
#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
	__le16 reserved2[3];
};

struct eth_vport_tpa_param {
	u64 reserved[2];
};

struct eth_vport_tx_mode {
	__le16 state;
#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
	__le16 reserved2[3];
};

struct rx_queue_start_ramrod_data {
	__le16 rx_queue_id;
	__le16 num_of_pbl_pages;
	__le16 bd_max_bytes;
	__le16 sb_id;
	u8 sb_index;
	u8 vport_id;
	u8 default_rss_queue_flg;
	u8 complete_cqe_flg;
	u8 complete_event_flg;
	u8 stats_counter_id;
	u8 pin_context;
	u8 pxp_tph_valid_bd;
	u8 pxp_tph_valid_pkt;
	u8 pxp_st_hint;
	__le16 pxp_st_index;
	u8 reserved[4];
	struct regpair cqe_pbl_addr;
	struct regpair bd_base;
	struct regpair sge_base;
};

struct rx_queue_stop_ramrod_data {
	__le16 rx_queue_id;
	u8 complete_cqe_flg;
	u8 complete_event_flg;
	u8 vport_id;
	u8 reserved[3];
};

struct rx_queue_update_ramrod_data {
	__le16 rx_queue_id;
	u8 complete_cqe_flg;
	u8 complete_event_flg;
	u8 init_sge_ring_flg;
	u8 vport_id;
	u8 pxp_tph_valid_sge;
	u8 pxp_st_hint;
	__le16 pxp_st_index;
	u8 reserved[6];
	struct regpair sge_base;
};

struct tx_queue_start_ramrod_data {
	__le16 sb_id;
	u8 sb_index;
	u8 vport_id;
	u8 tc;
	u8 stats_counter_id;
	__le16 qm_pq_id;
	u8 flags;
#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK 0x1F
#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT 3
	u8 pin_context;
	u8 pxp_tph_valid_bd;
	u8 pxp_tph_valid_pkt;
	__le16 pxp_st_index;
	u8 pxp_st_hint;
	u8 reserved1[3];
	__le16 queue_zone_id;
	__le16 test_dup_count;
	__le16 pbl_size;
	struct regpair pbl_base_addr;
};

struct tx_queue_stop_ramrod_data {
	__le16 reserved[4];
};

struct vport_filter_update_ramrod_data {
	struct eth_filter_cmd_header filter_cmd_hdr;
	struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
};

struct vport_start_ramrod_data {
	u8 vport_id;
	u8 sw_fid;
	__le16 mtu;
	u8 drop_ttl0_en;
	u8 inner_vlan_removal_en;
	struct eth_vport_rx_mode rx_mode;
	struct eth_vport_tx_mode tx_mode;
	struct eth_vport_tpa_param tpa_param;
	__le16 sge_buff_size;
	u8 max_sges_num;
	u8 tx_switching_en;
	u8 anti_spoofing_en;
	u8 default_vlan_en;
	u8 handle_ptp_pkts;
	u8 silent_vlan_removal_en;
	__le16 default_vlan;
	u8 untagged;
	u8 reserved[7];
};

struct vport_stop_ramrod_data {
	u8 vport_id;
	u8 reserved[7];
};

struct vport_update_ramrod_data_cmn {
	u8 vport_id;
	u8 update_rx_active_flg;
	u8 rx_active_flg;
	u8 update_tx_active_flg;
	u8 tx_active_flg;
	u8 update_rx_mode_flg;
	u8 update_tx_mode_flg;
	u8 update_approx_mcast_flg;
	u8 update_rss_flg;
	u8 update_inner_vlan_removal_en_flg;
	u8 inner_vlan_removal_en;
	u8 update_tpa_param_flg;
	u8 update_tpa_en_flg;
	u8 update_sge_param_flg;
	__le16 sge_buff_size;
	u8 max_sges_num;
	u8 update_tx_switching_en_flg;
	u8 tx_switching_en;
	u8 update_anti_spoofing_en_flg;
	u8 anti_spoofing_en;
	u8 update_handle_ptp_pkts;
	u8 handle_ptp_pkts;
	u8 update_default_vlan_en_flg;
	u8 default_vlan_en;
	u8 update_default_vlan_flg;
	__le16 default_vlan;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;
	u8 silent_vlan_removal_en;
	u8 reserved;
};

struct vport_update_ramrod_mcast {
	__le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
};

struct vport_update_ramrod_data {
	struct vport_update_ramrod_data_cmn common;
	struct eth_vport_rx_mode rx_mode;
	struct eth_vport_tx_mode tx_mode;
	struct eth_vport_tpa_param tpa_param;
	struct vport_update_ramrod_mcast approx_mcast;
	struct eth_vport_rss_config rss_config;
};
struct mstorm_eth_conn_ag_ctx {
	u8 byte0 /* cdu_validation */;
	u8 byte1 /* state */;
...
@@ -182,6 +182,8 @@ static int qed_init_pci(struct qed_dev *cdev,
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	dev_info->num_hwfns = cdev->num_hwfns;
@@ -199,6 +201,14 @@ int qed_fill_dev_info(struct qed_dev *cdev,
	qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
	ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (ptt) {
		qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
				       &dev_info->flash_size);

		qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
	}
	return 0;
}
...
@@ -516,6 +516,22 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn,
	return rc;
}
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   u32 *p_flash_size)
{
	u32 flash_size;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}
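As a worked example, assuming MCP_BYTES_PER_MBIT_SHIFT is 17 (2^17 bytes per megabit), an encoded field value of 3 decodes to 1 << (3 + 17) = 1,048,576 bytes (1 MiB, i.e. 8 Mbit) of flash, and a value of 6 decodes to 8 MiB.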
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
...
@@ -89,6 +89,19 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
int qed_mcp_drain(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt);
/**
 * @brief Get the flash size value
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_flash_size - flash size in bytes to be filled.
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   u32 *p_flash_size);
/**
 * @brief Send driver version to MFW
 *
...
@@ -32,8 +32,35 @@ struct qed_spq_comp_cb {
	void *cookie;
};
/**
 * @brief qed_eth_cqe_completion - handles the completion of a
 *        ramrod on the cqe ring
 *
 * @param p_hwfn
 * @param cqe
 *
 * @return int
 */
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe);
/**
 * @file
 *
 * QED Slow-hwfn queue interface
 */
union ramrod_data {
	struct pf_start_ramrod_data pf_start;
	struct rx_queue_start_ramrod_data rx_queue_start;
	struct rx_queue_update_ramrod_data rx_queue_update;
	struct rx_queue_stop_ramrod_data rx_queue_stop;
	struct tx_queue_start_ramrod_data tx_queue_start;
	struct tx_queue_stop_ramrod_data tx_queue_stop;
	struct vport_start_ramrod_data vport_start;
	struct vport_stop_ramrod_data vport_stop;
	struct vport_update_ramrod_data vport_update;
	struct vport_filter_update_ramrod_data vport_filter_update;
};
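Each slowpath queue entry embeds exactly one of these payloads, so the union sizes the entry for the largest ramrod. A hedged sketch of filling the vport_start member before posting (the `p_ent->ramrod` field and the surrounding variables follow the driver's qed_spq_entry layout and are assumptions here):

```c
/* Illustrative only: fill the vport_start member of an SPQ entry's
 * ramrod union before posting it (p_ent layout is an assumption).
 */
struct vport_start_ramrod_data *p_ramrod = &p_ent->ramrod.vport_start;

p_ramrod->vport_id = abs_vport_id;
p_ramrod->mtu = cpu_to_le16(mtu);
p_ramrod->inner_vlan_removal_en = inner_vlan_removal;
p_ramrod->drop_ttl0_en = drop_ttl0;
```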
#define EQ_MAX_CREDIT 0xffffffff
...
@@ -373,6 +373,35 @@ void qed_eq_free(struct qed_hwfn *p_hwfn,
	kfree(p_eq);
}
/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);

	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}
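On the fastpath side, the expectation is that an Rx poller recognizes slowpath CQEs and hands them back through this entry point so the pending ramrod completes via the echo value. A sketch under that assumption (`cqe_type`, `ops`, `cdev`, and `rss_id` are qede-side plumbing, and ETH_RX_CQE_TYPE_SLOW_PATH is assumed to be the HSI discriminator):

```c
/* Illustrative only: slowpath CQEs are not regular packets; hand
 * them back so the pending ramrod completes via the echo field.
 */
if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
	ops->eth_cqe_completion(cdev, rss_id,
				(struct eth_slow_path_rx_cqe *)cqe);
	continue;
}
```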
/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
...
@@ -24,12 +24,132 @@ struct qed_dev_eth_info {
	u8 num_vlan_filters;
};
struct qed_update_vport_rss_params {
	u16 rss_ind_table[128];
	u32 rss_key[10];
};

struct qed_update_vport_params {
	u8 vport_id;
	u8 update_vport_active_flg;
	u8 vport_active_flg;
	u8 update_rss_flg;
	struct qed_update_vport_rss_params rss_params;
};

struct qed_stop_rxq_params {
	u8 rss_id;
	u8 rx_queue_id;
	u8 vport_id;
	bool eq_completion_only;
};

struct qed_stop_txq_params {
	u8 rss_id;
	u8 tx_queue_id;
};

enum qed_filter_rx_mode_type {
	QED_FILTER_RX_MODE_TYPE_REGULAR,
	QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
	QED_FILTER_RX_MODE_TYPE_PROMISC,
};

enum qed_filter_xcast_params_type {
	QED_FILTER_XCAST_TYPE_ADD,
	QED_FILTER_XCAST_TYPE_DEL,
	QED_FILTER_XCAST_TYPE_REPLACE,
};

struct qed_filter_ucast_params {
	enum qed_filter_xcast_params_type type;
	u8 vlan_valid;
	u16 vlan;
	u8 mac_valid;
	unsigned char mac[ETH_ALEN];
};

struct qed_filter_mcast_params {
	enum qed_filter_xcast_params_type type;
	u8 num;
	unsigned char mac[64][ETH_ALEN];
};

union qed_filter_type_params {
	enum qed_filter_rx_mode_type accept_flags;
	struct qed_filter_ucast_params ucast;
	struct qed_filter_mcast_params mcast;
};

enum qed_filter_type {
	QED_FILTER_TYPE_UCAST,
	QED_FILTER_TYPE_MCAST,
	QED_FILTER_TYPE_RX_MODE,
	QED_MAX_FILTER_TYPES,
};

struct qed_filter_params {
	enum qed_filter_type type;
	union qed_filter_type_params filter;
};
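Putting the pieces together, a unicast MAC add would be requested through the filter_config op declared further down. A hedged sketch (`ndev`, `cdev`, and `ops` are consumer-side assumptions):

```c
/* Illustrative only: request a unicast MAC add through the exported
 * filter op.
 */
struct qed_filter_params params;
int rc;

memset(&params, 0, sizeof(params));
params.type = QED_FILTER_TYPE_UCAST;
params.filter.ucast.type = QED_FILTER_XCAST_TYPE_ADD;
params.filter.ucast.mac_valid = 1;
ether_addr_copy(params.filter.ucast.mac, ndev->dev_addr);

rc = ops->filter_config(cdev, &params);
```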
struct qed_queue_start_common_params {
	u8 rss_id;
	u8 queue_id;
	u8 vport_id;
	u16 sb;
	u16 sb_idx;
};

struct qed_eth_cb_ops {
	struct qed_common_cb_ops common;
};
struct qed_eth_ops {
	const struct qed_common_ops *common;

	int (*fill_dev_info)(struct qed_dev *cdev,
			     struct qed_dev_eth_info *info);
	int (*vport_start)(struct qed_dev *cdev,
			   u8 vport_id, u16 mtu,
			   u8 drop_ttl0_flg,
			   u8 inner_vlan_removal_en_flg);

	int (*vport_stop)(struct qed_dev *cdev,
			  u8 vport_id);

	int (*vport_update)(struct qed_dev *cdev,
			    struct qed_update_vport_params *params);

	int (*q_rx_start)(struct qed_dev *cdev,
			  struct qed_queue_start_common_params *params,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size,
			  void __iomem **pp_prod);

	int (*q_rx_stop)(struct qed_dev *cdev,
			 struct qed_stop_rxq_params *params);

	int (*q_tx_start)(struct qed_dev *cdev,
			  struct qed_queue_start_common_params *params,
			  dma_addr_t pbl_addr,
			  u16 pbl_size,
			  void __iomem **pp_doorbell);

	int (*q_tx_stop)(struct qed_dev *cdev,
			 struct qed_stop_txq_params *params);

	int (*filter_config)(struct qed_dev *cdev,
			     struct qed_filter_params *params);

	int (*fastpath_stop)(struct qed_dev *cdev);

	int (*eth_cqe_completion)(struct qed_dev *cdev,
				  u8 rss_id,
				  struct eth_slow_path_rx_cqe *cqe);
};

const struct qed_eth_ops *qed_get_eth_ops(u32 version);
...
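A qede-style consumer would bind the interface and start a vport roughly as follows (QED_ETH_INTERFACE_VERSION is assumed to be the version macro exported alongside this header; the probe function is illustrative):

```c
/* Illustrative only: bind the eth interface and start a vport. */
static const struct qed_eth_ops *qed_ops;

static int example_probe(struct qed_dev *cdev)
{
	qed_ops = qed_get_eth_ops(QED_ETH_INTERFACE_VERSION);
	if (!qed_ops)
		return -EINVAL;

	return qed_ops->vport_start(cdev, 0 /* vport_id */, 1500 /* mtu */,
				    1 /* drop_ttl0 */,
				    1 /* inner vlan removal */);
}
```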