Commit afc3de92 authored by David S. Miller

Merge branch 'qed-next'

Yuval Mintz says:

====================
qed: update series

This patch series improves the general configuration: it changes the
configuration to better suit B0 boards and makes more resources
available to each physical function.
In addition, it contains some small fixes and semantic changes.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
......@@ -146,9 +146,6 @@ struct qed_hw_info {
u16 ovlan;
u32 part_num[4];
u32 vendor_id;
u32 device_id;
unsigned char hw_mac_addr[ETH_ALEN];
struct qed_igu_info *p_igu_info;
......
......@@ -32,6 +32,33 @@
#include "qed_sp.h"
/* API common to all protocols */
enum BAR_ID {
BAR_ID_0, /* used for GRC */
BAR_ID_1 /* Used for doorbells */
};
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
if (val)
return 1 << (val + 15);
/* Old MFW initialized the above register only conditionally */
if (p_hwfn->cdev->num_hwfns > 1) {
DP_INFO(p_hwfn,
"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
} else {
DP_INFO(p_hwfn,
"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
return 512 * 1024;
}
}
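The register value encodes the BAR size as a power of two, 1 << (val + 15) bytes, i.e. 32kB scaled up by val; val == 1 decodes to 64kB, val == 4 to 512kB, and 0 means the management firmware never wrote the register, which is what the fallback above handles. A minimal stand-alone sketch of the decoding (bar_size_bytes is an illustrative helper, not part of the driver):
#include <stdio.h>
/* Illustrative decode of a PGLUE_B_REG_PF_BARx_SIZE value, mirroring
 * qed_hw_bar_size(): size = 1 << (val + 15) bytes, 0 = unconfigured.
 */
static unsigned int bar_size_bytes(unsigned int val)
{
	return val ? 1u << (val + 15) : 0;
}
int main(void)
{
	unsigned int val;
	for (val = 1; val <= 5; val++)
		printf("val=%u -> %u kB\n", val, bar_size_bytes(val) / 1024);
	/* prints: 64 kB, 128 kB, 256 kB, 512 kB, 1024 kB */
	return 0;
}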
void qed_init_dp(struct qed_dev *cdev,
u32 dp_module, u8 dp_level)
{
......@@ -393,7 +420,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
int hw_mode = 0;
hw_mode = (1 << MODE_BB_A0);
hw_mode = (1 << MODE_BB_B0);
switch (p_hwfn->cdev->num_ports_in_engines) {
case 1:
......@@ -650,10 +677,8 @@ int qed_hw_init(struct qed_dev *cdev,
bool allow_npar_tx_switch,
const u8 *bin_fw_data)
{
struct qed_storm_stats *p_stat;
u32 load_code, param, *p_address;
u32 load_code, param;
int rc, mfw_rc, i;
u8 fw_vport = 0;
rc = qed_init_fw_data(cdev, bin_fw_data);
if (rc != 0)
......@@ -662,10 +687,6 @@ int qed_hw_init(struct qed_dev *cdev,
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
if (rc != 0)
return rc;
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
......@@ -729,35 +750,60 @@ int qed_hw_init(struct qed_dev *cdev,
}
p_hwfn->hw_init_done = true;
}
/* init PF stats */
p_stat = &p_hwfn->storm_stats;
p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
MSTORM_QUEUE_STAT_OFFSET(fw_vport);
p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
return 0;
}
p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
USTORM_QUEUE_STAT_OFFSET(fw_vport);
p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
#define QED_HW_STOP_RETRY_LIMIT (10)
static inline void qed_hw_timers_stop(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
int i;
p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
PSTORM_QUEUE_STAT_OFFSET(fw_vport);
p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
/* close timers */
qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
if ((!qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN)) &&
(!qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_TASK)))
break;
p_address = &p_stat->tstats.address;
*p_address = BAR0_MAP_REG_TSDM_RAM +
TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
/* Depending on the number of connections/tasks, a 1ms
 * sleep may be required between polls
 */
usleep_range(1000, 2000);
}
return 0;
if (i < QED_HW_STOP_RETRY_LIMIT)
return;
DP_NOTICE(p_hwfn,
"Timers linear scans are not over [Connection %02x Tasks %02x]\n",
(u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
(u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
void qed_hw_timers_stop_all(struct qed_dev *cdev)
{
int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
}
}
#define QED_HW_STOP_RETRY_LIMIT (10)
int qed_hw_stop(struct qed_dev *cdev)
{
int rc = 0, t_rc;
int i, j;
int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
......@@ -770,7 +816,8 @@ int qed_hw_stop(struct qed_dev *cdev)
rc = qed_sp_pf_stop(p_hwfn);
if (rc)
return rc;
DP_NOTICE(p_hwfn,
"Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
......@@ -781,24 +828,7 @@ int qed_hw_stop(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
if ((!qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN)) &&
(!qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_TASK)))
break;
usleep_range(1000, 2000);
}
if (i == QED_HW_STOP_RETRY_LIMIT)
DP_NOTICE(p_hwfn,
"Timers linear scans are not over [Connection %02x Tasks %02x]\n",
(u8)qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN),
(u8)qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_TASK));
qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
/* Disable Attention Generation */
qed_int_igu_disable_int(p_hwfn, p_ptt);
......@@ -827,7 +857,7 @@ int qed_hw_stop(struct qed_dev *cdev)
void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
int i, j;
int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
......@@ -846,25 +876,6 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
if ((!qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN)) &&
(!qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_TASK)))
break;
usleep_range(1000, 2000);
}
if (i == QED_HW_STOP_RETRY_LIMIT)
DP_NOTICE(p_hwfn,
"Timers linear scans are not over [Connection %02x Tasks %02x]\n",
(u8)qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN),
(u8)qed_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_TASK));
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
/* Need to wait 1ms to guarantee SBs are cleared */
......@@ -949,18 +960,8 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
}
/* Setup bar access */
static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
int rc;
/* Allocate PTT pool */
rc = qed_ptt_pool_alloc(p_hwfn);
if (rc)
return rc;
/* Allocate the main PTT */
p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
/* clear indirect access */
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
......@@ -975,8 +976,6 @@ static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
/* enable internal target-read */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
return 0;
}
static void get_function_id(struct qed_hwfn *p_hwfn)
......@@ -1084,13 +1083,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
/* Read nvm_cfg1 (Notice this is just the offset, and not the offsize (TBD)) */
nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
/* Read Vendor Id / Device Id */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
offsetof(struct nvm_cfg1, glob) +
offsetof(struct nvm_cfg1_glob, pci_id);
p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
NVM_CFG1_GLOB_VENDOR_ID_MASK;
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
offsetof(struct nvm_cfg1, glob) +
offsetof(struct nvm_cfg1_glob, core_cfg);
......@@ -1284,7 +1276,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
return rc;
}
static void qed_get_dev_info(struct qed_dev *cdev)
static int qed_get_dev_info(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
u32 tmp;
......@@ -1323,6 +1315,14 @@ static void qed_get_dev_info(struct qed_dev *cdev)
"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal);
if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
DP_NOTICE(cdev->hwfns,
"The chip type/rev (BB A0) is not supported!\n");
return -EINVAL;
}
return 0;
}
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
......@@ -1345,15 +1345,24 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
get_function_id(p_hwfn);
rc = qed_hw_hwfn_prepare(p_hwfn);
/* Allocate PTT pool */
rc = qed_ptt_pool_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
goto err0;
}
/* Allocate the main PTT */
p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id)
qed_get_dev_info(p_hwfn->cdev);
if (!p_hwfn->my_id) {
rc = qed_get_dev_info(p_hwfn->cdev);
if (rc != 0)
goto err1;
}
qed_hw_hwfn_prepare(p_hwfn);
/* Initialize MCP structure */
rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
......@@ -1385,17 +1394,6 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
return rc;
}
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
u8 bar_id)
{
u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
: PGLUE_B_REG_PF_BAR1_SIZE);
u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
/* Get the BAR size (in bytes) from the hardware-given val */
return 1 << (val + 15);
}
int qed_hw_prepare(struct qed_dev *cdev,
int personality)
{
......@@ -1420,11 +1418,11 @@ int qed_hw_prepare(struct qed_dev *cdev,
u8 __iomem *addr;
/* adjust bar offset for second engine */
addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
p_regview = addr;
/* adjust doorbell bar offset for second engine */
addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
p_doorbell = addr;
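/* Each engine receives half of each PF BAR; e.g. with a 512kB
 * doorbell BAR, the second engine's doorbells start 256kB into
 * the mapping (sizes here are illustrative).
 */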
/* prepare second hw function */
......@@ -1536,223 +1534,6 @@ void qed_chain_free(struct qed_dev *cdev,
p_chain->p_phys_addr);
}
static void __qed_get_vport_stats(struct qed_dev *cdev,
struct qed_eth_stats *stats)
{
int i, j;
memset(stats, 0, sizeof(*stats));
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
struct eth_mstorm_per_queue_stat mstats;
struct eth_ustorm_per_queue_stat ustats;
struct eth_pstorm_per_queue_stat pstats;
struct tstorm_per_port_stat tstats;
struct port_stats port_stats;
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
memset(&mstats, 0, sizeof(mstats));
qed_memcpy_from(p_hwfn, p_ptt, &mstats,
p_hwfn->storm_stats.mstats.address,
p_hwfn->storm_stats.mstats.len);
memset(&ustats, 0, sizeof(ustats));
qed_memcpy_from(p_hwfn, p_ptt, &ustats,
p_hwfn->storm_stats.ustats.address,
p_hwfn->storm_stats.ustats.len);
memset(&pstats, 0, sizeof(pstats));
qed_memcpy_from(p_hwfn, p_ptt, &pstats,
p_hwfn->storm_stats.pstats.address,
p_hwfn->storm_stats.pstats.len);
memset(&tstats, 0, sizeof(tstats));
qed_memcpy_from(p_hwfn, p_ptt, &tstats,
p_hwfn->storm_stats.tstats.address,
p_hwfn->storm_stats.tstats.len);
memset(&port_stats, 0, sizeof(port_stats));
if (p_hwfn->mcp_info)
qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, stats),
sizeof(port_stats));
qed_ptt_release(p_hwfn, p_ptt);
stats->no_buff_discards +=
HILO_64_REGPAIR(mstats.no_buff_discard);
stats->packet_too_big_discard +=
HILO_64_REGPAIR(mstats.packet_too_big_discard);
stats->ttl0_discard +=
HILO_64_REGPAIR(mstats.ttl0_discard);
stats->tpa_coalesced_pkts +=
HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
stats->tpa_coalesced_events +=
HILO_64_REGPAIR(mstats.tpa_coalesced_events);
stats->tpa_aborts_num +=
HILO_64_REGPAIR(mstats.tpa_aborts_num);
stats->tpa_coalesced_bytes +=
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
stats->rx_ucast_bytes +=
HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
stats->rx_mcast_bytes +=
HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
stats->rx_bcast_bytes +=
HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
stats->rx_ucast_pkts +=
HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
stats->rx_mcast_pkts +=
HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
stats->rx_bcast_pkts +=
HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
stats->mftag_filter_discards +=
HILO_64_REGPAIR(tstats.mftag_filter_discard);
stats->mac_filter_discards +=
HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
stats->tx_ucast_bytes +=
HILO_64_REGPAIR(pstats.sent_ucast_bytes);
stats->tx_mcast_bytes +=
HILO_64_REGPAIR(pstats.sent_mcast_bytes);
stats->tx_bcast_bytes +=
HILO_64_REGPAIR(pstats.sent_bcast_bytes);
stats->tx_ucast_pkts +=
HILO_64_REGPAIR(pstats.sent_ucast_pkts);
stats->tx_mcast_pkts +=
HILO_64_REGPAIR(pstats.sent_mcast_pkts);
stats->tx_bcast_pkts +=
HILO_64_REGPAIR(pstats.sent_bcast_pkts);
stats->tx_err_drop_pkts +=
HILO_64_REGPAIR(pstats.error_drop_pkts);
stats->rx_64_byte_packets += port_stats.pmm.r64;
stats->rx_127_byte_packets += port_stats.pmm.r127;
stats->rx_255_byte_packets += port_stats.pmm.r255;
stats->rx_511_byte_packets += port_stats.pmm.r511;
stats->rx_1023_byte_packets += port_stats.pmm.r1023;
stats->rx_1518_byte_packets += port_stats.pmm.r1518;
stats->rx_1522_byte_packets += port_stats.pmm.r1522;
stats->rx_2047_byte_packets += port_stats.pmm.r2047;
stats->rx_4095_byte_packets += port_stats.pmm.r4095;
stats->rx_9216_byte_packets += port_stats.pmm.r9216;
stats->rx_16383_byte_packets += port_stats.pmm.r16383;
stats->rx_crc_errors += port_stats.pmm.rfcs;
stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
stats->rx_pause_frames += port_stats.pmm.rxpf;
stats->rx_pfc_frames += port_stats.pmm.rxpp;
stats->rx_align_errors += port_stats.pmm.raln;
stats->rx_carrier_errors += port_stats.pmm.rfcr;
stats->rx_oversize_packets += port_stats.pmm.rovr;
stats->rx_jabbers += port_stats.pmm.rjbr;
stats->rx_undersize_packets += port_stats.pmm.rund;
stats->rx_fragments += port_stats.pmm.rfrg;
stats->tx_64_byte_packets += port_stats.pmm.t64;
stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
stats->tx_pause_frames += port_stats.pmm.txpf;
stats->tx_pfc_frames += port_stats.pmm.txpp;
stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
stats->tx_total_collisions += port_stats.pmm.tncl;
stats->rx_mac_bytes += port_stats.pmm.rbyte;
stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
stats->tx_mac_bytes += port_stats.pmm.tbyte;
stats->tx_mac_uc_packets += port_stats.pmm.txuca;
stats->tx_mac_mc_packets += port_stats.pmm.txmca;
stats->tx_mac_bc_packets += port_stats.pmm.txbca;
stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
for (j = 0; j < 8; j++) {
stats->brb_truncates += port_stats.brb.brb_truncate[j];
stats->brb_discards += port_stats.brb.brb_discard[j];
}
}
}
void qed_get_vport_stats(struct qed_dev *cdev,
struct qed_eth_stats *stats)
{
u32 i;
if (!cdev) {
memset(stats, 0, sizeof(*stats));
return;
}
__qed_get_vport_stats(cdev, stats);
if (!cdev->reset_stats)
return;
/* Reduce the statistics baseline */
for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}
/* zeroes V-PORT specific portion of stats (Port stats remain untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
int i;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
struct eth_mstorm_per_queue_stat mstats;
struct eth_ustorm_per_queue_stat ustats;
struct eth_pstorm_per_queue_stat pstats;
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
memset(&mstats, 0, sizeof(mstats));
qed_memcpy_to(p_hwfn, p_ptt,
p_hwfn->storm_stats.mstats.address,
&mstats,
p_hwfn->storm_stats.mstats.len);
memset(&ustats, 0, sizeof(ustats));
qed_memcpy_to(p_hwfn, p_ptt,
p_hwfn->storm_stats.ustats.address,
&ustats,
p_hwfn->storm_stats.ustats.len);
memset(&pstats, 0, sizeof(pstats));
qed_memcpy_to(p_hwfn, p_ptt,
p_hwfn->storm_stats.pstats.address,
&pstats,
p_hwfn->storm_stats.pstats.len);
qed_ptt_release(p_hwfn, p_ptt);
}
/* PORT statistics are not necessarily reset, so we need to
* read and create a baseline for future statistics.
*/
if (!cdev->reset_stats)
DP_INFO(cdev, "Reset stats not allocated\n");
else
__qed_get_vport_stats(cdev, cdev->reset_stats);
}
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
u16 src_id, u16 *dst_id)
{
......
......@@ -77,6 +77,15 @@ int qed_hw_init(struct qed_dev *cdev,
bool allow_npar_tx_switch,
const u8 *bin_fw_data);
/**
* @brief qed_hw_timers_stop_all - stop the timers HW block
*
* @param cdev
*
* @return void
*/
void qed_hw_timers_stop_all(struct qed_dev *cdev);
/**
* @brief qed_hw_stop -
*
......@@ -156,8 +165,6 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
*/
void qed_ptt_release(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
void qed_get_vport_stats(struct qed_dev *cdev,
struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
enum qed_dmae_address_type_t {
......
......@@ -968,7 +968,7 @@ struct igu_msix_vector {
enum init_modes {
MODE_BB_A0,
MODE_RESERVED,
MODE_BB_B0,
MODE_RESERVED2,
MODE_ASIC,
MODE_RESERVED3,
......
......@@ -31,6 +31,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
......@@ -1231,6 +1232,328 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
return rc;
}
/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
u32 *p_len,
u16 statistics_bin)
{
*p_addr = BAR0_MAP_REG_PSDM_RAM +
PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_pstorm_per_queue_stat);
}
static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *p_stats,
u16 statistics_bin)
{
struct eth_pstorm_per_queue_stat pstats;
u32 pstats_addr = 0, pstats_len = 0;
__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
statistics_bin);
memset(&pstats, 0, sizeof(pstats));
qed_memcpy_from(p_hwfn, p_ptt, &pstats,
pstats_addr, pstats_len);
p_stats->tx_ucast_bytes +=
HILO_64_REGPAIR(pstats.sent_ucast_bytes);
p_stats->tx_mcast_bytes +=
HILO_64_REGPAIR(pstats.sent_mcast_bytes);
p_stats->tx_bcast_bytes +=
HILO_64_REGPAIR(pstats.sent_bcast_bytes);
p_stats->tx_ucast_pkts +=
HILO_64_REGPAIR(pstats.sent_ucast_pkts);
p_stats->tx_mcast_pkts +=
HILO_64_REGPAIR(pstats.sent_mcast_pkts);
p_stats->tx_bcast_pkts +=
HILO_64_REGPAIR(pstats.sent_bcast_pkts);
p_stats->tx_err_drop_pkts +=
HILO_64_REGPAIR(pstats.error_drop_pkts);
}
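HILO_64_REGPAIR() above folds a firmware register pair, two 32-bit halves, into one 64-bit counter. A hedged sketch of the operation, assuming a {lo, hi} layout (struct regpair_example and regpair_to_u64 are illustrative names):
/* Illustrative only: combine a 32-bit {lo, hi} register pair into a
 * 64-bit value, the way HILO_64_REGPAIR() consumes the storm counters.
 */
struct regpair_example {
	u32 lo;
	u32 hi;
};
static u64 regpair_to_u64(struct regpair_example rp)
{
	return ((u64)rp.hi << 32) | rp.lo;
}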
static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
u32 *p_len)
{
*p_addr = BAR0_MAP_REG_TSDM_RAM +
TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
*p_len = sizeof(struct tstorm_per_port_stat);
}
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *p_stats,
u16 statistics_bin)
{
u32 tstats_addr = 0, tstats_len = 0;
struct tstorm_per_port_stat tstats;
__qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
memset(&tstats, 0, sizeof(tstats));
qed_memcpy_from(p_hwfn, p_ptt, &tstats,
tstats_addr, tstats_len);
p_stats->mftag_filter_discards +=
HILO_64_REGPAIR(tstats.mftag_filter_discard);
p_stats->mac_filter_discards +=
HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
u32 *p_len,
u16 statistics_bin)
{
*p_addr = BAR0_MAP_REG_USDM_RAM +
USTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_ustorm_per_queue_stat);
}
static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *p_stats,
u16 statistics_bin)
{
struct eth_ustorm_per_queue_stat ustats;
u32 ustats_addr = 0, ustats_len = 0;
__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
statistics_bin);
memset(&ustats, 0, sizeof(ustats));
qed_memcpy_from(p_hwfn, p_ptt, &ustats,
ustats_addr, ustats_len);
p_stats->rx_ucast_bytes +=
HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
p_stats->rx_mcast_bytes +=
HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
p_stats->rx_bcast_bytes +=
HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
p_stats->rx_ucast_pkts +=
HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
p_stats->rx_mcast_pkts +=
HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
p_stats->rx_bcast_pkts +=
HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
u32 *p_len,
u16 statistics_bin)
{
*p_addr = BAR0_MAP_REG_MSDM_RAM +
MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_mstorm_per_queue_stat);
}
static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *p_stats,
u16 statistics_bin)
{
struct eth_mstorm_per_queue_stat mstats;
u32 mstats_addr = 0, mstats_len = 0;
__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
statistics_bin);
memset(&mstats, 0, sizeof(mstats));
qed_memcpy_from(p_hwfn, p_ptt, &mstats,
mstats_addr, mstats_len);
p_stats->no_buff_discards +=
HILO_64_REGPAIR(mstats.no_buff_discard);
p_stats->packet_too_big_discard +=
HILO_64_REGPAIR(mstats.packet_too_big_discard);
p_stats->ttl0_discard +=
HILO_64_REGPAIR(mstats.ttl0_discard);
p_stats->tpa_coalesced_pkts +=
HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
p_stats->tpa_coalesced_events +=
HILO_64_REGPAIR(mstats.tpa_coalesced_events);
p_stats->tpa_aborts_num +=
HILO_64_REGPAIR(mstats.tpa_aborts_num);
p_stats->tpa_coalesced_bytes +=
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *p_stats)
{
struct port_stats port_stats;
int j;
memset(&port_stats, 0, sizeof(port_stats));
qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, stats),
sizeof(port_stats));
p_stats->rx_64_byte_packets += port_stats.pmm.r64;
p_stats->rx_127_byte_packets += port_stats.pmm.r127;
p_stats->rx_255_byte_packets += port_stats.pmm.r255;
p_stats->rx_511_byte_packets += port_stats.pmm.r511;
p_stats->rx_1023_byte_packets += port_stats.pmm.r1023;
p_stats->rx_1518_byte_packets += port_stats.pmm.r1518;
p_stats->rx_1522_byte_packets += port_stats.pmm.r1522;
p_stats->rx_2047_byte_packets += port_stats.pmm.r2047;
p_stats->rx_4095_byte_packets += port_stats.pmm.r4095;
p_stats->rx_9216_byte_packets += port_stats.pmm.r9216;
p_stats->rx_16383_byte_packets += port_stats.pmm.r16383;
p_stats->rx_crc_errors += port_stats.pmm.rfcs;
p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
p_stats->rx_pause_frames += port_stats.pmm.rxpf;
p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
p_stats->rx_align_errors += port_stats.pmm.raln;
p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
p_stats->rx_oversize_packets += port_stats.pmm.rovr;
p_stats->rx_jabbers += port_stats.pmm.rjbr;
p_stats->rx_undersize_packets += port_stats.pmm.rund;
p_stats->rx_fragments += port_stats.pmm.rfrg;
p_stats->tx_64_byte_packets += port_stats.pmm.t64;
p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
p_stats->tx_pause_frames += port_stats.pmm.txpf;
p_stats->tx_pfc_frames += port_stats.pmm.txpp;
p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
p_stats->tx_total_collisions += port_stats.pmm.tncl;
p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
for (j = 0; j < 8; j++) {
p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
p_stats->brb_discards += port_stats.brb.brb_discard[j];
}
}
static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *stats,
u16 statistics_bin)
{
__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
if (p_hwfn->mcp_info)
__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
static void _qed_get_vport_stats(struct qed_dev *cdev,
struct qed_eth_stats *stats)
{
u8 fw_vport = 0;
int i;
memset(stats, 0, sizeof(*stats));
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
struct qed_ptt *p_ptt;
/* The main vport index is relative first */
if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
DP_ERR(p_hwfn, "No vport available!\n");
continue;
}
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
qed_ptt_release(p_hwfn, p_ptt);
}
}
void qed_get_vport_stats(struct qed_dev *cdev,
struct qed_eth_stats *stats)
{
u32 i;
if (!cdev) {
memset(stats, 0, sizeof(*stats));
return;
}
_qed_get_vport_stats(cdev, stats);
if (!cdev->reset_stats)
return;
/* Reduce the statistics baseline */
for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}
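The baseline subtraction above works because struct qed_eth_stats consists solely of u64 counters, so both the live snapshot and the reset-time snapshot can be walked as flat u64 arrays. A minimal sketch of the pattern (struct counters_example is an illustrative stand-in):
/* Illustrative only: subtract a baseline from a struct made up
 * entirely of u64 counters, as the loop above does.
 */
struct counters_example {
	u64 rx_pkts;
	u64 tx_pkts;
};
static void counters_sub_baseline(struct counters_example *cur,
				  const struct counters_example *base)
{
	u64 *c = (u64 *)cur;
	const u64 *b = (const u64 *)base;
	unsigned int i;
	for (i = 0; i < sizeof(*cur) / sizeof(u64); i++)
		c[i] -= b[i];
}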
/* zeroes V-PORT specific portion of stats (Port stats remain untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
int i;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
struct eth_mstorm_per_queue_stat mstats;
struct eth_ustorm_per_queue_stat ustats;
struct eth_pstorm_per_queue_stat pstats;
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
u32 addr = 0, len = 0;
if (!p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
memset(&mstats, 0, sizeof(mstats));
__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
memset(&ustats, 0, sizeof(ustats));
__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
memset(&pstats, 0, sizeof(pstats));
__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
qed_ptt_release(p_hwfn, p_ptt);
}
/* PORT statistics are not necessarily reset, so we need to
* read and create a baseline for future statistics.
*/
if (!cdev->reset_stats)
DP_INFO(cdev, "Reset stats not allocated\n");
else
_qed_get_vport_stats(cdev, cdev->reset_stats);
}
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
......
......@@ -779,7 +779,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
true, data);
if (rc)
goto err3;
goto err2;
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
......@@ -798,12 +798,14 @@ static int qed_slowpath_start(struct qed_dev *cdev,
return rc;
}
qed_reset_vport_stats(cdev);
return 0;
err3:
qed_free_stream_mem(cdev);
qed_slowpath_irq_free(cdev);
err2:
qed_hw_timers_stop_all(cdev);
qed_slowpath_irq_free(cdev);
qed_free_stream_mem(cdev);
qed_disable_msix(cdev);
err1:
qed_resc_free(cdev);
......