Commit d16132ce authored by Ariel Elior, committed by David S. Miller

bnx2x: Support VF FLR

The FLR indication arrives as an attention from the management processor.
Upon VF FLR, all FLRed functions in the indication have already been
released by firmware, and now we basically need to free the resources
allocated to those VFs, and clean any remainders from the device
(FLR final cleanup).
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent commit: f1929b01
...@@ -1879,7 +1879,13 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, ...@@ -1879,7 +1879,13 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl); void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl);
/* FLR related routines */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count);
int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt);
u8 bnx2x_is_pcie_pending(struct pci_dev *dev); u8 bnx2x_is_pcie_pending(struct pci_dev *dev);
int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
char *msg, u32 poll_cnt);
void bnx2x_calc_fc_adv(struct bnx2x *bp); void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
......
...@@ -1097,8 +1097,8 @@ static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, ...@@ -1097,8 +1097,8 @@ static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
return val; return val;
} }
static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
char *msg, u32 poll_cnt) char *msg, u32 poll_cnt)
{ {
u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
if (val != 0) { if (val != 0) {
...@@ -1108,7 +1108,8 @@ static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, ...@@ -1108,7 +1108,8 @@ static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
return 0; return 0;
} }
static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) /* Common routines with VF FLR cleanup */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{ {
/* adjust polling timeout */ /* adjust polling timeout */
if (CHIP_REV_IS_EMUL(bp)) if (CHIP_REV_IS_EMUL(bp))
...@@ -1120,7 +1121,7 @@ static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) ...@@ -1120,7 +1121,7 @@ static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
return FLR_POLL_CNT; return FLR_POLL_CNT;
} }
static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{ {
struct pbf_pN_cmd_regs cmd_regs[] = { struct pbf_pN_cmd_regs cmd_regs[] = {
{0, (CHIP_IS_E3B0(bp)) ? {0, (CHIP_IS_E3B0(bp)) ?
...@@ -1195,8 +1196,7 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) ...@@ -1195,8 +1196,7 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
u32 poll_cnt)
{ {
struct sdm_op_gen op_gen = {0}; struct sdm_op_gen op_gen = {0};
...@@ -1221,7 +1221,8 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, ...@@ -1221,7 +1221,8 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
BNX2X_ERR("FW final cleanup did not succeed\n"); BNX2X_ERR("FW final cleanup did not succeed\n");
DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n", DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
(REG_RD(bp, comp_addr))); (REG_RD(bp, comp_addr)));
ret = 1; bnx2x_panic();
return 1;
} }
/* Zero completion for nxt FLR */ /* Zero completion for nxt FLR */
REG_WR(bp, comp_addr, 0); REG_WR(bp, comp_addr, 0);
...@@ -3904,6 +3905,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) ...@@ -3904,6 +3905,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_DRV_INFO_REQ) if (val & DRV_STATUS_DRV_INFO_REQ)
bnx2x_handle_drv_info_req(bp); bnx2x_handle_drv_info_req(bp);
if (val & DRV_STATUS_VF_DISABLED)
bnx2x_vf_handle_flr_event(bp);
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp); bnx2x_pmf_update(bp);
......
...@@ -876,6 +876,7 @@ ...@@ -876,6 +876,7 @@
#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2) #define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1) #define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0) #define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0)
#define DORQ_REG_VF_USAGE_CNT 0x170320
#define HC_REG_AGG_INT_0 0x108050 #define HC_REG_AGG_INT_0 0x108050
#define HC_REG_AGG_INT_1 0x108054 #define HC_REG_AGG_INT_1 0x108054
#define HC_REG_ATTN_BIT 0x108120 #define HC_REG_ATTN_BIT 0x108120
......
...@@ -138,6 +138,17 @@ enum bnx2x_vfop_mcast_state { ...@@ -138,6 +138,17 @@ enum bnx2x_vfop_mcast_state {
BNX2X_VFOP_MCAST_ADD, BNX2X_VFOP_MCAST_ADD,
BNX2X_VFOP_MCAST_CHK_DONE BNX2X_VFOP_MCAST_CHK_DONE
}; };
/* Sub-states of the per-queue FLR cleanup state machine
 * (driven by bnx2x_vfop_qflr()).
 */
enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,	/* driver-side vlan-clear-all */
	BNX2X_VFOP_QFLR_CLR_MAC,	/* driver-side mac-clear-all */
	BNX2X_VFOP_QFLR_TERMINATE,	/* TERMINATE ramrod for the queue */
	BNX2X_VFOP_QFLR_DONE		/* end the vfop */
};

/* States of the top-level per-VF FLR cleanup state machine
 * (driven by bnx2x_vfop_flr()).
 */
enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,	/* per-queue cleanup, then mcast removal */
	BNX2X_VFOP_FLR_HW	/* HW final cleanup + resource release */
};
enum bnx2x_vfop_close_state { enum bnx2x_vfop_close_state {
BNX2X_VFOP_CLOSE_QUEUES, BNX2X_VFOP_CLOSE_QUEUES,
...@@ -973,6 +984,94 @@ int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, ...@@ -973,6 +984,94 @@ int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
return -ENOMEM; return -ENOMEM;
} }
/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor).
 *
 * Re-entrant state machine: each asynchronous sub-command names this
 * function as its 'done' callback, so it is re-entered once per state
 * with vfop->state already advanced to the next stage.
 */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	/* a failure reported by a previous stage aborts the whole op */
	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	/* every sub-command completion re-enters this state machine */
	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
		DP(BNX2X_MSG_IOV,
		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
		   vf->abs_vfid, vfop->rc);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_TERMINATE:
		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		vfop->state = BNX2X_VFOP_QFLR_DONE;

		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
		   vf->abs_vfid, qstate->q_obj->state);

		/* a queue still in RESET was never opened - nothing to
		 * terminate for it
		 */
		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
			/* FW already tore the queue down on FLR, so force
			 * the SW object to STOPPED before requesting
			 * TERMINATE. NOTE(review): presumably only valid in
			 * the FLR flow - confirm against the queue state
			 * machine.
			 */
			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);
			/* jumps to op_err/op_done/op_pending below based on
			 * rc - see the bnx2x_vfop_finalize macro definition
			 */
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
		} else {
			goto op_done;
		}

op_err:
	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QFLR_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}
/* Launch the queue-FLR cleanup state machine for queue @qid of @vf.
 * @cmd supplies the completion callback and blocking mode.
 * Returns -ENOMEM when no vfop slot can be allocated.
 */
static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
			       struct bnx2x_virtf *vf,
			       struct bnx2x_vfop_cmd *cmd,
			       int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (!vfop)
		return -ENOMEM;

	vfop->args.qx.qid = qid;
	bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, bnx2x_vfop_qflr,
			 cmd->done);
	return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr, cmd->block);
}
/* VFOP multi-casts */ /* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf) static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{ {
...@@ -1430,6 +1529,201 @@ static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) ...@@ -1430,6 +1529,201 @@ static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
vf->state = VF_FREE; vf->state = VF_FREE;
} }
/* HW final cleanup for an FLRed VF: wait for the VF's doorbell-queue
 * usage counter to drain, send the FW final-cleanup command and verify
 * the TX HW is flushed. All waits are bounded by the chip-dependent
 * poll count.
 */
static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter: pretend to be the VF so the register access
	 * below targets the VF's own counter, then restore our function.
	 */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}
/* Per-VF FLR cleanup state machine: run the queue-FLR cleanup on each
 * rx queue and remove multicasts (BNX2X_VFOP_FLR_QUEUES), then perform
 * the HW final cleanup and release the VF's resources
 * (BNX2X_VFOP_FLR_HW). Re-entered as the 'done' callback of each
 * asynchronous sub-command.
 */
static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,	/* re-enter here on completion */
		.block = false,
	};

	/* a failure from a previous stage aborts the whole op */
	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		/* qx->qid starts at -1 (set in bnx2x_vfop_flr_cmd), so the
		 * pre-increment walks queue ids 0..vf_rxq_count(vf)-1, one
		 * queue per re-entry into this state.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:
		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	/* mark the VF as ready for the MCP ACK, end the op and release
	 * the FLR channel lock taken by the caller
	 */
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}
/* Launch the full FLR cleanup state machine for @vf.
 * @done: callback invoked once the whole FLR flow completes.
 * Returns -ENOMEM when no vfop slot can be allocated.
 */
static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (!vfop)
		return -ENOMEM;

	/* the handler pre-increments qid, so -1 makes it start at 0 */
	vfop->args.qx.qid = -1;
	bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES, bnx2x_vfop_flr, done);
	return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
}
/* Walk the VF database and run the FLR cleanup state machine on each
 * VF marked for cleanup (state VF_RESET, stage VF_FLR_CLN), one VF at a
 * time; once all are handled, ACK the FLRed VFs towards the MCP.
 *
 * @prev_vf: the VF whose cleanup just finished - this function is also
 *           installed as the 'done' callback of bnx2x_vfop_flr_cmd() so
 *           it re-enters itself per VF; NULL on the initial call.
 */
static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM; its completion callback re-enters
		 * this function to pick up the next VF
		 */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those that we never opened, since the MCP will interrupt us
	 * immediately again if we only ACK some of the bits, resulting in
	 * an endless loop. This can happen for example in KVM where an
	 * 'all ones' FLR request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}
/* Handle a DRV_STATUS_VF_DISABLED (FLR) notification from the MCP:
 * snapshot the FLRed-VF bitmap from shared memory, mark every affected
 * VF as reset and ready for cleanup, then kick off the cleanup flow.
 */
void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 flrd;

		/* each dword of the bitmap covers 32 absolute VF ids */
		if (vf->abs_vfid < 32)
			flrd = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			flrd = bp->vfdb->flrd_vfs[1] &
			       (1 << (vf->abs_vfid - 32));

		if (!flrd)
			continue;

		/* set as reset and ready for cleanup */
		vf->state = VF_RESET;
		vf->flr_clnup_stage = VF_FLR_CLN;

		DP(BNX2X_MSG_IOV,
		   "Initiating Final cleanup for VF %d\n",
		   vf->abs_vfid);
	}

	/* run the FLR cleanup flow over all the VFs marked above */
	bnx2x_vf_flr_clnup(bp, NULL);
}
/* IOV global initialization routines */ /* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp) void bnx2x_iov_init_dq(struct bnx2x *bp)
{ {
......
...@@ -684,9 +684,16 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp, ...@@ -684,9 +684,16 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block); void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block);
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
/* FLR routines */
/* VF FLR helpers */ /* VF FLR helpers */
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid); int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid);
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid); void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
/* Handles an FLR (or VF_DISABLE) notification form the MCP */
void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type, void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
u16 length); u16 length);
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
......
...@@ -312,6 +312,7 @@ enum channel_tlvs { ...@@ -312,6 +312,7 @@ enum channel_tlvs {
CHANNEL_TLV_RELEASE, CHANNEL_TLV_RELEASE,
CHANNEL_TLV_PF_RELEASE_VF, CHANNEL_TLV_PF_RELEASE_VF,
CHANNEL_TLV_LIST_END, CHANNEL_TLV_LIST_END,
CHANNEL_TLV_FLR,
CHANNEL_TLV_MAX CHANNEL_TLV_MAX
}; };
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册