diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index e0becec17b097d7a78ded087f80939e4f444b47e..ffc080795be7be7d671a42d36e3655bce203fa60 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -495,10 +495,6 @@ struct qed_hwfn { bool b_rdma_enabled_in_prs; u32 rdma_prs_search_reg; - /* Array of sb_info of all status blocks */ - struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD]; - u16 num_sbs; - struct qed_cxt_mngr *p_cxt_mngr; /* Flag indicating whether interrupts are enabled or not*/ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index d73e3c2654665c4537baa34875beef321cd376ee..7649f35000db919a9d4376910a85de1c10810885 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1030,7 +1030,7 @@ void qed_resc_setup(struct qed_dev *cdev) qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); - qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt); + qed_iov_setup(p_hwfn); #ifdef CONFIG_QED_LL2 if (p_hwfn->using_ll2) qed_ll2_setup(p_hwfn); @@ -1155,7 +1155,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) static void qed_init_cau_rt_data(struct qed_dev *cdev) { u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; - int i, sb_id; + int i, igu_sb_id; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -1165,15 +1165,17 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev) p_igu_info = p_hwfn->hw_info.p_igu_info; - for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev); - sb_id++) { - p_block = &p_igu_info->igu_map.igu_blocks[sb_id]; + for (igu_sb_id = 0; + igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) { + p_block = &p_igu_info->entry[igu_sb_id]; + if (!p_block->is_pf) continue; qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_block->function_id, 0, 0); - STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry); + STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, + sb_entry); } } } @@ -2036,9 +2038,12 @@ static void get_function_id(struct qed_hwfn *p_hwfn) static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) { u32 *feat_num = p_hwfn->hw_info.feat_num; - struct qed_sb_cnt_info sb_cnt_info; + struct qed_sb_cnt_info sb_cnt; u32 non_l2_sbs = 0; + memset(&sb_cnt, 0, sizeof(sb_cnt)); + qed_int_get_num_sbs(p_hwfn, &sb_cnt); + if (IS_ENABLED(CONFIG_QED_RDMA) && p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { /* Each RoCE CNQ requires: 1 status block + 1 CNQ. We divide * the available status blocks between them, taking into * consideration how many l2 queues / cnqs we have.
*/ feat_num[QED_RDMA_CNQ] = - min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2, + min_t(u32, sb_cnt.cnt / 2, RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); non_l2_sbs = feat_num[QED_RDMA_CNQ]; @@ -2055,14 +2060,11 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE || p_hwfn->hw_info.personality == QED_PCI_ETH) { /* Start by allocating VF queues, then PF's */ - memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); - qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); feat_num[QED_VF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_L2_QUEUE), - sb_cnt_info.sb_iov_cnt); + sb_cnt.iov_cnt); feat_num[QED_PF_L2_QUE] = min_t(u32, - RESC_NUM(p_hwfn, QED_SB) - - non_l2_sbs, + sb_cnt.cnt - non_l2_sbs, RESC_NUM(p_hwfn, QED_L2_QUEUE) - FEAT_NUM(p_hwfn, @@ -2070,7 +2072,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) } if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) - feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB), + feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, QED_CMDQS_CQS)); DP_VERBOSE(p_hwfn, @@ -2080,7 +2082,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), - RESC_NUM(p_hwfn, QED_SB)); + (int)sb_cnt.cnt); } const char *qed_hw_get_resc_name(enum qed_resources res_id) @@ -2199,7 +2201,6 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, { u8 num_funcs = p_hwfn->num_funcs_on_engine; bool b_ah = QED_IS_AH(p_hwfn->cdev); - struct qed_sb_cnt_info sb_cnt_info; switch (res_id) { case QED_L2_QUEUE: @@ -2251,9 +2252,10 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, *p_resc_num = 1; break; case QED_SB: - memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); - qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); - *p_resc_num = sb_cnt_info.sb_cnt; + /* Since we want its value to reflect whether MFW supports + * the new scheme, have a default of 0. + */ + *p_resc_num = 0; break; default: return -EINVAL; @@ -2322,11 +2324,6 @@ static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, goto out; } - /* Special handling for status blocks; Would be revised in future */ - if (res_id == QED_SB) { - *p_resc_num -= 1; - *p_resc_start -= p_hwfn->enabled_func_idx; - } out: /* PQs have to divide by 8 [that's the HW granularity]. * Reduce number so it would fit. 
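With the sbs_info[] array gone, qed_hw_set_feat() budgets features out of the SB counts that qed_int_get_num_sbs() reports rather than out of RESC_NUM(p_hwfn, QED_SB). The split logic in the hunks above reduces to the following standalone model (a sketch only -- the example numbers are invented, and plain parameters stand in for the RESC_NUM()/FEAT_NUM() bookkeeping):

/* Standalone model of the SB budgeting in qed_hw_set_feat(); example
 * numbers are invented, and plain parameters replace RESC_NUM()/FEAT_NUM().
 */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static void split_sbs(unsigned int sb_cnt, unsigned int sb_iov_cnt,
		      unsigned int l2_queues, unsigned int cnq_ram)
{
	unsigned int cnqs, vf_l2, pf_l2;

	/* RDMA CNQs may take up to half of the PF's SBs */
	cnqs = min_u(sb_cnt / 2, cnq_ram);

	/* VF L2 queues are bounded by the SBs set aside for VFs */
	vf_l2 = min_u(l2_queues, sb_iov_cnt);

	/* PF L2 queues get whatever SBs the CNQs left over */
	pf_l2 = min_u(sb_cnt - cnqs, l2_queues - vf_l2);

	printf("CNQ=%u VF-L2=%u PF-L2=%u\n", cnqs, vf_l2, pf_l2);
}

int main(void)
{
	split_sbs(16, 8, 12, 4);	/* -> CNQ=4 VF-L2=8 PF-L2=4 */
	return 0;
}

Capping the CNQs at half the SB budget matches the comment above: the status blocks are divided between the CNQs and the L2 queues rather than letting either starve the other.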
@@ -2424,6 +2421,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return -EINVAL; } + /* This will also learn the number of SBs from MFW */ + if (qed_int_igu_reset_cam(p_hwfn, p_ptt)) + return -EINVAL; + qed_hw_set_feat(p_hwfn); for (res_id = 0; res_id < QED_MAX_RESC; res_id++) diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index cb342f16c13794e24af55e007ddc57919a42c9db..3fc4ff22960ed10cd027923e8ab5c4217277e08b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -183,7 +183,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, p_data->q_params.queue_relative_offset = (u8)tmp; for (i = 0; i < fcoe_pf_params->num_cqs; i++) { - tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id); + u16 igu_sb_id; + + igu_sb_id = qed_get_igu_sb_id(p_hwfn, i); + tmp = cpu_to_le16(igu_sb_id); p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 6ac6d80311bbf0084c7a35a5672291154f181798..719cdbfe16952f11b2dd23a987abd6df8b805c7e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1300,6 +1300,40 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); } +static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 igu_sb_id, + u32 pi_index, + enum qed_coalescing_fsm coalescing_fsm, + u8 timeset) +{ + struct cau_pi_entry pi_entry; + u32 sb_offset, pi_offset; + + if (IS_VF(p_hwfn->cdev)) + return; + + sb_offset = igu_sb_id * PIS_PER_SB; + memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); + + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); + if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); + else + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); + + pi_offset = sb_offset + pi_index; + if (p_hwfn->hw_init_done) { + qed_wr(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), + *((u32 *)&(pi_entry))); + } else { + STORE_RT_REG(p_hwfn, + CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, + *((u32 *)&(pi_entry))); + } +} + void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, dma_addr_t sb_phys, @@ -1366,40 +1400,6 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, } } -void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 igu_sb_id, - u32 pi_index, - enum qed_coalescing_fsm coalescing_fsm, - u8 timeset) -{ - struct cau_pi_entry pi_entry; - u32 sb_offset, pi_offset; - - if (IS_VF(p_hwfn->cdev)) - return; - - sb_offset = igu_sb_id * PIS_PER_SB; - memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); - - SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); - if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) - SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); - else - SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); - - pi_offset = sb_offset + pi_index; - if (p_hwfn->hw_init_done) { - qed_wr(p_hwfn, p_ptt, - CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), - *((u32 *)&(pi_entry))); - } else { - STORE_RT_REG(p_hwfn, - CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, - *((u32 *)&(pi_entry))); - } -} - void qed_int_sb_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_sb_info *sb_info) { @@ -1412,16 +1412,47 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, sb_info->igu_sb_id, 0, 0); } -/** - * @brief qed_get_igu_sb_id - given a sw 
sb_id return the - * igu_sb_id - * - * @param p_hwfn - * @param sb_id - * - * @return u16 - */ -static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) +struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf) +{ + struct qed_igu_block *p_block; + u16 igu_id; + + for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); + igu_id++) { + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; + + if (!(p_block->status & QED_IGU_STATUS_VALID) || + !(p_block->status & QED_IGU_STATUS_FREE)) + continue; + + if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf) + return p_block; + } + + return NULL; +} + +static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id) +{ + struct qed_igu_block *p_block; + u16 igu_id; + + for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); + igu_id++) { + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; + + if (!(p_block->status & QED_IGU_STATUS_VALID) || + !p_block->is_pf || + p_block->vector_number != vector_id) + continue; + + return igu_id; + } + + return QED_SB_INVALID_IDX; +} + +u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) { u16 igu_sb_id; @@ -1429,7 +1460,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) if (sb_id == QED_SP_SB_ID) igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; else if (IS_PF(p_hwfn->cdev)) - igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; + igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1); else igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); @@ -1454,8 +1485,19 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); if (sb_id != QED_SP_SB_ID) { - p_hwfn->sbs_info[sb_id] = sb_info; - p_hwfn->num_sbs++; + if (IS_PF(p_hwfn->cdev)) { + struct qed_igu_info *p_info; + struct qed_igu_block *p_block; + + p_info = p_hwfn->hw_info.p_igu_info; + p_block = &p_info->entry[sb_info->igu_sb_id]; + + p_block->sb_info = sb_info; + p_block->status &= ~QED_IGU_STATUS_FREE; + p_info->usage.free_cnt--; + } else { + qed_vf_set_sb_info(p_hwfn, sb_id, sb_info); + } } sb_info->cdev = p_hwfn->cdev; @@ -1484,20 +1526,35 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, int qed_int_sb_release(struct qed_hwfn *p_hwfn, struct qed_sb_info *sb_info, u16 sb_id) { - if (sb_id == QED_SP_SB_ID) { - DP_ERR(p_hwfn, "Do Not free sp sb using this function"); - return -EINVAL; - } + struct qed_igu_block *p_block; + struct qed_igu_info *p_info; + + if (!sb_info) + return 0; /* zero status block and ack counter */ sb_info->sb_ack = 0; memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); - if (p_hwfn->sbs_info[sb_id] != NULL) { - p_hwfn->sbs_info[sb_id] = NULL; - p_hwfn->num_sbs--; + if (IS_VF(p_hwfn->cdev)) { + qed_vf_set_sb_info(p_hwfn, sb_id, NULL); + return 0; + } + + p_info = p_hwfn->hw_info.p_igu_info; + p_block = &p_info->entry[sb_info->igu_sb_id]; + + /* Vector 0 is reserved to Default SB */ + if (!p_block->vector_number) { + DP_ERR(p_hwfn, "Do Not free sp sb using this function"); + return -EINVAL; } + /* Lose reference to client's SB info, and fix counters */ + p_block->sb_info = NULL; + p_block->status |= QED_IGU_STATUS_FREE; + p_info->usage.free_cnt++; + return 0; } @@ -1616,10 +1673,9 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); } -int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - enum qed_int_mode int_mode) +static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) { - int rc = 0; /* 
Configure AEU signal change to produce attentions */ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); @@ -1632,6 +1688,16 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, /* Unmask AEU signals toward IGU */ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); +} + +int +qed_int_igu_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_int_mode int_mode) +{ + int rc = 0; + + qed_int_igu_enable_attn(p_hwfn, p_ptt); + if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { rc = qed_slowpath_irq_req(p_hwfn); if (rc) { @@ -1660,10 +1726,11 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) #define IGU_CLEANUP_SLEEP_LENGTH (1000) static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 sb_id, bool cleanup_set, u16 opaque_fid) + u16 igu_sb_id, + bool cleanup_set, u16 opaque_fid) { u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; - u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id; + u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; /* Set the data field */ @@ -1686,8 +1753,8 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, mmiowb(); /* calculate where to read the status bit from */ - sb_bit = 1 << (sb_id % 32); - sb_bit_addr = sb_id / 32 * sizeof(u32); + sb_bit = 1 << (igu_sb_id % 32); + sb_bit_addr = igu_sb_id / 32 * sizeof(u32); sb_bit_addr += IGU_REG_CLEANUP_STATUS_0; @@ -1704,29 +1771,38 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, if (!sleep_cnt) DP_NOTICE(p_hwfn, "Timeout waiting for clear status 0x%08x [for sb %d]\n", - val, sb_id); + val, igu_sb_id); } void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 sb_id, u16 opaque, bool b_set) + u16 igu_sb_id, u16 opaque, bool b_set) { + struct qed_igu_block *p_block; int pi, i; + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", + igu_sb_id, + p_block->function_id, + p_block->is_pf, p_block->vector_number); + /* Set */ if (b_set) - qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque); + qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); /* Clear */ - qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque); + qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); /* Wait for the IGU SB to cleanup */ for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { u32 val; val = qed_rd(p_hwfn, p_ptt, - IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4)); - if (val & (1 << (sb_id % 32))) + IGU_REG_WRITE_DONE_PENDING + + ((igu_sb_id / 32) * 4)); + if (val & BIT((igu_sb_id % 32))) usleep_range(10, 20); else break; @@ -1734,84 +1810,205 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, if (i == IGU_CLEANUP_SLEEP_LENGTH) DP_NOTICE(p_hwfn, "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", - sb_id); + igu_sb_id); /* Clear the CAU for the SB */ for (pi = 0; pi < 12; pi++) qed_wr(p_hwfn, p_ptt, - CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0); + CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); } void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_set, bool b_slowpath) { - u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; - u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; - u32 sb_id = 0, val = 0; + struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + struct qed_igu_block *p_block; + u16 igu_sb_id = 0; + u32 val = 0; val = 
qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, - "IGU cleaning SBs [%d,...,%d]\n", - igu_base_sb, igu_base_sb + igu_sb_cnt - 1); + for (igu_sb_id = 0; + igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { + p_block = &p_info->entry[igu_sb_id]; + + if (!(p_block->status & QED_IGU_STATUS_VALID) || + !p_block->is_pf || + (p_block->status & QED_IGU_STATUS_DSB)) + continue; - for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++) - qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, p_hwfn->hw_info.opaque_fid, b_set); + } - if (!b_slowpath) - return; - - sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, - "IGU cleaning slowpath SB [%d]\n", sb_id); - qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, - p_hwfn->hw_info.opaque_fid, b_set); + if (b_slowpath) + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, + p_info->igu_dsb_id, + p_hwfn->hw_info.opaque_fid, + b_set); } -static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u16 sb_id) +int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 val = qed_rd(p_hwfn, p_ptt, - IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id); + struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; struct qed_igu_block *p_block; + int pf_sbs, vf_sbs; + u16 igu_sb_id; + u32 val, rval; - p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; + if (!RESC_NUM(p_hwfn, QED_SB)) { + p_info->b_allow_pf_vf_change = false; + } else { + /* Use the numbers the MFW has provided - + * don't forget MFW accounts for the default SB as well. + */ + p_info->b_allow_pf_vf_change = true; + + if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) { + DP_INFO(p_hwfn, + "MFW notifies of 0x%04x PF SBs; IGU indicates only 0x%04x\n", + RESC_NUM(p_hwfn, QED_SB) - 1, + p_info->usage.cnt); + p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1; + } - /* stop scanning when hit first invalid PF entry */ - if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && - GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) - goto out; + if (IS_PF_SRIOV(p_hwfn)) { + u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs; - /* Fill the block information */ - p_block->status = QED_IGU_STATUS_VALID; - p_block->function_id = GET_FIELD(val, - IGU_MAPPING_LINE_FUNCTION_NUMBER); - p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); - p_block->vector_number = GET_FIELD(val, - IGU_MAPPING_LINE_VECTOR_NUMBER); + if (vfs != p_info->usage.iov_cnt) + DP_VERBOSE(p_hwfn, + NETIF_MSG_INTR, + "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", + p_info->usage.iov_cnt, vfs); - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, - "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n", - sb_id, val, p_block->function_id, - p_block->is_pf, p_block->vector_number); + /* At this point we know the total number of SBs in the IGU, + * plus the number of PF SBs. So we can validate that enough + * would remain for the VFs.
*/ + if (vfs > p_info->usage.free_cnt + + p_info->usage.free_cnt_iov - p_info->usage.cnt) { + DP_NOTICE(p_hwfn, + "Not enough SBs for VFs - 0x%04x SBs available, while the PF requires 0x%04x and the VFs require 0x%04x\n", + p_info->usage.free_cnt + + p_info->usage.free_cnt_iov, + p_info->usage.cnt, vfs); + return -EINVAL; + } -out: - return val; + /* For now, cap the number of VF SBs by the + * number of VFs. + */ + p_info->usage.iov_cnt = vfs; + } + } + + /* Mark all SBs as free, now in the right PF/VFs division */ + p_info->usage.free_cnt = p_info->usage.cnt; + p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; + p_info->usage.orig = p_info->usage.cnt; + p_info->usage.iov_orig = p_info->usage.iov_cnt; + + /* We now proceed to re-configure the IGU CAM to reflect the initial + * configuration. We can start with the Default SB. + */ + pf_sbs = p_info->usage.cnt; + vf_sbs = p_info->usage.iov_cnt; + + for (igu_sb_id = p_info->igu_dsb_id; + igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { + p_block = &p_info->entry[igu_sb_id]; + val = 0; + + if (!(p_block->status & QED_IGU_STATUS_VALID)) + continue; + + if (p_block->status & QED_IGU_STATUS_DSB) { + p_block->function_id = p_hwfn->rel_pf_id; + p_block->is_pf = 1; + p_block->vector_number = 0; + p_block->status = QED_IGU_STATUS_VALID | + QED_IGU_STATUS_PF | + QED_IGU_STATUS_DSB; + } else if (pf_sbs) { + pf_sbs--; + p_block->function_id = p_hwfn->rel_pf_id; + p_block->is_pf = 1; + p_block->vector_number = p_info->usage.cnt - pf_sbs; + p_block->status = QED_IGU_STATUS_VALID | + QED_IGU_STATUS_PF | + QED_IGU_STATUS_FREE; + } else if (vf_sbs) { + p_block->function_id = + p_hwfn->cdev->p_iov_info->first_vf_in_pf + + p_info->usage.iov_cnt - vf_sbs; + p_block->is_pf = 0; + p_block->vector_number = 0; + p_block->status = QED_IGU_STATUS_VALID | + QED_IGU_STATUS_FREE; + vf_sbs--; + } else { + p_block->function_id = 0; + p_block->is_pf = 0; + p_block->vector_number = 0; + } + + SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, + p_block->function_id); + SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); + SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, + p_block->vector_number); + + /* VF entries will be enabled when the VF is initialized */ + SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); + + rval = qed_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); + + if (rval != val) { + qed_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + + sizeof(u32) * igu_sb_id, val); + + DP_VERBOSE(p_hwfn, + NETIF_MSG_INTR, + "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", + igu_sb_id, + p_block->function_id, + p_block->is_pf, + p_block->vector_number, rval, val); + } + } + + return 0; +} + +static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 igu_sb_id) +{ + u32 val = qed_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); + struct qed_igu_block *p_block; + + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; + + /* Fill the block information */ + p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); + p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); + p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); + p_block->igu_sb_id = igu_sb_id; }
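Both qed_int_igu_reset_cam() and qed_int_igu_read_cam_block() above treat an IGU CAM line as a packed 32-bit word, composed and decomposed with SET_FIELD()/GET_FIELD() over the IGU_MAPPING_LINE_* masks. A toy standalone model of that packing -- the bit offsets and widths here are invented for illustration, the real layout comes from the qed HSI headers:

/* Toy model of an IGU CAM line; the FUNC/PF/VECTOR/VALID layout below is
 * hypothetical, standing in for the IGU_MAPPING_LINE_* register masks.
 */
#include <stdint.h>
#include <stdio.h>

#define FUNC_NUM_SHIFT	0	/* hypothetical layout */
#define FUNC_NUM_MASK	0xff
#define PF_VALID_SHIFT	8
#define VECTOR_SHIFT	9
#define VECTOR_MASK	0xff
#define VALID_SHIFT	17

static uint32_t encode_line(uint8_t func, int is_pf, uint8_t vec, int valid)
{
	return ((uint32_t)(func & FUNC_NUM_MASK) << FUNC_NUM_SHIFT) |
	       ((uint32_t)!!is_pf << PF_VALID_SHIFT) |
	       ((uint32_t)(vec & VECTOR_MASK) << VECTOR_SHIFT) |
	       ((uint32_t)!!valid << VALID_SHIFT);
}

int main(void)
{
	/* A PF entry for function 2, vector 5, enabled */
	uint32_t line = encode_line(2, 1, 5, 1);

	printf("func=%u is_pf=%u vec=%u\n",
	       (unsigned int)((line >> FUNC_NUM_SHIFT) & FUNC_NUM_MASK),
	       (unsigned int)((line >> PF_VALID_SHIFT) & 1),
	       (unsigned int)((line >> VECTOR_SHIFT) & VECTOR_MASK));
	return 0;
}

Note also that the reset loop above is effectively read-compare-write: it reads the current line into rval and only issues the qed_wr() when the recomputed value differs, avoiding redundant MMIO writes while resetting the whole CAM.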
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_igu_info *p_igu_info; - u32 val, min_vf = 0, max_vf = 0; - u16 sb_id, last_iov_sb_id = 0; - struct qed_igu_block *blk; - u16 prev_sb_id = 0xFF; + struct qed_igu_block *p_block; + u32 min_vf = 0, max_vf = 0; + u16 igu_sb_id; p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); if (!p_hwfn->hw_info.p_igu_info) @@ -1819,12 +2016,10 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_igu_info = p_hwfn->hw_info.p_igu_info; - /* Initialize base sb / sb cnt for PFs and VFs */ - p_igu_info->igu_base_sb = 0xffff; - p_igu_info->igu_sb_cnt = 0; - p_igu_info->igu_dsb_id = 0xffff; - p_igu_info->igu_base_sb_iov = 0xffff; + /* Distinguish between existent and non-existent default SB */ + p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX; + /* Find the range of VF ids whose SB belong to this PF */ if (p_hwfn->cdev->p_iov_info) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; @@ -1832,113 +2027,69 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; } - for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); - sb_id++) { - blk = &p_igu_info->igu_map.igu_blocks[sb_id]; - - val = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id); - - /* stop scanning when hit first invalid PF entry */ - if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && - GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) - break; - - if (blk->is_pf) { - if (blk->function_id == p_hwfn->rel_pf_id) { - blk->status |= QED_IGU_STATUS_PF; - - if (blk->vector_number == 0) { - if (p_igu_info->igu_dsb_id == 0xffff) - p_igu_info->igu_dsb_id = sb_id; - } else { - if (p_igu_info->igu_base_sb == - 0xffff) { - p_igu_info->igu_base_sb = sb_id; - } else if (prev_sb_id != sb_id - 1) { - DP_NOTICE(p_hwfn->cdev, - "consecutive igu vectors for HWFN %x broken", - p_hwfn->rel_pf_id); - break; - } - prev_sb_id = sb_id; - /* we don't count the default */ - (p_igu_info->igu_sb_cnt)++; - } - } - } else { - if ((blk->function_id >= min_vf) && - (blk->function_id < max_vf)) { - /* Available for VFs of this PF */ - if (p_igu_info->igu_base_sb_iov == 0xffff) { - p_igu_info->igu_base_sb_iov = sb_id; - } else if (last_iov_sb_id != sb_id - 1) { - if (!val) { - DP_VERBOSE(p_hwfn->cdev, - NETIF_MSG_INTR, - "First uninitialized IGU CAM entry at index 0x%04x\n", - sb_id); - } else { - DP_NOTICE(p_hwfn->cdev, - "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n", - p_hwfn->rel_pf_id, - last_iov_sb_id, - sb_id); } - break; - } - blk->status |= QED_IGU_STATUS_FREE; - p_hwfn->hw_info.p_igu_info->free_blks++; - last_iov_sb_id = sb_id; - } + for (igu_sb_id = 0; + igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { + /* Read current entry; Notice it might not belong to this PF */ + qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); + p_block = &p_igu_info->entry[igu_sb_id]; + + if ((p_block->is_pf) && + (p_block->function_id == p_hwfn->rel_pf_id)) { + p_block->status = QED_IGU_STATUS_PF | + QED_IGU_STATUS_VALID | + QED_IGU_STATUS_FREE; + + if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) + p_igu_info->usage.cnt++; + } else if (!(p_block->is_pf) && + (p_block->function_id >= min_vf) && + (p_block->function_id < max_vf)) { + /* Available for VFs of this PF */ + p_block->status = QED_IGU_STATUS_VALID | + QED_IGU_STATUS_FREE; + + if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) + p_igu_info->usage.iov_cnt++; } - } - /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect - * the number of VF SBs [especially for first VF on engine, as we can't - * differentiate between empty entries and its entries].
- * Since we don't really support more SBs than VFs today, prevent any - * such configuration by sanitizing the number of SBs to equal the - * number of VFs. - */ - if (IS_PF_SRIOV(p_hwfn)) { - u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs; + /* Mark the first entry belonging to the PF or its VFs + * as the default SB [we'll reset IGU prior to first usage]. + */ + if ((p_block->status & QED_IGU_STATUS_VALID) && + (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) { + p_igu_info->igu_dsb_id = igu_sb_id; + p_block->status |= QED_IGU_STATUS_DSB; + } - if (total_vfs < p_igu_info->free_blks) { - DP_VERBOSE(p_hwfn, - (NETIF_MSG_INTR | QED_MSG_IOV), - "Limiting number of SBs for IOV - %04x --> %04x\n", - p_igu_info->free_blks, - p_hwfn->cdev->p_iov_info->total_vfs); - p_igu_info->free_blks = total_vfs; - } else if (total_vfs > p_igu_info->free_blks) { - DP_NOTICE(p_hwfn, - "IGU has only %04x SBs for VFs while the device has %04x VFs\n", - p_igu_info->free_blks, total_vfs); - return -EINVAL; + /* Limit the number of prints by having each PF print only its + * entries, with the exception of PF0 which prints + * everything. + */ + if ((p_block->status & QED_IGU_STATUS_VALID) || + (p_hwfn->abs_pf_id == 0)) { + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", + igu_sb_id, p_block->function_id, + p_block->is_pf, p_block->vector_number); } } - p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; - - DP_VERBOSE( - p_hwfn, - NETIF_MSG_INTR, - "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n", - p_igu_info->igu_base_sb, - p_igu_info->igu_base_sb_iov, - p_igu_info->igu_sb_cnt, - p_igu_info->igu_sb_cnt_iov, - p_igu_info->igu_dsb_id); - - if (p_igu_info->igu_base_sb == 0xffff || - p_igu_info->igu_dsb_id == 0xffff || - p_igu_info->igu_sb_cnt == 0) { + + if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) { DP_NOTICE(p_hwfn, - "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n", - p_igu_info->igu_base_sb, - p_igu_info->igu_sb_cnt, - p_igu_info->igu_dsb_id); + "IGU CAM returned invalid values igu_dsb_id=0x%x\n", + p_igu_info->igu_dsb_id); return -EINVAL; } + /* All non-default SBs are considered free at this point */ + p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; + p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", + p_igu_info->igu_dsb_id, + p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt); + return 0; } @@ -2035,31 +2186,7 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, if (!info || !p_sb_cnt_info) return; - p_sb_cnt_info->sb_cnt = info->igu_sb_cnt; - p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov; - p_sb_cnt_info->sb_free_blk = info->free_blks; -} - -u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) -{ - struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; - - /* Determine origin of SB id */ - if ((sb_id >= p_info->igu_base_sb) && - (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) { - return sb_id - p_info->igu_base_sb; - } else if ((sb_id >= p_info->igu_base_sb_iov) && - (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { - /* We want the first VF queue to be adjacent to the - * last PF queue. Since L2 queues can be partial to - * SBs, we'll use the feature instead. - */ - return sb_id - p_info->igu_base_sb_iov + - FEAT_NUM(p_hwfn, QED_PF_L2_QUE); - } else { - DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id); - return 0; - } + memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info)); } void qed_int_disable_post_isr_release(struct qed_dev *cdev)
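qed_int_queue_id_from_sb_id() can be removed because the shadow CAM now carries back-references: every qed_igu_block records its own igu_sb_id and, once a client registers, a sb_info pointer. A reverse lookup therefore no longer needs base/count arithmetic. A hypothetical helper (not part of this patch, assumes the qed driver headers) built only on the structures introduced here could look like:

/* Hypothetical reverse lookup; assumes the qed driver headers. Returns the
 * client's qed_sb_info for a CAM index, or NULL if the entry is unused or
 * belongs to another function.
 */
static struct qed_sb_info *qed_igu_sb_to_info(struct qed_hwfn *p_hwfn,
					      u16 igu_sb_id)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;

	if (igu_sb_id >= QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))
		return NULL;

	return p_info->entry[igu_sb_id].sb_info;
}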
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 0ae0bb4593effc45a12895a43c49c8a1fdcfb636..5199634ed630bb47c93c84a85cf3466b62d86919 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -78,24 +78,6 @@ enum qed_coalescing_fsm { QED_COAL_TX_STATE_MACHINE }; -/** - * @brief qed_int_cau_conf_pi - configure cau for a given - * status block - * - * @param p_hwfn - * @param p_ptt - * @param igu_sb_id - * @param pi_index - * @param state - * @param timeset - */ -void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 igu_sb_id, - u32 pi_index, - enum qed_coalescing_fsm coalescing_fsm, - u8 timeset); - /** * @brief qed_int_igu_enable_int - enable device interrupts * @@ -217,32 +199,63 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev); #define SB_ALIGNED_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn) +#define QED_SB_INVALID_IDX 0xffff + struct qed_igu_block { - u8 status; + u8 status; #define QED_IGU_STATUS_FREE 0x01 #define QED_IGU_STATUS_VALID 0x02 #define QED_IGU_STATUS_PF 0x04 +#define QED_IGU_STATUS_DSB 0x08 - u8 vector_number; - u8 function_id; - u8 is_pf; -}; + u8 vector_number; + u8 function_id; + u8 is_pf; + + /* Index inside IGU [meant for back reference] */ + u16 igu_sb_id; -struct qed_igu_map { - struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH]; + struct qed_sb_info *sb_info; }; struct qed_igu_info { - struct qed_igu_map igu_map; - u16 igu_dsb_id; - u16 igu_base_sb; - u16 igu_base_sb_iov; - u16 igu_sb_cnt; - u16 igu_sb_cnt_iov; - u16 free_blks; + struct qed_igu_block entry[MAX_TOT_SB_PER_PATH]; + u16 igu_dsb_id; + + struct qed_sb_cnt_info usage; + + bool b_allow_pf_vf_change; }; -/* TODO Names of function may change... */ +/** + * @brief - Make sure the IGU CAM reflects the resources provided by MFW + * + * @param p_hwfn + * @param p_ptt + */ +int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * @brief Translate the weakly-defined client sb-id into an IGU sb-id + * + * @param p_hwfn + * @param sb_id - user provided sb_id + * + * @return an index inside IGU CAM where the SB resides + */ +u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); + +/** + * @brief return a pointer to an unused valid SB + * + * @param p_hwfn + * @param b_is_pf - true iff we want an SB belonging to a PF + * + * @return pointer to an igu_block, NULL if none is available + */ +struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, + bool b_is_pf); + void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_set, @@ -321,13 +334,13 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn); * * @param p_hwfn * @param p_ptt - * @param sb_id - igu status block id + * @param igu_sb_id - igu status block id * @param opaque - opaque fid of the sb owner.
* @param b_set - set(1) / clear(0) */ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 sb_id, + u16 igu_sb_id, u16 opaque, bool b_set); @@ -376,16 +389,6 @@ void qed_int_free(struct qed_hwfn *p_hwfn); void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); -/** - * @brief - Returns an Rx queue index appropriate for usage with given SB. - * - * @param p_hwfn - * @param sb_id - absolute index of SB - * - * @return index of Rx queue - */ -u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); - /** * @brief - Enable Interrupt & Attention for hw function * diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 43a20a6fd1b68b18edae96b9d5d725d26485d9ff..bc8ce09d390f07347d2b1e406639266f50edc5b6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -220,7 +220,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, p_queue->cmdq_sb_pi = p_params->gl_cmd_pi; for (i = 0; i < p_params->num_queues; i++) { - val = p_hwfn->sbs_info[i]->igu_sb_id; + val = qed_get_igu_sb_id(p_hwfn, i); p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c5bb80b9afc1b878c56dcd6a89804fe9b5577850..ac3bdcd9f0b6d8eca8a38a1b4b76ec991bdf58ce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -762,7 +762,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, for_each_hwfn(cdev, i) { memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); - cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt; + cdev->int_params.in.num_vectors += sb_cnt_info.cnt; cdev->int_params.in.num_vectors++; /* slowpath */ } diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index eb1a5cfc49c0b7a222932cbd88bdb4ec5ed6b078..b9434b707b0821ac4564a657ef7d1be2f361aae0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -581,6 +581,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; u32 cnq_id, sb_id; + u16 igu_sb_id; int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n"); @@ -612,10 +613,10 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) { sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id); + igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); + p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id); p_cnq_params = &p_ramrod->cnq_params[cnq_id]; p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id]; - p_cnq_params->sb_num = - cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id); p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi; p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
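The FCoE, iSCSI and RDMA hunks above all converge on the same pattern: protocol code no longer dereferences p_hwfn->sbs_info[i] but asks qed_get_igu_sb_id() to translate its relative vector index into a CAM index (for a PF, relative index i maps to vector i + 1, since vector 0 is reserved for the default SB). A hedged sketch of that pattern, assuming the qed driver headers; sb_num_arr and num_queues are stand-in names for the ramrod fields used by the callers above:

/* Assumes the qed driver headers; sb_num_arr/num_queues are illustrative
 * stand-ins for the ramrod fields filled by the protocol callers above.
 */
static void fill_sb_num_arr(struct qed_hwfn *p_hwfn,
			    __le16 *sb_num_arr, u8 num_queues)
{
	u8 i;

	for (i = 0; i < num_queues; i++) {
		/* Relative fastpath index -> index inside the IGU CAM */
		u16 igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);

		sb_num_arr[i] = cpu_to_le16(igu_sb_id);
	}
}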
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index b6bda45d048982cbc696615b9a8324e762590d00..5ae8827534f8b7c0bf99d8ef5567861c9e92951c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -378,33 +378,6 @@ static int qed_iov_pci_cfg_info(struct qed_dev *cdev) return 0; } -static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) -{ - struct qed_igu_block *p_sb; - u16 sb_id; - u32 val; - - if (!p_hwfn->hw_info.p_igu_info) { - DP_ERR(p_hwfn, - "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n"); - return; - } - - for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); - sb_id++) { - p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; - if ((p_sb->status & QED_IGU_STATUS_FREE) && - !(p_sb->status & QED_IGU_STATUS_PF)) { - val = qed_rd(p_hwfn, p_ptt, - IGU_REG_MAPPING_MEMORY + sb_id * 4); - SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); - qed_wr(p_hwfn, p_ptt, - IGU_REG_MAPPING_MEMORY + 4 * sb_id, val); - } - } -} - static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; @@ -555,13 +528,12 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn) return qed_iov_allocate_vfdb(p_hwfn); } -void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +void qed_iov_setup(struct qed_hwfn *p_hwfn) { if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) return; qed_iov_setup_vfdb(p_hwfn); - qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt); } void qed_iov_free(struct qed_hwfn *p_hwfn) @@ -868,45 +840,36 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf, u16 num_rx_queues) { - struct qed_igu_block *igu_blocks; - int qid = 0, igu_id = 0; + struct qed_igu_block *p_block; + struct cau_sb_entry sb_entry; + int qid = 0; u32 val = 0; - igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks; - - if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks) - num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks; - p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues; + if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov) + num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov; + p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues; SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); - while ((qid < num_rx_queues) && - (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) { - if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) { - struct cau_sb_entry sb_entry; - - vf->igu_sbs[qid] = (u16)igu_id; - igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE; - - SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); - - qed_wr(p_hwfn, p_ptt, - IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id, - val); - - /* Configure igu sb in CAU which were marked valid */ - qed_init_cau_sb_entry(p_hwfn, &sb_entry, - p_hwfn->rel_pf_id, - vf->abs_vf_id, 1); - qed_dmae_host2grc(p_hwfn, p_ptt, - (u64)(uintptr_t)&sb_entry, - CAU_REG_SB_VAR_MEMORY + - igu_id * sizeof(u64), 2, 0); - qid++; - } - igu_id++; + for (qid = 0; qid < num_rx_queues; qid++) { + p_block = qed_get_igu_free_sb(p_hwfn, false); + vf->igu_sbs[qid] = p_block->igu_sb_id; + p_block->status &= ~QED_IGU_STATUS_FREE; + SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); + + qed_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + + sizeof(u32) * p_block->igu_sb_id, val); + + /* Configure in CAU the IGU SBs which were marked valid */ + qed_init_cau_sb_entry(p_hwfn, &sb_entry, + p_hwfn->rel_pf_id, vf->abs_vf_id, 1); + qed_dmae_host2grc(p_hwfn, p_ptt, + (u64)(uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + p_block->igu_sb_id * sizeof(u64), 2, 0); } vf->num_sbs = (u8) num_rx_queues;
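qed_iov_alloc_vf_igu_sbs() above now leans on qed_get_igu_free_sb() instead of scanning igu_blocks[] itself. A minimal usage sketch of that helper -- hypothetical code, not part of the patch, assuming the qed driver headers; unlike the driver loop it deducts the free counter per claimed block and checks for exhaustion explicitly:

/* Hypothetical helper, assumes the qed driver headers: claim a single free
 * VF-side CAM entry and report its IGU index to the caller.
 */
static int claim_vf_sb(struct qed_hwfn *p_hwfn, u16 *p_igu_sb_id)
{
	struct qed_igu_block *p_block;

	/* b_is_pf == false: look for an entry belonging to a VF */
	p_block = qed_get_igu_free_sb(p_hwfn, false);
	if (!p_block)
		return -ENOMEM;

	p_block->status &= ~QED_IGU_STATUS_FREE;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov--;

	*p_igu_sb_id = p_block->igu_sb_id;

	return 0;
}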
@@ -931,10 +894,8 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); qed_wr(p_hwfn, p_ptt, addr, val); - p_info->igu_map.igu_blocks[igu_id].status |= - QED_IGU_STATUS_FREE; - - p_hwfn->hw_info.p_igu_info->free_blks++; + p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE; + p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++; } vf->num_sbs = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 81a497ce65851898636b1e46d9c8ad3ff4081f48..801cc005e52bf15ec62aaa3bbe00f22c4c62f9cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -316,9 +316,8 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn); * @brief qed_iov_setup - setup sriov related resources * * @param p_hwfn - * @param p_ptt */ -void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +void qed_iov_setup(struct qed_hwfn *p_hwfn); /** * @brief qed_iov_free - free sriov related resources @@ -397,7 +396,7 @@ static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn) return 0; } -static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +static inline void qed_iov_setup(struct qed_hwfn *p_hwfn) { } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 11d71e5eea14375662e8e4da66a78e8b43d3f499..3703b22a397344bded91ad3155a0c028152e280a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -792,9 +792,12 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, req->only_untagged = only_untagged; /* status blocks */ - for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) - if (p_hwfn->sbs_info[i]) - req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys; + for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) { + struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i]; + + if (p_sb) + req->sb_addr[i] = p_sb->sb_phys; + } /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, @@ -1240,6 +1243,24 @@ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; } +void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, + u16 sb_id, struct qed_sb_info *p_sb) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + + if (!p_iov) { + DP_NOTICE(p_hwfn, "vf_iov_info isn't initialized\n"); + return; + } + + if (sb_id >= PFVF_MAX_SBS_PER_VF) { + DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id); + return; + } + + p_iov->sbs_info[sb_id] = p_sb; +} + int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 34ac70b0e5fe8a8c52c28b73532d059c2ee28763..67862085f0321932205707fd555469a267ba5b56 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -627,6 +627,14 @@ struct qed_vf_iov { * this has to be propagated as it affects the fastpath. */ bool b_pre_fp_hsi; + + /* Current-day VFs pass their SBs' physical addresses on vport + * start, and as they lack an IGU mapping they need to store the + * addresses of previously registered SBs. + * Even if we were to change the configuration flow, due to backward + * compatibility [with older PFs] we'd still need to store these. + */ + struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF]; }; #ifdef CONFIG_QED_SRIOV @@ -836,6 +844,16 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn); */ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); +/** + * @brief Stores [or removes] a configured sb_info. +
* + * @param p_hwfn + * @param sb_id - zero-based SB index [for fastpath] + * @param p_sb - new SB info; may be NULL [during removal]. + */ +void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, + u16 sb_id, struct qed_sb_info *p_sb); + /** * @brief qed_vf_pf_vport_start - perform vport start for VF. * diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 73c46d6d5727e4e354124e1516e73c9b0be74c83..607e1c5e185a3d1dda995f1828cd71aecb61c774 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -886,9 +886,15 @@ struct qed_eth_stats { #define TX_PI(tc) (RX_PI + 1 + tc) struct qed_sb_cnt_info { - int sb_cnt; - int sb_iov_cnt; - int sb_free_blk; + /* Original, current, and free SBs for PF */ + int orig; + int cnt; + int free_cnt; + + /* Original, current, and free SBs for child VFs */ + int iov_orig; + int iov_cnt; + int free_cnt_iov; }; static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
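The qed_if.h hunk above replaces the old sb_cnt/sb_iov_cnt/sb_free_blk triplet with PF and VF views that each track original, current, and free counts, which qed_int_get_num_sbs() now fills with a single memcpy() of igu_info->usage. A sketch of a consumer, modeled on the qed_slowpath_setup_int() hunk earlier and assuming the qed driver headers (the function name is invented):

/* Sketch of a qed_sb_cnt_info consumer; assumes the qed driver headers.
 * Mirrors the qed_slowpath_setup_int() hunk: one vector per PF fastpath SB
 * plus one slowpath vector per hwfn.
 */
static int count_fastpath_vectors(struct qed_dev *cdev)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int vectors = 0, i;

	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);

		vectors += sb_cnt_info.cnt;	/* PF fastpath SBs */
		vectors++;			/* slowpath SB */
	}

	return vectors;
}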