Commit c5a2a4a3 by Usha Ketineni, committed by Jeff Kirsher

ice: Fix to make VLAN priority tagged traffic appear on all TCs

This patch includes the below changes to resolve the issue of ETS
bandwidth shaping not working:

1. Allocation of Tx queues is accounted for based on the enabled TCs
   in ice_vsi_setup_q_map(), and the Tx queues on those TCs are enabled
   via ice_vsi_cfg_txqs() (see the sketch after this list).

2. Get the mapped netdev TC # for each user priority and set the
   priority-to-TC mapping for the VSI.
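
As an illustration of change (1), here is a minimal standalone sketch of the
even split of allocated Tx queues across the enabled TCs; split_txqs() and
the example counts are hypothetical stand-ins, not driver code:

#include <stdio.h>

/* Divide a VSI's allocated Tx queues evenly across its enabled TCs,
 * mirroring the tx_numq_tc/tx_count accounting that
 * ice_vsi_setup_q_map() performs in the diff below.
 */
static void split_txqs(unsigned int alloc_txq, unsigned int numtc)
{
	unsigned int tx_numq_tc = alloc_txq / numtc;
	unsigned int tx_count = 0, tc;

	if (!tx_numq_tc)
		tx_numq_tc = 1;	/* every enabled TC gets at least one queue */

	for (tc = 0; tc < numtc; tc++) {
		printf("TC %u: %u Tx queues at offset %u\n",
		       tc, tx_numq_tc, tx_count);
		tx_count += tx_numq_tc;
	}
	printf("num_txq = %u\n", tx_count);	/* what becomes vsi->num_txq */
}

int main(void)
{
	split_txqs(16, 3);	/* e.g. 16 allocated Tx queues, 3 enabled TCs */
	return 0;
}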
Signed-off-by: Usha Ketineni <usha.k.ketineni@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Parent 99fc1057
@@ -112,7 +112,9 @@ extern const char ice_drv_ver[];
 struct ice_tc_info {
 	u16 qoffset;
-	u16 qcount;
+	u16 qcount_tx;
+	u16 qcount_rx;
+	u8 netdev_tc;
 };
 
 struct ice_tc_cfg {
@@ -774,11 +774,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
  */
 static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 {
-	u16 offset = 0, qmap = 0, numq_tc;
-	u16 pow = 0, max_rss = 0, qcount;
+	u16 offset = 0, qmap = 0, tx_count = 0;
 	u16 qcount_tx = vsi->alloc_txq;
 	u16 qcount_rx = vsi->alloc_rxq;
+	u16 tx_numq_tc, rx_numq_tc;
+	u16 pow = 0, max_rss = 0;
 	bool ena_tc0 = false;
+	u8 netdev_tc = 0;
 	int i;
 
 	/* at least TC0 should be enabled by default */
@@ -794,7 +796,12 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 		vsi->tc_cfg.ena_tc |= 1;
 	}
 
-	numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+	rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+	if (!rx_numq_tc)
+		rx_numq_tc = 1;
+	tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
+	if (!tx_numq_tc)
+		tx_numq_tc = 1;
 
 	/* TC mapping is a function of the number of Rx queues assigned to the
 	 * VSI for each traffic class and the offset of these queues.
@@ -808,7 +815,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 	 * Setup number and offset of Rx queues for all TCs for the VSI
 	 */
 
-	qcount = numq_tc;
+	qcount_rx = rx_numq_tc;
+
 	/* qcount will change if RSS is enabled */
 	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
 		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
@@ -816,37 +824,41 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 				max_rss = ICE_MAX_LG_RSS_QS;
 			else
 				max_rss = ICE_MAX_SMALL_RSS_QS;
-			qcount = min_t(int, numq_tc, max_rss);
-			qcount = min_t(int, qcount, vsi->rss_size);
+			qcount_rx = min_t(int, rx_numq_tc, max_rss);
+			qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
 		}
 	}
 
 	/* find the (rounded up) power-of-2 of qcount */
-	pow = order_base_2(qcount);
+	pow = order_base_2(qcount_rx);
 
 	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
 			/* TC is not enabled */
 			vsi->tc_cfg.tc_info[i].qoffset = 0;
-			vsi->tc_cfg.tc_info[i].qcount = 1;
+			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
 			ctxt->info.tc_mapping[i] = 0;
 			continue;
 		}
 
 		/* TC is enabled */
 		vsi->tc_cfg.tc_info[i].qoffset = offset;
-		vsi->tc_cfg.tc_info[i].qcount = qcount;
+		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+		vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
+		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
 		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
 			ICE_AQ_VSI_TC_Q_OFFSET_M) |
 			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
 			 ICE_AQ_VSI_TC_Q_NUM_M);
-		offset += qcount;
+		offset += qcount_rx;
+		tx_count += tx_numq_tc;
 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
 	}
 
-	vsi->num_txq = qcount_tx;
 	vsi->num_rxq = offset;
+	vsi->num_txq = tx_count;
 
 	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
 		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
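
For reference, a standalone sketch of the qmap packing used just above. The
shift/mask values here are assumptions standing in for the driver's real
ICE_AQ_VSI_TC_Q_OFFSET_S/_M and ICE_AQ_VSI_TC_Q_NUM_S/_M definitions; the
idea is that each 16-bit per-TC word carries the TC's first queue offset
plus the log2 of its queue count:

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout (hypothetical values, not from ice_adminq_cmd.h) */
#define TC_Q_OFFSET_S	0
#define TC_Q_OFFSET_M	(0x7FF << TC_Q_OFFSET_S)
#define TC_Q_NUM_S	11
#define TC_Q_NUM_M	(0xF << TC_Q_NUM_S)

static uint16_t build_qmap(uint16_t offset, uint16_t pow)
{
	/* same bit packing as the qmap expression in the hunk above */
	return ((offset << TC_Q_OFFSET_S) & TC_Q_OFFSET_M) |
	       ((pow << TC_Q_NUM_S) & TC_Q_NUM_M);
}

int main(void)
{
	/* a TC starting at queue offset 8 with 2^2 = 4 Rx queues -> 0x1008 */
	printf("qmap = 0x%04x\n", build_qmap(8, 2));
	return 0;
}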
@@ -1611,10 +1623,10 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 	struct ice_aqc_add_tx_qgrp *qg_buf;
 	struct ice_aqc_add_txqs_perq *txq;
 	struct ice_pf *pf = vsi->back;
+	u8 num_q_grps, q_idx = 0;
 	enum ice_status status;
 	u16 buf_len, i, pf_q;
 	int err = 0, tc = 0;
-	u8 num_q_grps;
 
 	buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
 	qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
@@ -1628,38 +1640,49 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 	qg_buf->num_txqs = 1;
 	num_q_grps = 1;
 
-	/* set up and configure the Tx queues */
-	ice_for_each_txq(vsi, i) {
-		struct ice_tlan_ctx tlan_ctx = { 0 };
+	/* set up and configure the Tx queues for each enabled TC */
+	for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
+			break;
 
-		pf_q = vsi->txq_map[i];
-		ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
-		/* copy context contents into the qg_buf */
-		qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
-		ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
-			    ice_tlan_ctx_info);
+		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
+			struct ice_tlan_ctx tlan_ctx = { 0 };
+
+			pf_q = vsi->txq_map[q_idx];
+			ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
+					 pf_q);
+			/* copy context contents into the qg_buf */
+			qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+			ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+				    ice_tlan_ctx_info);
 
-		/* init queue specific tail reg. It is referred as transmit
-		 * comm scheduler queue doorbell.
-		 */
-		vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
-		status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
-					 num_q_grps, qg_buf, buf_len, NULL);
-		if (status) {
-			dev_err(&vsi->back->pdev->dev,
-				"Failed to set LAN Tx queue context, error: %d\n",
-				status);
-			err = -ENODEV;
-			goto err_cfg_txqs;
-		}
+			/* init queue specific tail reg. It is referred as
+			 * transmit comm scheduler queue doorbell.
+			 */
+			vsi->tx_rings[q_idx]->tail =
+				pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+			status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+						 num_q_grps, qg_buf, buf_len,
+						 NULL);
+			if (status) {
+				dev_err(&vsi->back->pdev->dev,
					"Failed to set LAN Tx queue context, error: %d\n",
+					status);
+				err = -ENODEV;
+				goto err_cfg_txqs;
+			}
 
-		/* Add Tx Queue TEID into the VSI Tx ring from the response
-		 * This will complete configuring and enabling the queue.
-		 */
-		txq = &qg_buf->txqs[0];
-		if (pf_q == le16_to_cpu(txq->txq_id))
-			vsi->tx_rings[i]->txq_teid =
-				le32_to_cpu(txq->q_teid);
+			/* Add Tx Queue TEID into the VSI Tx ring from the
+			 * response. This will complete configuring and
+			 * enabling the queue.
+			 */
+			txq = &qg_buf->txqs[0];
+			if (pf_q == le16_to_cpu(txq->txq_id))
+				vsi->tx_rings[q_idx]->txq_teid =
+					le32_to_cpu(txq->q_teid);
+			q_idx++;
+		}
 	}
 
 err_cfg_txqs:
 	devm_kfree(&pf->pdev->dev, qg_buf);
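
The reworked loop above walks the enabled TCs in order and hands each one a
contiguous run of rings through a single flat index. A minimal standalone
sketch of that iteration pattern (cfg_txqs(), ena_tc, and qcount_tx[] are
stand-ins for the vsi->tc_cfg fields, not the driver API):

#include <stdio.h>

#define MAX_TC 8

static void cfg_txqs(unsigned char ena_tc, const unsigned int *qcount_tx)
{
	unsigned int q_idx = 0, tc, i;

	for (tc = 0; tc < MAX_TC; tc++) {
		if (!(ena_tc & (1u << tc)))
			break;	/* enabled TCs are contiguous from TC0 */

		for (i = 0; i < qcount_tx[tc]; i++) {
			/* the real code builds tlan_ctx and calls
			 * ice_ena_vsi_txq() for txq_map[q_idx] here
			 */
			printf("TC %u -> ring %u\n", tc, q_idx);
			q_idx++;
		}
	}
}

int main(void)
{
	const unsigned int qcount_tx[MAX_TC] = { 4, 4, 4 };

	cfg_txqs(0x7, qcount_tx);	/* TC0..TC2 enabled, 4 queues each */
	return 0;
}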
@@ -2057,6 +2080,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	/* set RSS capabilities */
 	ice_vsi_set_rss_params(vsi);
 
+	/* set tc configuration */
+	ice_vsi_set_tc_cfg(vsi);
+
 	/* create the VSI */
 	ret = ice_vsi_init(vsi);
 	if (ret)
@@ -2120,11 +2146,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 		goto unroll_vsi_init;
 	}
 
-	ice_vsi_set_tc_cfg(vsi);
-
 	/* configure VSI nodes based on number of queues and TC's */
 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
-		max_txqs[i] = vsi->num_txq;
+		max_txqs[i] = pf->num_lan_tx;
 
 	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
 			      max_txqs);
@@ -2520,11 +2544,13 @@ int ice_vsi_release(struct ice_vsi *vsi)
 int ice_vsi_rebuild(struct ice_vsi *vsi)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+	struct ice_pf *pf;
 	int ret, i;
 
 	if (!vsi)
 		return -EINVAL;
 
+	pf = vsi->back;
 	ice_vsi_free_q_vectors(vsi);
 	ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
 	ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
@@ -2534,6 +2560,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 	ice_vsi_free_arrays(vsi, false);
 	ice_dev_onetime_setup(&vsi->back->hw);
 	ice_vsi_set_num_qs(vsi);
+	ice_vsi_set_tc_cfg(vsi);
 
 	/* Initialize VSI struct elements and create VSI in FW */
 	ret = ice_vsi_init(vsi);
@@ -2580,11 +2607,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 		break;
 	}
 
-	ice_vsi_set_tc_cfg(vsi);
-
 	/* configure VSI nodes based on number of queues and TC's */
 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
-		max_txqs[i] = vsi->num_txq;
+		max_txqs[i] = pf->num_lan_tx;
 
 	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
 			      max_txqs);
@@ -349,6 +349,9 @@ ice_prepare_for_reset(struct ice_pf *pf)
 	/* disable the VSIs and their queues that are not already DOWN */
 	ice_pf_dis_all_vsi(pf);
 
+	if (hw->port_info)
+		ice_sched_clear_port(hw->port_info);
+
 	ice_shutdown_all_ctrlq(hw);
 
 	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
@@ -2543,7 +2546,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
 		if (err)
 			return err;
 	}
-
 	err = ice_vsi_cfg_txqs(vsi);
 	if (!err)
 		err = ice_vsi_cfg_rxqs(vsi);
@@ -630,7 +630,7 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
  *
  * Cleanup scheduling elements from SW DB
  */
-static void ice_sched_clear_port(struct ice_port_info *pi)
+void ice_sched_clear_port(struct ice_port_info *pi)
 {
 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
 		return;
@@ -26,6 +26,7 @@ struct ice_sched_agg_info {
 /* FW AQ command calls */
 enum ice_status ice_sched_init_port(struct ice_port_info *pi);
 enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_clear_port(struct ice_port_info *pi);
 void ice_sched_cleanup_all(struct ice_hw *hw);
 struct ice_sched_node *
 ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);