提交 4e1ec715 编写于 作者: C Chiqijun 提交者: Yang Yingliang

net/hinic: Delete unused microcode back pressure feature

driver inclusion
category: bugfix
bugzilla: 4472

-----------------------------------------------------------------------

Delete unused microcode back pressure feature.
Signed-off-by: NChiqijun <chiqijun@huawei.com>
Reviewed-by: NZengweiliang <zengweiliang.zengweiliang@huawei.com>
Signed-off-by: NYang Yingliang <yangyingliang@huawei.com>
上级 d803b85f
...@@ -1009,14 +1009,6 @@ static int hinic_set_ringparam(struct net_device *netdev, ...@@ -1009,14 +1009,6 @@ static int hinic_set_ringparam(struct net_device *netdev,
new_rq_depth == nic_dev->rq_depth) new_rq_depth == nic_dev->rq_depth)
return 0; return 0;
if (test_bit(HINIC_BP_ENABLE, &nic_dev->flags) &&
new_rq_depth <= nic_dev->bp_upper_thd) {
nicif_err(nic_dev, drv, netdev,
"BP is enable, rq_depth must be larger than upper threshold: %d\n",
nic_dev->bp_upper_thd);
return -EINVAL;
}
nicif_info(nic_dev, drv, netdev, nicif_info(nic_dev, drv, netdev,
"Change Tx/Rx ring depth from %d/%d to %d/%d\n", "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
nic_dev->sq_depth, nic_dev->rq_depth, nic_dev->sq_depth, nic_dev->rq_depth,
...@@ -1796,14 +1788,6 @@ static int hinic_set_pauseparam(struct net_device *netdev, ...@@ -1796,14 +1788,6 @@ static int hinic_set_pauseparam(struct net_device *netdev,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
if (test_bit(HINIC_BP_ENABLE, &nic_dev->flags) && pause->tx_pause &&
nic_dev->rq_depth <= nic_dev->bp_upper_thd) {
nicif_err(nic_dev, drv, netdev,
"Can not set tx pause enable, rq depth is less than bp upper threshold: %d\n",
nic_dev->bp_upper_thd);
return -EINVAL;
}
err = hinic_get_port_info(nic_dev->hwdev, &port_info); err = hinic_get_port_info(nic_dev->hwdev, &port_info);
if (err) { if (err) {
nicif_err(nic_dev, drv, netdev, nicif_err(nic_dev, drv, netdev,
......
...@@ -137,11 +137,6 @@ static unsigned char qp_coalesc_timer_high = HINIC_RX_COAL_TIME_HIGH; ...@@ -137,11 +137,6 @@ static unsigned char qp_coalesc_timer_high = HINIC_RX_COAL_TIME_HIGH;
module_param(qp_coalesc_timer_high, byte, 0444); module_param(qp_coalesc_timer_high, byte, 0444);
MODULE_PARM_DESC(qp_coalesc_timer_high, "MSI-X adaptive high coalesce time, range is 0 - 255"); MODULE_PARM_DESC(qp_coalesc_timer_high, "MSI-X adaptive high coalesce time, range is 0 - 255");
static unsigned int enable_bp;
static unsigned int bp_lower_thd = HINIC_RX_BP_LOWER_THD;
static unsigned int bp_upper_thd = HINIC_RX_BP_UPPER_THD;
#define HINIC_NIC_DEV_WQ_NAME "hinic_nic_dev_wq" #define HINIC_NIC_DEV_WQ_NAME "hinic_nic_dev_wq"
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK) #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK)
...@@ -1554,26 +1549,6 @@ static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev) ...@@ -1554,26 +1549,6 @@ static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev)
return -EFAULT; return -EFAULT;
} }
if (enable_bp) {
nic_dev->bp_upper_thd = (u16)bp_upper_thd;
nic_dev->bp_lower_thd = (u16)bp_lower_thd;
err = hinic_set_bp_thd(nic_dev->hwdev,
nic_dev->bp_lower_thd);
if (err) {
nic_err(&nic_dev->pdev->dev,
"Failed to set bp lower threshold\n");
return -EFAULT;
}
set_bit(HINIC_BP_ENABLE, &nic_dev->flags);
} else {
err = hinic_disable_fw_bp(nic_dev->hwdev);
if (err)
return -EFAULT;
clear_bit(HINIC_BP_ENABLE, &nic_dev->flags);
}
hinic_set_anti_attack(nic_dev->hwdev, true); hinic_set_anti_attack(nic_dev->hwdev, true);
if (set_link_status_follow < HINIC_LINK_FOLLOW_STATUS_MAX && if (set_link_status_follow < HINIC_LINK_FOLLOW_STATUS_MAX &&
...@@ -2520,13 +2495,6 @@ static int hinic_validate_parameters(struct hinic_lld_dev *lld_dev) ...@@ -2520,13 +2495,6 @@ static int hinic_validate_parameters(struct hinic_lld_dev *lld_dev)
{ {
struct pci_dev *pdev = lld_dev->pdev; struct pci_dev *pdev = lld_dev->pdev;
if (bp_upper_thd < bp_lower_thd || bp_lower_thd == 0) {
nic_warn(&pdev->dev, "Module Parameter bp_upper_thd: %d, bp_lower_thd: %d is invalid, resetting to default\n",
bp_upper_thd, bp_lower_thd);
bp_lower_thd = HINIC_RX_BP_LOWER_THD;
bp_upper_thd = HINIC_RX_BP_UPPER_THD;
}
/* Check poll_weight value, default poll_weight is 64. /* Check poll_weight value, default poll_weight is 64.
* The poll_weight isn't more than max queue depth, * The poll_weight isn't more than max queue depth,
* so the valid value range is 1~4096. * so the valid value range is 1~4096.
......
...@@ -1478,137 +1478,6 @@ int hinic_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map) ...@@ -1478,137 +1478,6 @@ int hinic_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map)
} }
EXPORT_SYMBOL(hinic_dcb_set_rq_iq_mapping); EXPORT_SYMBOL(hinic_dcb_set_rq_iq_mapping);
/* Program a PFC (priority flow control) threshold into management firmware.
 *
 * @hwdev:     device handle (struct hinic_hwdev *)
 * @op_type:   HINIC_PFC_SET_FUNC_THD to set the per-function threshold,
 *             HINIC_PFC_SET_GLB_THD to set the global threshold
 * @threshold: threshold value to program
 *
 * Return: 0 on success; -EINVAL for an unknown op_type; the error from
 * hinic_global_func_id_get(); or -EFAULT when the management command
 * fails, returns no data, or firmware reports a non-zero status.
 */
int hinic_set_pfc_threshold(void *hwdev, u16 op_type, u16 threshold)
{
	struct hinic_pfc_thd pfc_thd = {0};
	u16 out_size = sizeof(pfc_thd);
	int err;

	/* Only the field selected by op_type is meaningful; firmware uses
	 * op_type to decide which threshold to apply.
	 */
	if (op_type == HINIC_PFC_SET_FUNC_THD)
		pfc_thd.func_thd = threshold;
	else if (op_type == HINIC_PFC_SET_GLB_THD)
		pfc_thd.glb_thd = threshold;
	else
		return -EINVAL;

	err = hinic_global_func_id_get(hwdev, &pfc_thd.func_id);
	if (err)
		return err;

	pfc_thd.op_type = op_type;
	/* Synchronous L2NIC management message; pfc_thd doubles as the
	 * response buffer (in/out).
	 */
	err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PFC_THD,
				     &pfc_thd, sizeof(pfc_thd),
				     &pfc_thd, &out_size);
	if (err || !out_size || pfc_thd.status) {
		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
			"Failed to set pfc threshold, err: %d, status: 0x%x, out size: 0x%x\n",
			err, pfc_thd.status, out_size);
		return -EFAULT;
	}

	return 0;
}
/* Set the back pressure threshold: programs the same value as both the
 * global and the per-function PFC threshold.
 *
 * Return: 0 on success, -EFAULT if either threshold update fails.
 */
int hinic_set_bp_thd(void *hwdev, u16 threshold)
{
	int err;

	err = hinic_set_pfc_threshold(hwdev, HINIC_PFC_SET_GLB_THD, threshold);
	if (err) {
		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
			"Failed to set global threshold\n");
		return -EFAULT;
	}

	/* NOTE(review): if this second call fails the global threshold set
	 * above is not rolled back - firmware is left half-configured.
	 */
	err = hinic_set_pfc_threshold(hwdev, HINIC_PFC_SET_FUNC_THD, threshold);
	if (err) {
		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
			"Failed to set function threshold\n");
		return -EFAULT;
	}

	return 0;
}
/* Disable microcode back pressure for this function by writing a
 * per-function threshold of 0 (per the function's intent, a zero
 * threshold switches the feature off in firmware - TODO confirm against
 * firmware spec).
 *
 * Return: 0 on success, -EFAULT on failure.
 */
int hinic_disable_fw_bp(void *hwdev)
{
	int err;

	err = hinic_set_pfc_threshold(hwdev, HINIC_PFC_SET_FUNC_THD, 0);
	if (err) {
		nic_err(((struct hinic_hwdev *)hwdev)->dev_hdl,
			"Failed to disable ucode backpressure\n");
		return -EFAULT;
	}

	return 0;
}
/* Re-enable a back-pressured receive queue via the command queue,
 * passing microcode the current RQ geometry, the lower threshold at
 * which back pressure releases, and the current producer index.
 *
 * @hwdev:     device handle (struct hinic_hwdev *)
 * @q_id:      local queue id; translated to a global RQ id below
 * @lower_thd: lower back pressure threshold for the queue
 * @prod_idx:  current producer index of the RQ
 *
 * Return: 0 on success, -ENOMEM if no command buffer is available,
 * -EFAULT if posting the async cmdq command fails.
 */
int hinic_set_iq_enable(void *hwdev, u16 q_id, u16 lower_thd, u16 prod_idx)
{
	struct hinic_hwdev *dev = hwdev;
	struct hinic_cmd_enable_iq *iq_info;
	struct hinic_cmd_buf *cmd_buf;
	int err;

	cmd_buf = hinic_alloc_cmd_buf(hwdev);
	if (!cmd_buf) {
		nic_err(dev->dev_hdl, "Failed to allocate cmd buf\n");
		return -ENOMEM;
	}

	iq_info = cmd_buf->buf;
	cmd_buf->size = sizeof(*iq_info);

	iq_info->force_en = 0;
	/* Depth is encoded as log2 for the microcode command format. */
	iq_info->rq_depth = (u8)ilog2(dev->nic_io->rq_depth);
	iq_info->num_rq = (u8)dev->nic_io->max_qps;
	/* num_qps will not be larger than 64, so the u8 cast is safe */
	iq_info->glb_rq_id = dev->nic_io->global_qpn + q_id;
	iq_info->q_id = q_id;
	iq_info->lower_thd = lower_thd;
	iq_info->prod_idx = prod_idx;
	/* Microcode expects big-endian fields. */
	hinic_cpu_to_be32(iq_info, sizeof(*iq_info));

	err = hinic_cmdq_async(hwdev, HINIC_ACK_TYPE_CMDQ, HINIC_MOD_L2NIC,
			       HINIC_UCODE_CMD_SET_IQ_ENABLE, cmd_buf);
	if (err) {
		/* cmd_buf is freed here only on failure; on success the
		 * async cmdq path takes ownership. NOTE(review): presumed
		 * freed on completion by the cmdq layer - confirm.
		 */
		hinic_free_cmd_buf(hwdev, cmd_buf);
		nic_err(dev->dev_hdl, "Failed to set iq enable, err:%d\n", err);
		return -EFAULT;
	}

	return 0;
}
/* Re-enable a back-pressured receive queue via an asynchronous
 * management (MPU) message instead of the command queue; this variant
 * is called from the RX poll path (hinic_unlock_bp).
 *
 * @hwdev:     device handle (struct hinic_hwdev *)
 * @q_id:      local queue id; translated to a global RQ id below
 * @lower_thd: lower back pressure threshold for the queue
 * @prod_idx:  current producer index of the RQ
 *
 * Return: 0 on success, -EFAULT if sending fails or iq_info.status is
 * non-zero. NOTE(review): the message is sent async, so status may not
 * be filled in by firmware at this point - confirm the check is
 * meaningful here.
 */
int hinic_set_iq_enable_mgmt(void *hwdev, u16 q_id, u16 lower_thd, u16 prod_idx)
{
	struct hinic_hwdev *dev = hwdev;
	struct hinic_cmd_enable_iq_mgmt iq_info = {0};
	int err;

	iq_info.force_en = 0;
	/* Depth is encoded as log2 for the command format. */
	iq_info.rq_depth = (u8)ilog2(dev->nic_io->rq_depth);
	iq_info.num_rq = (u8)dev->nic_io->max_qps;
	/* num_qps will not be larger than 64, so the u8 cast is safe */
	iq_info.glb_rq_id = dev->nic_io->global_qpn + q_id;
	iq_info.q_id = q_id;
	iq_info.lower_thd = lower_thd;
	iq_info.prod_idx = prod_idx;

	err = l2nic_msg_to_mgmt_async(hwdev, HINIC_PORT_CMD_SET_IQ_ENABLE,
				      &iq_info, sizeof(iq_info));
	if (err || iq_info.status) {
		nic_err(dev->dev_hdl, "Failed to set iq enable for rq:%d, err: %d, status: 0x%x\n",
			q_id, err, iq_info.status);
		return -EFAULT;
	}

	return 0;
}
/* nictool */ /* nictool */
int hinic_set_lro_aging_timer(void *hwdev, u8 timer_en, u32 period) int hinic_set_lro_aging_timer(void *hwdev, u8 timer_en, u32 period)
{ {
......
...@@ -499,17 +499,6 @@ int hinic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up); ...@@ -499,17 +499,6 @@ int hinic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up);
int hinic_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map); int hinic_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map);
int hinic_set_pfc_threshold(void *hwdev, u16 op_type, u16 threshold);
int hinic_set_bp_thd(void *hwdev, u16 threshold);
int hinic_disable_fw_bp(void *hwdev);
int hinic_set_iq_enable(void *hwdev, u16 q_id, u16 lower_thd, u16 prod_idx);
int hinic_set_iq_enable_mgmt(void *hwdev, u16 q_id, u16 lower_thd,
u16 prod_idx);
/* nictool adaptation interface*/ /* nictool adaptation interface*/
int hinic_set_lro_aging_timer(void *hwdev, u8 timer_en, u32 period); int hinic_set_lro_aging_timer(void *hwdev, u8 timer_en, u32 period);
/* There should be output parameters, add the /* There should be output parameters, add the
......
...@@ -41,7 +41,6 @@ enum hinic_flags { ...@@ -41,7 +41,6 @@ enum hinic_flags {
HINIC_LP_TEST, HINIC_LP_TEST,
HINIC_RSS_ENABLE, HINIC_RSS_ENABLE,
HINIC_DCB_ENABLE, HINIC_DCB_ENABLE,
HINIC_BP_ENABLE,
HINIC_SAME_RXTX, HINIC_SAME_RXTX,
HINIC_INTR_ADAPT, HINIC_INTR_ADAPT,
HINIC_UPDATE_MAC_FILTER, HINIC_UPDATE_MAC_FILTER,
...@@ -219,8 +218,6 @@ struct hinic_nic_dev { ...@@ -219,8 +218,6 @@ struct hinic_nic_dev {
/* lock for disable or enable traffic flow */ /* lock for disable or enable traffic flow */
struct semaphore dcb_sem; struct semaphore dcb_sem;
u16 bp_lower_thd;
u16 bp_upper_thd;
bool heart_status; bool heart_status;
struct hinic_intr_coal_info *intr_coalesce; struct hinic_intr_coal_info *intr_coalesce;
......
...@@ -384,7 +384,6 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, ...@@ -384,7 +384,6 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq,
rxq_stats->other_errors; rxq_stats->other_errors;
stats->csum_errors = rxq_stats->csum_errors; stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors; stats->other_errors = rxq_stats->other_errors;
stats->unlock_bp = rxq_stats->unlock_bp;
stats->dropped = rxq_stats->dropped; stats->dropped = rxq_stats->dropped;
} while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
u64_stats_update_end(&stats->syncp); u64_stats_update_end(&stats->syncp);
...@@ -397,7 +396,6 @@ void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats) ...@@ -397,7 +396,6 @@ void hinic_rxq_clean_stats(struct hinic_rxq_stats *rxq_stats)
rxq_stats->packets = 0; rxq_stats->packets = 0;
rxq_stats->errors = 0; rxq_stats->errors = 0;
rxq_stats->csum_errors = 0; rxq_stats->csum_errors = 0;
rxq_stats->unlock_bp = 0;
rxq_stats->other_errors = 0; rxq_stats->other_errors = 0;
rxq_stats->dropped = 0; rxq_stats->dropped = 0;
...@@ -477,34 +475,6 @@ static void hinic_rx_gro(struct hinic_rxq *rxq, u32 offload_type, ...@@ -477,34 +475,6 @@ static void hinic_rx_gro(struct hinic_rxq *rxq, u32 offload_type,
skb->csum_level = 1; skb->csum_level = 1;
} }
/* Minimum number of BP-flagged CQEs accumulated before attempting to
 * re-enable the queue (rate-limits mgmt messages; see bp_cnt check below).
 */
#define HINIC_RX_BP_THD 128

/* Attempt to release microcode back pressure on an RX queue.
 *
 * @rxq:      the receive queue being polled
 * @bp_en:    true when the just-processed CQE carried the BP flag; latches
 *            HINIC_RX_STATUS_BP_EN in rxq->status
 * @force_en: when true, bypass the bp_cnt >= HINIC_RX_BP_THD requirement
 *            (the caller passes pkts < budget, i.e. the poll drained the
 *            queue)
 *
 * Sends a mgmt command re-enabling the queue once the ring has refilled
 * far enough: rq_depth minus the free entry count must reach
 * bp_upper_thd. NOTE(review): assumes rxq->delta counts free WQEBBs -
 * confirm against the RQ accounting.
 */
static void hinic_unlock_bp(struct hinic_rxq *rxq, bool bp_en, bool force_en)
{
	struct net_device *netdev = rxq->netdev;
	struct hinic_nic_dev *nic_dev = netdev_priv(netdev);
	int free_wqebbs, err;

	if (bp_en)
		set_bit(HINIC_RX_STATUS_BP_EN, &rxq->status);

	free_wqebbs = rxq->delta - 1;
	if (test_bit(HINIC_RX_STATUS_BP_EN, &rxq->status) &&
	    (nic_dev->rq_depth - free_wqebbs) >= nic_dev->bp_upper_thd &&
	    (rxq->bp_cnt >= HINIC_RX_BP_THD || force_en)) {
		err = hinic_set_iq_enable_mgmt(nic_dev->hwdev, rxq->q_id,
					       nic_dev->bp_lower_thd,
					       rxq->next_to_update);
		if (!err) {
			/* Queue re-enabled: clear latched state and count
			 * the unlock in per-queue stats.
			 */
			clear_bit(HINIC_RX_STATUS_BP_EN, &rxq->status);
			rxq->bp_cnt = 0;
			rxq->rxq_stats.unlock_bp++;
		} else {
			/* Keep BP_EN set so the next poll retries. */
			nicif_err(nic_dev, drv, netdev, "Failed to set iq enable\n");
		}
	}
}
static void hinic_copy_lp_data(struct hinic_nic_dev *nic_dev, static void hinic_copy_lp_data(struct hinic_nic_dev *nic_dev,
struct sk_buff *skb) struct sk_buff *skb)
{ {
...@@ -653,7 +623,6 @@ int hinic_rx_poll(struct hinic_rxq *rxq, int budget) ...@@ -653,7 +623,6 @@ int hinic_rx_poll(struct hinic_rxq *rxq, int budget)
u64 rx_bytes = 0; u64 rx_bytes = 0;
u16 sw_ci, num_lro; u16 sw_ci, num_lro;
int pkts = 0, nr_pkts = 0; int pkts = 0, nr_pkts = 0;
bool bp_en = false;
u16 num_wqe = 0; u16 num_wqe = 0;
while (likely(pkts < budget)) { while (likely(pkts < budget)) {
...@@ -695,10 +664,6 @@ int hinic_rx_poll(struct hinic_rxq *rxq, int budget) ...@@ -695,10 +664,6 @@ int hinic_rx_poll(struct hinic_rxq *rxq, int budget)
((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
} }
} }
if (unlikely(HINIC_GET_RX_BP_EN(status))) {
rxq->bp_cnt++;
bp_en = true;
}
rx_cqe->status = 0; rx_cqe->status = 0;
...@@ -709,9 +674,6 @@ int hinic_rx_poll(struct hinic_rxq *rxq, int budget) ...@@ -709,9 +674,6 @@ int hinic_rx_poll(struct hinic_rxq *rxq, int budget)
if (rxq->delta >= HINIC_RX_BUFFER_WRITE) if (rxq->delta >= HINIC_RX_BUFFER_WRITE)
hinic_rx_fill_buffers(rxq); hinic_rx_fill_buffers(rxq);
if (unlikely(bp_en || test_bit(HINIC_RX_STATUS_BP_EN, &rxq->status)))
hinic_unlock_bp(rxq, bp_en, pkts < budget);
u64_stats_update_begin(&rxq->rxq_stats.syncp); u64_stats_update_begin(&rxq->rxq_stats.syncp);
rxq->rxq_stats.packets += nr_pkts; rxq->rxq_stats.packets += nr_pkts;
rxq->rxq_stats.bytes += rx_bytes; rxq->rxq_stats.bytes += rx_bytes;
......
...@@ -29,23 +29,15 @@ ...@@ -29,23 +29,15 @@
#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF #define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF
#define HINIC_RX_BP_LOWER_THD 200
#define HINIC_RX_BP_UPPER_THD 400
#define HINIC_SUPPORT_LRO_ADAP_QPS_MAX 16 #define HINIC_SUPPORT_LRO_ADAP_QPS_MAX 16
#define HINIC_RX_BUFFER_WRITE 16 #define HINIC_RX_BUFFER_WRITE 16
enum {
HINIC_RX_STATUS_BP_EN,
};
struct hinic_rxq_stats { struct hinic_rxq_stats {
u64 packets; u64 packets;
u64 bytes; u64 bytes;
u64 errors; u64 errors;
u64 csum_errors; u64 csum_errors;
u64 other_errors; u64 other_errors;
u64 unlock_bp;
u64 dropped; u64 dropped;
u64 alloc_skb_err; u64 alloc_skb_err;
...@@ -88,7 +80,6 @@ struct hinic_rxq { ...@@ -88,7 +80,6 @@ struct hinic_rxq {
u16 next_to_update; u16 next_to_update;
struct device *dev; /* device for DMA mapping */ struct device *dev; /* device for DMA mapping */
u32 bp_cnt;
unsigned long status; unsigned long status;
dma_addr_t cqe_start_paddr; dma_addr_t cqe_start_paddr;
void *cqe_start_vaddr; void *cqe_start_vaddr;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册