Commit 1b2b01a7 authored by David S. Miller

Merge branch 'net-hns3-some-code-optimizations-bugfixes'

Huazhong Tan says:

====================
net: hns3: some code optimizations & bugfixes

This patch-set includes code optimizations and bugfixes for
the HNS3 ethernet controller driver.

[patch 1/11] fixes a selftest issue when doing autoneg.

[patch 2/11 - 3/11] adds two code optimizations for VLAN handling.

[patch 4/11] restores the MAC autoneg state after reset.

[patch 5/11 - 8/11] adds some code optimizations and bugfixes for
HW error handling.

[patch 9/11 - 11/11] fixes some issues related to driver loading and
unloading.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -213,7 +213,6 @@ struct hnae3_ae_dev {
const struct hnae3_ae_ops *ops;
struct list_head node;
u32 flag;
u8 override_pci_need_reset; /* fix to stop multiple reset happening */
unsigned long hw_err_reset_req;
enum hnae3_reset_type reset_type;
void *priv;
@@ -264,6 +263,8 @@ struct hnae3_ae_dev {
* get the autonegotiation state of pause frame use
* restart_autoneg()
* restart autonegotiation
* halt_autoneg()
* halt/resume autonegotiation when autonegotiation is on
* get_coalesce_usecs()
* get usecs to delay a TX interrupt after a packet is sent
* get_rx_max_coalesced_frames()
@@ -383,6 +384,7 @@ struct hnae3_ae_ops {
int (*set_autoneg)(struct hnae3_handle *handle, bool enable);
int (*get_autoneg)(struct hnae3_handle *handle);
int (*restart_autoneg)(struct hnae3_handle *handle);
int (*halt_autoneg)(struct hnae3_handle *handle, bool halt);
void (*get_coalesce_usecs)(struct hnae3_handle *handle,
u32 *tx_usecs, u32 *rx_usecs);
......
@@ -1950,7 +1950,7 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
ops = ae_dev->ops;
/* request the reset */
if (ops->reset_event) {
if (ae_dev->hw_err_reset_req) {
reset_type = ops->get_reset_level(ae_dev,
&ae_dev->hw_err_reset_req);
ops->set_default_reset_request(ae_dev, reset_type);
......
@@ -336,6 +336,13 @@ static void hns3_self_test(struct net_device *ndev,
h->ae_algo->ops->enable_vlan_filter(h, false);
#endif
/* Tell firmware to stop mac autoneg before the loopback test starts,
* otherwise the loopback test may fail when the port is still
* negotiating.
*/
if (h->ae_algo->ops->halt_autoneg)
h->ae_algo->ops->halt_autoneg(h, true);
set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
@@ -358,6 +365,9 @@ static void hns3_self_test(struct net_device *ndev,
clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
if (h->ae_algo->ops->halt_autoneg)
h->ae_algo->ops->halt_autoneg(h, false);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
if (dis_vlan_filter)
h->ae_algo->ops->enable_vlan_filter(h, true);
......
@@ -1060,6 +1060,52 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
return ret;
}
/* hclge_query_bd_num: query the number of buffer descriptors
* @hdev: pointer to struct hclge_dev
* @is_ras: true for RAS, false for MSI-X
* @mpf_bd_num: number of main PF interrupt buffer descriptors
* @pf_bd_num: number of non-main-PF interrupt buffer descriptors
*
* This function queries the number of mpf and pf buffer descriptors.
*/
static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
int *mpf_bd_num, int *pf_bd_num)
{
struct device *dev = &hdev->pdev->dev;
u32 mpf_min_bd_num, pf_min_bd_num;
enum hclge_opcode_type opcode;
struct hclge_desc desc_bd;
int ret;
if (is_ras) {
opcode = HCLGE_QUERY_RAS_INT_STS_BD_NUM;
mpf_min_bd_num = HCLGE_MPF_RAS_INT_MIN_BD_NUM;
pf_min_bd_num = HCLGE_PF_RAS_INT_MIN_BD_NUM;
} else {
opcode = HCLGE_QUERY_MSIX_INT_STS_BD_NUM;
mpf_min_bd_num = HCLGE_MPF_MSIX_INT_MIN_BD_NUM;
pf_min_bd_num = HCLGE_PF_MSIX_INT_MIN_BD_NUM;
}
hclge_cmd_setup_basic_desc(&desc_bd, opcode, true);
ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
if (ret) {
dev_err(dev, "fail(%d) to query msix int status bd num\n",
ret);
return ret;
}
*mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
*pf_bd_num = le32_to_cpu(desc_bd.data[1]);
if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num) {
dev_err(dev, "Invalid bd num: mpf(%d), pf(%d)\n",
*mpf_bd_num, *pf_bd_num);
return -EINVAL;
}
return 0;
}
/* hclge_handle_mpf_ras_error: handle all main PF RAS errors
* @hdev: pointer to struct hclge_dev
* @desc: descriptor for describing the command
@@ -1291,24 +1337,16 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
struct hclge_desc *desc;
int ret;
ret = hclge_query_bd_num(hdev, true, &mpf_bd_num, &pf_bd_num);
if (ret)
return ret;

bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -1606,6 +1644,8 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
if (status & HCLGE_RAS_REG_NFE_MASK ||
status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
ae_dev->hw_err_reset_req = 0;
else
goto out;
/* Handling Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
@@ -1613,27 +1653,22 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
"HNS Non-Fatal RAS error(status=0x%x) identified\n",
status);
hclge_handle_all_ras_errors(hdev);
}

/* Handling Non-fatal Rocee RAS errors */
if (hdev->pdev->revision >= 0x21 &&
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
dev_warn(dev, "ROCEE Non-Fatal RAS error identified\n");
hclge_handle_rocee_ras_error(ae_dev);
}

if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
goto out;

if (ae_dev->hw_err_reset_req)
return PCI_ERS_RESULT_NEED_RESET;
out:
return PCI_ERS_RESULT_RECOVERED;
}
@@ -1847,28 +1882,21 @@ static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
struct hclge_mac_tnl_stats mac_tnl_stats;
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
struct hclge_desc *desc;
u32 status;
int ret;
ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num);
if (ret)
goto out;

bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc) {
ret = -ENOMEM;
goto out;
}
ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num,
reset_requests);
@@ -1931,7 +1959,6 @@ void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
struct hclge_desc *desc;
u32 status;
int ret;
@@ -1940,19 +1967,11 @@ void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num);
if (ret)
return;
bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc)
return;
......
@@ -6,6 +6,11 @@
#include "hclge_main.h"
#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10
#define HCLGE_PF_RAS_INT_MIN_BD_NUM 4
#define HCLGE_MPF_MSIX_INT_MIN_BD_NUM 10
#define HCLGE_PF_MSIX_INT_MIN_BD_NUM 4
#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
#define HCLGE_RAS_REG_NFE_MASK 0xFF00
#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
......
@@ -35,6 +35,7 @@
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
@@ -2315,6 +2316,17 @@ static int hclge_restart_autoneg(struct hnae3_handle *handle)
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
return hclge_set_autoneg_en(hdev, !halt);
return 0;
}
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
@@ -2388,6 +2400,15 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}
if (hdev->hw.mac.support_autoneg) {
ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
if (ret) {
dev_err(&hdev->pdev->dev,
"Config mac autoneg fail ret=%d\n", ret);
return ret;
}
}
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
@@ -3528,6 +3549,7 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_port_info(hdev);
hclge_update_link_status(hdev);
hclge_update_vport_alive(hdev);
hclge_sync_vlan_filter(hdev);
if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
hclge_rfs_filter_expire(hdev);
hdev->fd_arfs_expire_timer = 0;
@@ -7101,12 +7123,13 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
if (!req0->resp_code)
return 0;
/* vf vlan filter is disabled when the vf vlan table is full,
* so a new vlan id will not be added into the vf vlan table.
* Just return 0 without warning, to avoid massive verbose
* print logs on unload.
*/
if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
return 0;
dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%d.\n",
@@ -7730,11 +7753,20 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
bool writen_to_tbl = false;
int ret = 0;
/* When the device is resetting, firmware is unable to handle the
* mailbox. Just record the vlan id, and remove it after the reset
* finishes.
*/
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
set_bit(vlan_id, vport->vlan_del_fail_bmap);
return -EBUSY;
}

/* When port based vlan is enabled, we use the port based vlan as the
* vlan filter entry. In this case, we don't update the vlan filter
* table when the user adds a new vlan or removes an existing vlan,
* just update the vport vlan list. The vlan id in the vlan list won't
* be written into the vlan filter table until port based vlan is
* disabled.
*/
if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
@@ -7742,16 +7774,53 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
writen_to_tbl = true;
}
if (!ret) {
if (is_kill)
hclge_rm_vport_vlan_table(vport, vlan_id, false);
else
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
} else if (is_kill) {
/* When removing the hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistent
* with the stack.
*/
set_bit(vlan_id, vport->vlan_del_fail_bmap);
}

return ret;
}
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT 60

int i, ret, sync_cnt = 0;
u16 vlan_id;
/* start from vport 1 for PF is always alive */
for (i = 0; i < hdev->num_alloc_vport; i++) {
struct hclge_vport *vport = &hdev->vport[i];
vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
VLAN_N_VID);
while (vlan_id != VLAN_N_VID) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id, vlan_id,
0, true);
if (ret && ret != -EINVAL)
return;
clear_bit(vlan_id, vport->vlan_del_fail_bmap);
hclge_rm_vport_vlan_table(vport, vlan_id, false);
sync_cnt++;
if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
return;
vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
VLAN_N_VID);
}
}
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
@@ -8213,25 +8282,44 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
int rst_cnt;
int ret;
rst_cnt = hdev->rst_stats.reset_cnt;
ret = client->ops->init_instance(&vport->nic);
if (ret)
return ret;
set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
hnae3_set_client_init_flag(client, ae_dev, 1);
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
rst_cnt != hdev->rst_stats.reset_cnt) {
ret = -EBUSY;
goto init_nic_err;
}
/* Enable nic hw error interrupts */
ret = hclge_config_nic_hw_error(hdev, true);
if (ret)
if (ret) {
dev_err(&ae_dev->pdev->dev,
"fail(%d) to enable hw error interrupts\n", ret);
goto init_nic_err;
}
hnae3_set_client_init_flag(client, ae_dev, 1);
if (netif_msg_drv(&hdev->vport->nic))
hclge_info_show(hdev);
return ret;
init_nic_err:
clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
msleep(HCLGE_WAIT_RESET_DONE);
client->ops->uninit_instance(&vport->nic, 0);
return ret;
}
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
@@ -8239,6 +8327,7 @@ static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->roce.client;
struct hclge_dev *hdev = ae_dev->priv;
int rst_cnt;
int ret;
if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
@@ -8250,14 +8339,38 @@ static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
if (ret)
return ret;
rst_cnt = hdev->rst_stats.reset_cnt;
ret = client->ops->init_instance(&vport->roce);
if (ret)
return ret;
set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
rst_cnt != hdev->rst_stats.reset_cnt) {
ret = -EBUSY;
goto init_roce_err;
}
/* Enable roce ras interrupts */
ret = hclge_config_rocee_ras_interrupt(hdev, true);
if (ret) {
dev_err(&ae_dev->pdev->dev,
"fail(%d) to enable roce ras interrupts\n", ret);
goto init_roce_err;
}
hnae3_set_client_init_flag(client, ae_dev, 1);
return 0;
init_roce_err:
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
msleep(HCLGE_WAIT_RESET_DONE);
hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
return ret;
}
static int hclge_init_client_instance(struct hnae3_client *client,
@@ -8300,12 +8413,6 @@ static int hclge_init_client_instance(struct hnae3_client *client,
}
}
/* Enable roce ras interrupts */
ret = hclge_config_rocee_ras_interrupt(hdev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
"fail(%d) to enable roce ras interrupts\n", ret);
return ret;
clear_nic:
@@ -8329,6 +8436,9 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
vport = &hdev->vport[i];
if (hdev->roce_client) {
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
msleep(HCLGE_WAIT_RESET_DONE);
hdev->roce_client->ops->uninit_instance(&vport->roce,
0);
hdev->roce_client = NULL;
@@ -8338,6 +8448,9 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
return;
if (hdev->nic_client && client->ops->uninit_instance) {
clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
msleep(HCLGE_WAIT_RESET_DONE);
client->ops->uninit_instance(&vport->nic, 0);
hdev->nic_client = NULL;
vport->nic.client = NULL;
@@ -9265,6 +9378,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.restart_autoneg = hclge_restart_autoneg,
.halt_autoneg = hclge_halt_autoneg,
.get_pauseparam = hclge_get_pauseparam,
.set_pauseparam = hclge_set_pauseparam,
.set_mtu = hclge_set_mtu,
......
@@ -700,6 +700,7 @@ struct hclge_mac_tnl_stats {
};
#define HCLGE_RESET_INTERVAL (10 * HZ)
#define HCLGE_WAIT_RESET_DONE 100
#pragma pack(1)
struct hclge_vf_vlan_cfg {
@@ -929,6 +930,7 @@ struct hclge_vport {
u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
struct hclge_port_base_vlan_config port_base_vlan_cfg;
struct hclge_tx_vtag_cfg txvlan_cfg;
struct hclge_rx_vtag_cfg rxvlan_cfg;
......
@@ -1244,6 +1244,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
int ret;
if (vlan_id > HCLGEVF_MAX_VLAN_ID)
return -EINVAL;
@@ -1251,12 +1252,53 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
/* When the device is resetting, firmware is unable to handle the
* mailbox. Just record the vlan id, and remove it after the reset
* finishes.
*/
if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
return -EBUSY;
}
msg_data[0] = is_kill;
memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
memcpy(&msg_data[3], &proto, sizeof(proto));
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_VLAN_FILTER, msg_data,
HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
/* When removing the hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistent
* with the stack.
*/
if (is_kill && ret)
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
return ret;
}
static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_SYNC_COUNT 60
struct hnae3_handle *handle = &hdev->nic;
int ret, sync_cnt = 0;
u16 vlan_id;
vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
while (vlan_id != VLAN_N_VID) {
ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
vlan_id, true);
if (ret)
return;
clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
sync_cnt++;
if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
return;
vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
}
}
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
@@ -1797,6 +1839,8 @@ static void hclgevf_service_task(struct work_struct *work)
hclgevf_update_link_mode(hdev);
hclgevf_sync_vlan_filter(hdev);
hclgevf_deferred_task_schedule(hdev);
clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
......
@@ -4,6 +4,7 @@
#ifndef __HCLGEVF_MAIN_H
#define __HCLGEVF_MAIN_H
#include <linux/fs.h>
#include <linux/if_vlan.h>
#include <linux/types.h>
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
@@ -270,6 +271,8 @@ struct hclgevf_dev {
u16 *vector_status;
int *vector_irq;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
......