Commit 70e79832 authored by David S. Miller

Merge branch 'hns3-add-code-optimization-for-VF-reset-and-some-new-reset-feature'

Huazhong Tan says:

====================
hns3: add code optimization for VF reset and some new reset features

Currently the hardware supports the following resets (a short sketch of
the severity ordering they imply appears after this list):
1. VF reset: triggered by sending a cmd to the IMP (Integrated Management
   Processor). Resets only the specified VF function and does not affect
   other PFs or VFs.
2. PF reset: triggered by sending a cmd to the IMP. Resets only the
   specified PF and its VFs.
3. PF FLR: triggered by the PCIe subsystem. Resets only the specified PF
   and its VFs.
4. VF FLR: triggered by the PCIe subsystem. Resets only the specified VF
   function and does not affect other PFs or VFs.
5. Core reset: triggered by writing to a register. Resets most hardware
   units, such as the SSU, which affects all PFs and VFs.
6. Global reset: triggered by writing to a register. Resets all hardware
   units, which affects all PFs and VFs.
7. IMP reset: triggered by the IMU (Intelligent Management Unit) when the
   IMP is no longer feeding the IMU's watchdog. The IMU reloads the IMP
   firmware, and the IMP performs a global reset after the firmware is
   reloaded, which affects all PFs and VFs.
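
A short sketch in plain C (illustrative only; the enum names loosely mirror
enum hnae3_reset_type in the diff below, and the bitmap scan approximates
what hclge_get_reset_level() does there) of the severity ordering these
levels imply, where a deeper pending reset supersedes shallower ones:

    #include <stdio.h>

    enum reset_level {      /* shallowest to deepest */
            FUNC_RESET,     /* PF reset: one PF and its VFs */
            FLR_RESET,      /* PCIe function-level reset */
            CORE_RESET,     /* most hardware units, e.g. the SSU */
            GLOBAL_RESET,   /* all hardware units */
            IMP_RESET,      /* firmware reload, then global reset */
            NONE_RESET,
    };

    /* Return the deepest pending level and clear its bit. */
    static enum reset_level get_reset_level(unsigned long *pending)
    {
            int lvl;

            for (lvl = IMP_RESET; lvl >= FUNC_RESET; lvl--) {
                    if (*pending & (1UL << lvl)) {
                            *pending &= ~(1UL << lvl);
                            return (enum reset_level)lvl;
                    }
            }
            return NONE_RESET;
    }

    int main(void)
    {
            unsigned long pending = (1UL << FUNC_RESET) | (1UL << GLOBAL_RESET);

            /* The pending global reset is serviced before the PF reset. */
            printf("service level %d first\n", get_reset_level(&pending));
            return 0;
    }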

The current driver supports only PF/VF reset and an incomplete core and
global reset (lacking the VF reset handling), so this patchset adds
complete reset support to the hns3 driver.

This patchset also contains some optimizations related to reset.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -85,6 +85,12 @@ struct hclge_mbx_pf_to_vf_cmd {
u16 msg[8];
};
+struct hclge_vf_rst_cmd {
+u8 dest_vfid;
+u8 vf_rst;
+u8 rsv[22];
+};
/* used by VF to store the received Async responses from PF */
struct hclgevf_mbx_arq_ring {
#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8
......
@@ -124,7 +124,10 @@ enum hnae3_reset_notify_type {
enum hnae3_reset_type {
HNAE3_VF_RESET,
+HNAE3_VF_FUNC_RESET,
+HNAE3_VF_PF_FUNC_RESET,
HNAE3_VF_FULL_RESET,
+HNAE3_FLR_RESET,
HNAE3_FUNC_RESET,
HNAE3_CORE_RESET,
HNAE3_GLOBAL_RESET,
@@ -132,6 +135,11 @@ enum hnae3_reset_type {
HNAE3_NONE_RESET,
};
+enum hnae3_flr_state {
+HNAE3_FLR_DOWN,
+HNAE3_FLR_DONE,
+};
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -297,7 +305,8 @@ struct hnae3_ae_dev {
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
+void (*flr_prepare)(struct hnae3_ae_dev *ae_dev);
+void (*flr_done)(struct hnae3_ae_dev *ae_dev);
int (*init_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
void (*uninit_client_instance)(struct hnae3_client *client,
......
@@ -415,9 +415,6 @@ static void hns3_nic_net_down(struct net_device *netdev)
const struct hnae3_ae_ops *ops;
int i;
-if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
-return;
/* disable vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
@@ -439,6 +436,11 @@ static void hns3_nic_net_down(struct net_device *netdev)
static int hns3_nic_net_stop(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+return 0;
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
@@ -1849,9 +1851,29 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+static void hns3_reset_prepare(struct pci_dev *pdev)
+{
+struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+dev_info(&pdev->dev, "hns3 flr prepare\n");
+if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
+ae_dev->ops->flr_prepare(ae_dev);
+}
+static void hns3_reset_done(struct pci_dev *pdev)
+{
+struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+dev_info(&pdev->dev, "hns3 flr done\n");
+if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
+ae_dev->ops->flr_done(ae_dev);
+}
static const struct pci_error_handlers hns3_err_handler = {
.error_detected = hns3_error_detected,
.slot_reset = hns3_slot_reset,
+.reset_prepare = hns3_reset_prepare,
+.reset_done = hns3_reset_done,
};
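
For context, .reset_prepare and .reset_done are the hooks the PCI core
invokes around a function-level reset. A hedged sketch (assuming standard
PCI core behavior; trigger_flr is a hypothetical caller, not part of this
patch) of how an FLR reaches the callbacks above:

    #include <linux/pci.h>

    /* Hypothetical kernel-side caller, for illustration only.
     * pci_reset_function() saves device state and invokes
     * err_handler->reset_prepare, performs the reset (an FLR when the
     * device supports it), then restores state and invokes
     * err_handler->reset_done.
     */
    static int trigger_flr(struct pci_dev *pdev)
    {
            return pci_reset_function(pdev);
    }

Userspace can exercise the same path by writing 1 to the device's sysfs
reset attribute.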
static struct pci_driver hns3_driver = {
@@ -2699,6 +2721,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
+struct hns3_nic_priv *priv = netdev_priv(napi->dev);
struct hns3_enet_ring *ring;
int rx_pkt_total = 0;
@@ -2707,6 +2730,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
int rx_budget;
+if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+napi_complete(napi);
+return 0;
+}
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
@@ -2731,9 +2759,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
-napi_complete(napi);
-hns3_update_new_int_gl(tqp_vector);
-hns3_mask_vector_irq(tqp_vector, 1);
+if (likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) &&
+napi_complete(napi)) {
+hns3_update_new_int_gl(tqp_vector);
+hns3_mask_vector_irq(tqp_vector, 1);
+}
return rx_pkt_total;
}
@@ -3818,20 +3848,30 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
ret = hns3_nic_alloc_vector_data(priv);
if (ret)
return ret;
hns3_restore_coal(priv);
ret = hns3_nic_init_vector_data(priv);
if (ret)
-return ret;
+goto err_dealloc_vector;
ret = hns3_init_all_ring(priv);
-if (ret) {
-hns3_nic_uninit_vector_data(priv);
-priv->ring_data = NULL;
-}
+if (ret)
+goto err_uninit_vector;
+set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
+err_uninit_vector:
+hns3_nic_uninit_vector_data(priv);
+priv->ring_data = NULL;
+err_dealloc_vector:
+hns3_nic_dealloc_vector_data(priv);
+return ret;
}
@@ -3856,6 +3896,10 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_store_coal(priv);
+ret = hns3_nic_dealloc_vector_data(priv);
+if (ret)
+netdev_err(netdev, "dealloc vector error\n");
ret = hns3_uninit_all_ring(priv);
if (ret)
netdev_err(netdev, "uninit ring error\n");
......
@@ -593,7 +593,11 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
{
-return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET));
+return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
+ae_dev->reset_type == HNAE3_FLR_RESET ||
+ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
+ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
+ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
}
#define hns3_read_dev(a, reg) \
......
@@ -350,11 +350,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
hdev->hw.cmq.crq.next_to_use = 0;
hclge_cmd_init_regs(&hdev->hw);
-clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+/* Check if there is new reset pending, because the higher level
+ * reset may happen when lower level reset is being processed.
+ */
+if (hclge_is_reset_pending(hdev)) {
+set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+return -EBUSY;
+}
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
......
@@ -2144,6 +2144,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
/* check for vector0 reset event sources */
+if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
+set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+return HCLGE_VECTOR0_EVENT_RST;
+}
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
@@ -2160,13 +2168,6 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_RST;
}
-if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
-dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
-set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
-*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
-return HCLGE_VECTOR0_EVENT_RST;
-}
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
@@ -2352,11 +2353,15 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS 100
-#define HCLGE_RESET_WAIT_CNT 5
+#define HCLGE_RESET_WAIT_CNT 200
u32 val, reg, reg_bit;
u32 cnt = 0;
switch (hdev->reset_type) {
+case HNAE3_IMP_RESET:
+reg = HCLGE_GLOBAL_RESET_REG;
+reg_bit = HCLGE_IMP_RESET_BIT;
+break;
case HNAE3_GLOBAL_RESET:
reg = HCLGE_GLOBAL_RESET_REG;
reg_bit = HCLGE_GLOBAL_RESET_BIT;
@@ -2369,6 +2374,8 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
break;
+case HNAE3_FLR_RESET:
+break;
default:
dev_err(&hdev->pdev->dev,
"Wait for unsupported reset type: %d\n",
@@ -2376,6 +2383,20 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return -EINVAL;
}
+if (hdev->reset_type == HNAE3_FLR_RESET) {
+while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+cnt++ < HCLGE_RESET_WAIT_CNT)
+msleep(HCLGE_RESET_WATI_MS);
+if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+dev_err(&hdev->pdev->dev,
+"flr wait timeout: %d\n", cnt);
+return -EBUSY;
+}
+return 0;
+}
val = hclge_read_dev(&hdev->hw, reg);
while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
@@ -2392,6 +2413,55 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return 0;
}
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
+{
+struct hclge_vf_rst_cmd *req;
+struct hclge_desc desc;
+req = (struct hclge_vf_rst_cmd *)desc.data;
+hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
+req->dest_vfid = func_id;
+if (reset)
+req->vf_rst = 0x1;
+return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+{
+int i;
+for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
+struct hclge_vport *vport = &hdev->vport[i];
+int ret;
+/* Send cmd to set/clear VF's FUNC_RST_ING */
+ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
+if (ret) {
+dev_err(&hdev->pdev->dev,
+"set vf(%d) rst failed %d!\n",
+vport->vport_id, ret);
+return ret;
+}
+if (!reset)
+continue;
+/* Inform VF to process the reset.
+ * hclge_inform_reset_assert_to_vf may fail if VF
+ * driver is not loaded.
+ */
+ret = hclge_inform_reset_assert_to_vf(vport);
+if (ret)
+dev_warn(&hdev->pdev->dev,
+"inform reset to vf(%d) failed %d!\n",
+vport->vport_id, ret);
+}
+return 0;
+}
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
struct hclge_desc desc;
@@ -2434,6 +2504,12 @@ static void hclge_do_reset(struct hclge_dev *hdev)
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
+case HNAE3_FLR_RESET:
+dev_info(&pdev->dev, "FLR requested\n");
+/* schedule again to check later */
+set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
+hclge_reset_task_schedule(hdev);
+break;
default:
dev_warn(&pdev->dev,
"Unsupported reset type: %d\n", hdev->reset_type);
@@ -2465,6 +2541,9 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
rst_level = HNAE3_FUNC_RESET;
clear_bit(HNAE3_FUNC_RESET, addr);
+} else if (test_bit(HNAE3_FLR_RESET, addr)) {
+rst_level = HNAE3_FLR_RESET;
+clear_bit(HNAE3_FLR_RESET, addr);
}
return rst_level;
@@ -2495,12 +2574,34 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
hclge_enable_vector(&hdev->misc_vector, true);
}
+static int hclge_reset_prepare_down(struct hclge_dev *hdev)
+{
+int ret = 0;
+switch (hdev->reset_type) {
+case HNAE3_FUNC_RESET:
+/* fall through */
+case HNAE3_FLR_RESET:
+ret = hclge_set_all_vf_rst(hdev, true);
+break;
+default:
+break;
+}
+return ret;
+}
static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
+u32 reg_val;
int ret = 0;
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
+/* There is no mechanism for PF to know if VF has stopped IO
+ * for now, just wait 100 ms for VF to stop IO
+ */
+msleep(100);
ret = hclge_func_reset_cmd(hdev, 0);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -2515,6 +2616,19 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
*/
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
break;
+case HNAE3_FLR_RESET:
+/* There is no mechanism for PF to know if VF has stopped IO
+ * for now, just wait 100 ms for VF to stop IO
+ */
+msleep(100);
+set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+break;
+case HNAE3_IMP_RESET:
+reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
+BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
+break;
default:
break;
}
@@ -2562,6 +2676,23 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
return false;
}
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
+{
+int ret = 0;
+switch (hdev->reset_type) {
+case HNAE3_FUNC_RESET:
+/* fall through */
+case HNAE3_FLR_RESET:
+ret = hclge_set_all_vf_rst(hdev, false);
+break;
+default:
+break;
+}
+return ret;
+}
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
@@ -2579,6 +2710,10 @@ static void hclge_reset(struct hclge_dev *hdev)
if (ret)
goto err_reset;
+ret = hclge_reset_prepare_down(hdev);
+if (ret)
+goto err_reset;
rtnl_lock();
ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
@@ -2614,6 +2749,10 @@ static void hclge_reset(struct hclge_dev *hdev)
hclge_clear_reset_cause(hdev);
+ret = hclge_reset_prepare_up(hdev);
+if (ret)
+goto err_reset_lock;
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
if (ret)
goto err_reset_lock;
@@ -6815,6 +6954,34 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
cancel_work_sync(&hdev->mbx_service_task);
}
+static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_FLR_WAIT_MS 100
+#define HCLGE_FLR_WAIT_CNT 50
+struct hclge_dev *hdev = ae_dev->priv;
+int cnt = 0;
+clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+hclge_reset_event(hdev->pdev, NULL);
+while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+cnt++ < HCLGE_FLR_WAIT_CNT)
+msleep(HCLGE_FLR_WAIT_MS);
+if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+dev_err(&hdev->pdev->dev,
+"flr wait down timeout: %d\n", cnt);
+}
+static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+struct hclge_dev *hdev = ae_dev->priv;
+set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
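/* Note: the following summary is an interpretation of the FLR flow in
 * this patch, not a comment from the original source.
 * 1. The PCI core calls hns3_reset_prepare() -> hclge_flr_prepare(),
 *    which requests HNAE3_FLR_RESET and waits for the reset task to
 *    stop the device and set HNAE3_FLR_DOWN.
 * 2. The PCI core then performs the actual function-level reset.
 * 3. The PCI core calls hns3_reset_done() -> hclge_flr_done(), which
 *    sets HNAE3_FLR_DONE; hclge_reset_wait() polls that bit so the
 *    reset task can bring the device back up.
 */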
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -7473,6 +7640,8 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
+.flr_prepare = hclge_flr_prepare,
+.flr_done = hclge_flr_done,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
.map_ring_to_vector = hclge_map_ring_to_vector,
......
@@ -97,6 +97,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
/* Reset related Registers */
+#define HCLGE_PF_OTHER_INT_REG 0x20600
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
@@ -116,6 +117,8 @@ enum HLCGE_PORT_TYPE {
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
@@ -594,6 +597,7 @@ struct hclge_dev {
struct hclge_misc_vector misc_vector;
struct hclge_hw_stats hw_stats;
unsigned long state;
+unsigned long flr_state;
unsigned long last_reset_time;
enum hnae3_reset_type reset_type;
@@ -775,6 +779,12 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
return tqp->index;
}
+static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
+{
+return !!hdev->reset_pending;
+}
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
@@ -784,6 +794,7 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
......
@@ -79,15 +79,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
return status;
}
-static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
+struct hclge_dev *hdev = vport->back;
+enum hnae3_reset_type reset_type;
u8 msg_data[2];
u8 dest_vfid;
dest_vfid = (u8)vport->vport_id;
+if (hdev->reset_type == HNAE3_FUNC_RESET)
+reset_type = HNAE3_VF_PF_FUNC_RESET;
+else if (hdev->reset_type == HNAE3_FLR_RESET)
+reset_type = HNAE3_VF_FULL_RESET;
+else
+return -EINVAL;
+memcpy(&msg_data[0], &reset_type, sizeof(u16));
/* send this requested info to VF */
-return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
+return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}
@@ -363,24 +374,10 @@ static void hclge_reset_vf(struct hclge_vport *vport,
int ret;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
-mbx_req->mbx_src_vfid);
-/* Acknowledge VF that PF is now about to assert the reset for the VF.
- * On receiving this message VF will get into pending state and will
- * start polling for the hardware reset completion status.
- */
-ret = hclge_inform_reset_assert_to_vf(vport);
-if (ret) {
-dev_err(&hdev->pdev->dev,
-"PF fail(%d) to inform VF(%d)of reset, reset failed!\n",
-ret, vport->vport_id);
-return;
-}
+vport->vport_id);
-dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n",
-mbx_req->mbx_src_vfid);
-/* reset this virtual function */
-hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
+ret = hclge_func_reset_cmd(hdev, vport->vport_id);
+hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
......
@@ -189,7 +189,8 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
-if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+if (num > hclgevf_ring_space(&hw->cmq.csq) ||
+test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
@@ -338,6 +339,16 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+/* Check if there is new reset pending, because the higher level
+ * reset may happen when lower level reset is being processed.
+ */
+if (hclgevf_is_reset_pending(hdev)) {
+set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+return -EBUSY;
+}
/* get firmware version */
ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
......
@@ -31,11 +31,19 @@
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+/* RST register bits for RESET event */
+#define HCLGEVF_VECTOR0_RST_INT_B 2
#define HCLGEVF_TQP_RESET_TRY_TIMES 10
/* Reset related Registers */
-#define HCLGEVF_FUN_RST_ING 0x20C00
-#define HCLGEVF_FUN_RST_ING_B 0
+#define HCLGEVF_RST_ING 0x20C00
+#define HCLGEVF_FUN_RST_ING_BIT BIT(0)
+#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5)
+#define HCLGEVF_CORE_RST_ING_BIT BIT(6)
+#define HCLGEVF_IMP_RST_ING_BIT BIT(7)
+#define HCLGEVF_RST_ING_BITS \
+(HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
+HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
@@ -54,17 +62,25 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+enum hclgevf_evt_cause {
+HCLGEVF_VECTOR0_EVENT_RST,
+HCLGEVF_VECTOR0_EVENT_MBX,
+HCLGEVF_VECTOR0_EVENT_OTHER,
+};
/* states of hclgevf device & tasks */
enum hclgevf_states {
/* device states */
HCLGEVF_STATE_DOWN,
HCLGEVF_STATE_DISABLED,
+HCLGEVF_STATE_IRQ_INITED,
/* task states */
HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
+HCLGEVF_STATE_CMD_DISABLE,
};
#define HCLGEVF_MPF_ENBALE 1
@@ -145,9 +161,12 @@ struct hclgevf_dev {
struct hclgevf_misc_vector misc_vector;
struct hclgevf_rss_cfg rss_cfg;
unsigned long state;
+unsigned long flr_state;
+unsigned long default_reset_request;
unsigned long last_reset_time;
-enum hnae3_reset_type reset_level;
+unsigned long reset_pending;
+enum hnae3_reset_type reset_type;
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
@@ -196,18 +215,9 @@ struct hclgevf_dev {
u32 flag;
};
-static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
-{
-return (hdev &&
-(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
-(hdev->reset_level == HNAE3_VF_RESET));
-}
-static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
+static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
{
-return (hdev &&
-(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
-(hdev->reset_level == HNAE3_VF_FULL_RESET));
+return !!hdev->reset_pending;
}
int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
......
@@ -40,6 +40,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
}
while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+return -EIO;
udelay(HCLGEVF_SLEEP_USCOEND);
i++;
}
@@ -148,6 +151,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
crq = &hdev->hw.cmq.crq;
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
+if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+dev_info(&hdev->pdev->dev, "vf crq need init\n");
+return;
+}
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
@@ -233,6 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
+enum hnae3_reset_type reset_type;
u16 link_status;
u16 *msg_q;
u8 duplex;
@@ -248,6 +257,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
/* process all the async queue messages */
while (tail != hdev->arq.head) {
+if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+dev_info(&hdev->pdev->dev,
+"vf crq need init in async\n");
+return;
+}
msg_q = hdev->arq.msg_q[hdev->arq.head];
switch (msg_q[0]) {
@@ -267,7 +282,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
* has been completely reset. After this stack should
* eventually be re-initialized.
*/
-hdev->reset_level = HNAE3_VF_RESET;
+reset_type = le16_to_cpu(msg_q[1]);
+set_bit(reset_type, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
......