Commit 0a14fd29 authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: updates for -next

This series adds some updates for the HNS3 ethernet driver: support for the advanced RX descriptor layout (hardware-parsed packet type and RX checksum completion) and a rework of the debugfs interface.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -91,6 +91,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_STASH_B,
HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B,
HNAE3_DEV_SUPPORT_PAUSE_B,
HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@ -141,6 +142,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (ae_dev)->caps)
#define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps)
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
@@ -246,6 +250,24 @@ enum hnae3_port_base_vlan_state {
HNAE3_PORT_BASE_VLAN_NOCHANGE,
};
enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_TM_NODES,
HNAE3_DBG_CMD_TM_PRI,
HNAE3_DBG_CMD_TM_QSET,
HNAE3_DBG_CMD_DEV_INFO,
HNAE3_DBG_CMD_TX_BD,
HNAE3_DBG_CMD_RX_BD,
HNAE3_DBG_CMD_MAC_UC,
HNAE3_DBG_CMD_MAC_MC,
HNAE3_DBG_CMD_MNG_TBL,
HNAE3_DBG_CMD_LOOPBACK,
HNAE3_DBG_CMD_INTERRUPT_INFO,
HNAE3_DBG_CMD_RESET_INFO,
HNAE3_DBG_CMD_IMP_INFO,
HNAE3_DBG_CMD_NCL_CONFIG,
HNAE3_DBG_CMD_UNKNOWN,
};
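/* Illustrative sketch, not part of this patch: with the enum above, a
 * dbg_read_cmd implementation can dispatch on a command id instead of
 * parsing a command string. A minimal dispatcher could pair each command
 * with a dump handler, in the spirit of the hclge_dbg_func struct added
 * later in this series. All "example_" names are hypothetical and normal
 * kernel context (ARRAY_SIZE, scnprintf, -EOPNOTSUPP) is assumed.
 */
struct example_dbg_func {
	enum hnae3_dbg_cmd cmd;
	int (*dbg_dump)(struct hnae3_handle *handle, char *buf, int len);
};

static int example_dump_dev_info(struct hnae3_handle *handle, char *buf,
				 int len)
{
	/* write a human-readable dump into the debugfs read buffer */
	scnprintf(buf, len, "dev info placeholder\n");
	return 0;
}

static int example_dbg_read_cmd(struct hnae3_handle *handle,
				enum hnae3_dbg_cmd cmd, char *buf, int len)
{
	static const struct example_dbg_func funcs[] = {
		{ HNAE3_DBG_CMD_DEV_INFO, example_dump_dev_info },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(funcs); i++)
		if (funcs[i].cmd == cmd)
			return funcs[i].dbg_dump(handle, buf, len);

	return -EOPNOTSUPP;
}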
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -623,7 +645,7 @@ struct hnae3_ae_ops {
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
int (*dbg_run_cmd)(struct hnae3_handle *handle, const char *cmd_buf);
int (*dbg_read_cmd)(struct hnae3_handle *handle, const char *cmd_buf,
int (*dbg_read_cmd)(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
char *buf, int len);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
@@ -786,10 +808,6 @@ struct hnae3_handle {
#define hnae3_get_bit(origin, shift) \
hnae3_get_field(origin, 0x1 << (shift), shift)
#define HNAE3_DBG_TM_NODES "tm_nodes"
#define HNAE3_DBG_TM_PRI "tm_priority"
#define HNAE3_DBG_TM_QSET "tm_qset"
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
......
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2021 Hisilicon Limited. */
#ifndef __HNS3_DEBUGFS_H
#define __HNS3_DEBUGFS_H
#define HNS3_DBG_READ_LEN 65536
#define HNS3_DBG_READ_LEN_128KB 0x20000
#define HNS3_DBG_READ_LEN_4MB 0x400000
#define HNS3_DBG_WRITE_LEN 1024
#define HNS3_DBG_DATA_STR_LEN 32
#define HNS3_DBG_INFO_LEN 256
#define HNS3_DBG_ITEM_NAME_LEN 32
#define HNS3_DBG_FILE_NAME_LEN 16
struct hns3_dbg_item {
char name[HNS3_DBG_ITEM_NAME_LEN];
u16 interval; /* blank numbers after the item */
};
struct hns3_dbg_data {
struct hnae3_handle *handle;
u16 qid;
};
enum hns3_dbg_dentry_type {
HNS3_DBG_DENTRY_TM,
HNS3_DBG_DENTRY_TX_BD,
HNS3_DBG_DENTRY_RX_BD,
HNS3_DBG_DENTRY_MAC,
HNS3_DBG_DENTRY_COMMON,
};
struct hns3_dbg_dentry_info {
const char *name;
struct dentry *dentry;
};
struct hns3_dbg_cmd_info {
const char *name;
enum hnae3_dbg_cmd cmd;
enum hns3_dbg_dentry_type dentry;
u32 buf_len;
char *buf;
int (*init)(struct hnae3_handle *handle, unsigned int cmd);
};
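/* Illustrative sketch, not part of this patch: an entry in the command table
 * built from hns3_dbg_cmd_info ties a debugfs file name to its command id,
 * parent dentry and read-buffer size. The entry below is only an example;
 * the real table is defined in hns3_debugfs.c.
 */
static struct hns3_dbg_cmd_info example_dbg_cmd[] = {
	{
		.name = "dev_info",
		.cmd = HNAE3_DBG_CMD_DEV_INFO,
		.dentry = HNS3_DBG_DENTRY_COMMON,
		.buf_len = HNS3_DBG_READ_LEN,
		.init = NULL,
	},
};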
struct hns3_dbg_func {
enum hnae3_dbg_cmd cmd;
int (*dbg_dump)(struct hnae3_handle *handle, char *buf, int len);
int (*dbg_dump_bd)(struct hns3_dbg_data *data, char *buf, int len);
};
struct hns3_dbg_cap_info {
const char *name;
enum HNAE3_DEV_CAP_BITS cap_bit;
};
#endif
@@ -91,6 +91,278 @@ static const struct pci_device_id hns3_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \
{ ptype, \
l, \
CHECKSUM_##s, \
HNS3_L3_TYPE_##t, \
1 }
#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
{ ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 }
static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
HNS3_RX_PTYPE_UNUSED_ENTRY(0),
HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP),
HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP),
HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP),
HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL),
HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL),
HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL),
HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM),
HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL),
HNS3_RX_PTYPE_UNUSED_ENTRY(9),
HNS3_RX_PTYPE_UNUSED_ENTRY(10),
HNS3_RX_PTYPE_UNUSED_ENTRY(11),
HNS3_RX_PTYPE_UNUSED_ENTRY(12),
HNS3_RX_PTYPE_UNUSED_ENTRY(13),
HNS3_RX_PTYPE_UNUSED_ENTRY(14),
HNS3_RX_PTYPE_UNUSED_ENTRY(15),
HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL),
HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4),
HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4),
HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4),
HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4),
HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4),
HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4),
HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4),
HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4),
HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4),
HNS3_RX_PTYPE_UNUSED_ENTRY(26),
HNS3_RX_PTYPE_UNUSED_ENTRY(27),
HNS3_RX_PTYPE_UNUSED_ENTRY(28),
HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL),
HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL),
HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4),
HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4),
HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4),
HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4),
HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4),
HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4),
HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4),
HNS3_RX_PTYPE_UNUSED_ENTRY(38),
HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6),
HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6),
HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6),
HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6),
HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6),
HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6),
HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6),
HNS3_RX_PTYPE_UNUSED_ENTRY(46),
HNS3_RX_PTYPE_UNUSED_ENTRY(47),
HNS3_RX_PTYPE_UNUSED_ENTRY(48),
HNS3_RX_PTYPE_UNUSED_ENTRY(49),
HNS3_RX_PTYPE_UNUSED_ENTRY(50),
HNS3_RX_PTYPE_UNUSED_ENTRY(51),
HNS3_RX_PTYPE_UNUSED_ENTRY(52),
HNS3_RX_PTYPE_UNUSED_ENTRY(53),
HNS3_RX_PTYPE_UNUSED_ENTRY(54),
HNS3_RX_PTYPE_UNUSED_ENTRY(55),
HNS3_RX_PTYPE_UNUSED_ENTRY(56),
HNS3_RX_PTYPE_UNUSED_ENTRY(57),
HNS3_RX_PTYPE_UNUSED_ENTRY(58),
HNS3_RX_PTYPE_UNUSED_ENTRY(59),
HNS3_RX_PTYPE_UNUSED_ENTRY(60),
HNS3_RX_PTYPE_UNUSED_ENTRY(61),
HNS3_RX_PTYPE_UNUSED_ENTRY(62),
HNS3_RX_PTYPE_UNUSED_ENTRY(63),
HNS3_RX_PTYPE_UNUSED_ENTRY(64),
HNS3_RX_PTYPE_UNUSED_ENTRY(65),
HNS3_RX_PTYPE_UNUSED_ENTRY(66),
HNS3_RX_PTYPE_UNUSED_ENTRY(67),
HNS3_RX_PTYPE_UNUSED_ENTRY(68),
HNS3_RX_PTYPE_UNUSED_ENTRY(69),
HNS3_RX_PTYPE_UNUSED_ENTRY(70),
HNS3_RX_PTYPE_UNUSED_ENTRY(71),
HNS3_RX_PTYPE_UNUSED_ENTRY(72),
HNS3_RX_PTYPE_UNUSED_ENTRY(73),
HNS3_RX_PTYPE_UNUSED_ENTRY(74),
HNS3_RX_PTYPE_UNUSED_ENTRY(75),
HNS3_RX_PTYPE_UNUSED_ENTRY(76),
HNS3_RX_PTYPE_UNUSED_ENTRY(77),
HNS3_RX_PTYPE_UNUSED_ENTRY(78),
HNS3_RX_PTYPE_UNUSED_ENTRY(79),
HNS3_RX_PTYPE_UNUSED_ENTRY(80),
HNS3_RX_PTYPE_UNUSED_ENTRY(81),
HNS3_RX_PTYPE_UNUSED_ENTRY(82),
HNS3_RX_PTYPE_UNUSED_ENTRY(83),
HNS3_RX_PTYPE_UNUSED_ENTRY(84),
HNS3_RX_PTYPE_UNUSED_ENTRY(85),
HNS3_RX_PTYPE_UNUSED_ENTRY(86),
HNS3_RX_PTYPE_UNUSED_ENTRY(87),
HNS3_RX_PTYPE_UNUSED_ENTRY(88),
HNS3_RX_PTYPE_UNUSED_ENTRY(89),
HNS3_RX_PTYPE_UNUSED_ENTRY(90),
HNS3_RX_PTYPE_UNUSED_ENTRY(91),
HNS3_RX_PTYPE_UNUSED_ENTRY(92),
HNS3_RX_PTYPE_UNUSED_ENTRY(93),
HNS3_RX_PTYPE_UNUSED_ENTRY(94),
HNS3_RX_PTYPE_UNUSED_ENTRY(95),
HNS3_RX_PTYPE_UNUSED_ENTRY(96),
HNS3_RX_PTYPE_UNUSED_ENTRY(97),
HNS3_RX_PTYPE_UNUSED_ENTRY(98),
HNS3_RX_PTYPE_UNUSED_ENTRY(99),
HNS3_RX_PTYPE_UNUSED_ENTRY(100),
HNS3_RX_PTYPE_UNUSED_ENTRY(101),
HNS3_RX_PTYPE_UNUSED_ENTRY(102),
HNS3_RX_PTYPE_UNUSED_ENTRY(103),
HNS3_RX_PTYPE_UNUSED_ENTRY(104),
HNS3_RX_PTYPE_UNUSED_ENTRY(105),
HNS3_RX_PTYPE_UNUSED_ENTRY(106),
HNS3_RX_PTYPE_UNUSED_ENTRY(107),
HNS3_RX_PTYPE_UNUSED_ENTRY(108),
HNS3_RX_PTYPE_UNUSED_ENTRY(109),
HNS3_RX_PTYPE_UNUSED_ENTRY(110),
HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6),
HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6),
HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6),
HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6),
HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6),
HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6),
HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6),
HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6),
HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6),
HNS3_RX_PTYPE_UNUSED_ENTRY(120),
HNS3_RX_PTYPE_UNUSED_ENTRY(121),
HNS3_RX_PTYPE_UNUSED_ENTRY(122),
HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL),
HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL),
HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4),
HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4),
HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4),
HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4),
HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4),
HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4),
HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4),
HNS3_RX_PTYPE_UNUSED_ENTRY(132),
HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6),
HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6),
HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6),
HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6),
HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6),
HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6),
HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6),
HNS3_RX_PTYPE_UNUSED_ENTRY(140),
HNS3_RX_PTYPE_UNUSED_ENTRY(141),
HNS3_RX_PTYPE_UNUSED_ENTRY(142),
HNS3_RX_PTYPE_UNUSED_ENTRY(143),
HNS3_RX_PTYPE_UNUSED_ENTRY(144),
HNS3_RX_PTYPE_UNUSED_ENTRY(145),
HNS3_RX_PTYPE_UNUSED_ENTRY(146),
HNS3_RX_PTYPE_UNUSED_ENTRY(147),
HNS3_RX_PTYPE_UNUSED_ENTRY(148),
HNS3_RX_PTYPE_UNUSED_ENTRY(149),
HNS3_RX_PTYPE_UNUSED_ENTRY(150),
HNS3_RX_PTYPE_UNUSED_ENTRY(151),
HNS3_RX_PTYPE_UNUSED_ENTRY(152),
HNS3_RX_PTYPE_UNUSED_ENTRY(153),
HNS3_RX_PTYPE_UNUSED_ENTRY(154),
HNS3_RX_PTYPE_UNUSED_ENTRY(155),
HNS3_RX_PTYPE_UNUSED_ENTRY(156),
HNS3_RX_PTYPE_UNUSED_ENTRY(157),
HNS3_RX_PTYPE_UNUSED_ENTRY(158),
HNS3_RX_PTYPE_UNUSED_ENTRY(159),
HNS3_RX_PTYPE_UNUSED_ENTRY(160),
HNS3_RX_PTYPE_UNUSED_ENTRY(161),
HNS3_RX_PTYPE_UNUSED_ENTRY(162),
HNS3_RX_PTYPE_UNUSED_ENTRY(163),
HNS3_RX_PTYPE_UNUSED_ENTRY(164),
HNS3_RX_PTYPE_UNUSED_ENTRY(165),
HNS3_RX_PTYPE_UNUSED_ENTRY(166),
HNS3_RX_PTYPE_UNUSED_ENTRY(167),
HNS3_RX_PTYPE_UNUSED_ENTRY(168),
HNS3_RX_PTYPE_UNUSED_ENTRY(169),
HNS3_RX_PTYPE_UNUSED_ENTRY(170),
HNS3_RX_PTYPE_UNUSED_ENTRY(171),
HNS3_RX_PTYPE_UNUSED_ENTRY(172),
HNS3_RX_PTYPE_UNUSED_ENTRY(173),
HNS3_RX_PTYPE_UNUSED_ENTRY(174),
HNS3_RX_PTYPE_UNUSED_ENTRY(175),
HNS3_RX_PTYPE_UNUSED_ENTRY(176),
HNS3_RX_PTYPE_UNUSED_ENTRY(177),
HNS3_RX_PTYPE_UNUSED_ENTRY(178),
HNS3_RX_PTYPE_UNUSED_ENTRY(179),
HNS3_RX_PTYPE_UNUSED_ENTRY(180),
HNS3_RX_PTYPE_UNUSED_ENTRY(181),
HNS3_RX_PTYPE_UNUSED_ENTRY(182),
HNS3_RX_PTYPE_UNUSED_ENTRY(183),
HNS3_RX_PTYPE_UNUSED_ENTRY(184),
HNS3_RX_PTYPE_UNUSED_ENTRY(185),
HNS3_RX_PTYPE_UNUSED_ENTRY(186),
HNS3_RX_PTYPE_UNUSED_ENTRY(187),
HNS3_RX_PTYPE_UNUSED_ENTRY(188),
HNS3_RX_PTYPE_UNUSED_ENTRY(189),
HNS3_RX_PTYPE_UNUSED_ENTRY(190),
HNS3_RX_PTYPE_UNUSED_ENTRY(191),
HNS3_RX_PTYPE_UNUSED_ENTRY(192),
HNS3_RX_PTYPE_UNUSED_ENTRY(193),
HNS3_RX_PTYPE_UNUSED_ENTRY(194),
HNS3_RX_PTYPE_UNUSED_ENTRY(195),
HNS3_RX_PTYPE_UNUSED_ENTRY(196),
HNS3_RX_PTYPE_UNUSED_ENTRY(197),
HNS3_RX_PTYPE_UNUSED_ENTRY(198),
HNS3_RX_PTYPE_UNUSED_ENTRY(199),
HNS3_RX_PTYPE_UNUSED_ENTRY(200),
HNS3_RX_PTYPE_UNUSED_ENTRY(201),
HNS3_RX_PTYPE_UNUSED_ENTRY(202),
HNS3_RX_PTYPE_UNUSED_ENTRY(203),
HNS3_RX_PTYPE_UNUSED_ENTRY(204),
HNS3_RX_PTYPE_UNUSED_ENTRY(205),
HNS3_RX_PTYPE_UNUSED_ENTRY(206),
HNS3_RX_PTYPE_UNUSED_ENTRY(207),
HNS3_RX_PTYPE_UNUSED_ENTRY(208),
HNS3_RX_PTYPE_UNUSED_ENTRY(209),
HNS3_RX_PTYPE_UNUSED_ENTRY(210),
HNS3_RX_PTYPE_UNUSED_ENTRY(211),
HNS3_RX_PTYPE_UNUSED_ENTRY(212),
HNS3_RX_PTYPE_UNUSED_ENTRY(213),
HNS3_RX_PTYPE_UNUSED_ENTRY(214),
HNS3_RX_PTYPE_UNUSED_ENTRY(215),
HNS3_RX_PTYPE_UNUSED_ENTRY(216),
HNS3_RX_PTYPE_UNUSED_ENTRY(217),
HNS3_RX_PTYPE_UNUSED_ENTRY(218),
HNS3_RX_PTYPE_UNUSED_ENTRY(219),
HNS3_RX_PTYPE_UNUSED_ENTRY(220),
HNS3_RX_PTYPE_UNUSED_ENTRY(221),
HNS3_RX_PTYPE_UNUSED_ENTRY(222),
HNS3_RX_PTYPE_UNUSED_ENTRY(223),
HNS3_RX_PTYPE_UNUSED_ENTRY(224),
HNS3_RX_PTYPE_UNUSED_ENTRY(225),
HNS3_RX_PTYPE_UNUSED_ENTRY(226),
HNS3_RX_PTYPE_UNUSED_ENTRY(227),
HNS3_RX_PTYPE_UNUSED_ENTRY(228),
HNS3_RX_PTYPE_UNUSED_ENTRY(229),
HNS3_RX_PTYPE_UNUSED_ENTRY(230),
HNS3_RX_PTYPE_UNUSED_ENTRY(231),
HNS3_RX_PTYPE_UNUSED_ENTRY(232),
HNS3_RX_PTYPE_UNUSED_ENTRY(233),
HNS3_RX_PTYPE_UNUSED_ENTRY(234),
HNS3_RX_PTYPE_UNUSED_ENTRY(235),
HNS3_RX_PTYPE_UNUSED_ENTRY(236),
HNS3_RX_PTYPE_UNUSED_ENTRY(237),
HNS3_RX_PTYPE_UNUSED_ENTRY(238),
HNS3_RX_PTYPE_UNUSED_ENTRY(239),
HNS3_RX_PTYPE_UNUSED_ENTRY(240),
HNS3_RX_PTYPE_UNUSED_ENTRY(241),
HNS3_RX_PTYPE_UNUSED_ENTRY(242),
HNS3_RX_PTYPE_UNUSED_ENTRY(243),
HNS3_RX_PTYPE_UNUSED_ENTRY(244),
HNS3_RX_PTYPE_UNUSED_ENTRY(245),
HNS3_RX_PTYPE_UNUSED_ENTRY(246),
HNS3_RX_PTYPE_UNUSED_ENTRY(247),
HNS3_RX_PTYPE_UNUSED_ENTRY(248),
HNS3_RX_PTYPE_UNUSED_ENTRY(249),
HNS3_RX_PTYPE_UNUSED_ENTRY(250),
HNS3_RX_PTYPE_UNUSED_ENTRY(251),
HNS3_RX_PTYPE_UNUSED_ENTRY(252),
HNS3_RX_PTYPE_UNUSED_ENTRY(253),
HNS3_RX_PTYPE_UNUSED_ENTRY(254),
HNS3_RX_PTYPE_UNUSED_ENTRY(255),
};
#define HNS3_INVALID_PTYPE \
ARRAY_SIZE(hns3_rx_ptype_tbl)
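/* Illustrative sketch, not part of this patch: how the RX path further down
 * selects the L3 type. When the advanced RX descriptor layout is enabled,
 * the hardware-parsed packet type from ol_info indexes hns3_rx_ptype_tbl;
 * otherwise the legacy L3ID field in l234info is used. HNS3_INVALID_PTYPE
 * equals the table size and serves elsewhere as an "out of table" sentinel.
 * The function name is hypothetical.
 */
static u32 example_rx_l3_type(u32 l234info, u32 ol_info, bool adv_layout)
{
	if (adv_layout) {
		u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
					    HNS3_RXD_PTYPE_S);

		return hns3_rx_ptype_tbl[ptype].l3_type;
	}

	/* fall back to the legacy L3ID field in l234info */
	return hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
}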
static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
struct hns3_enet_tqp_vector *tqp_vector = vector;
@@ -362,7 +634,7 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
return 0;
}
static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
u16 alloc_tqps, max_rss_size, rss_size;
@@ -2980,51 +3252,31 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
return 0;
}
static void hns3_checksum_complete(struct hns3_enet_ring *ring,
struct sk_buff *skb, u32 l234info)
static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
struct sk_buff *skb, u32 ptype, u16 csum)
{
u32 lo, hi;
if (ptype == HNS3_INVALID_PTYPE ||
hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
return false;
u64_stats_update_begin(&ring->syncp);
ring->stats.csum_complete++;
u64_stats_update_end(&ring->syncp);
skb->ip_summed = CHECKSUM_COMPLETE;
lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M,
HNS3_RXD_L2_CSUM_L_S);
hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M,
HNS3_RXD_L2_CSUM_H_S);
skb->csum = csum_unfold((__force __sum16)(lo | hi << 8));
skb->csum = csum_unfold((__force __sum16)csum);
return true;
}
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
u32 l234info, u32 bd_base_info, u32 ol_info)
static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info,
u32 ol_info, u32 ptype)
{
struct net_device *netdev = ring_to_netdev(ring);
int l3_type, l4_type;
int ol4_type;
skb->ip_summed = CHECKSUM_NONE;
skb_checksum_none_assert(skb);
if (!(netdev->features & NETIF_F_RXCSUM))
return;
if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) {
hns3_checksum_complete(ring, skb, l234info);
return;
}
/* check if hardware has done checksum */
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
return;
if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
BIT(HNS3_RXD_OL3E_B) |
BIT(HNS3_RXD_OL4E_B)))) {
u64_stats_update_begin(&ring->syncp);
ring->stats.l3l4_csum_err++;
u64_stats_update_end(&ring->syncp);
if (ptype != HNS3_INVALID_PTYPE) {
skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level;
skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed;
return;
}
@@ -3054,6 +3306,45 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
}
}
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
u32 l234info, u32 bd_base_info, u32 ol_info,
u16 csum)
{
struct net_device *netdev = ring_to_netdev(ring);
struct hns3_nic_priv *priv = netdev_priv(netdev);
u32 ptype = HNS3_INVALID_PTYPE;
skb->ip_summed = CHECKSUM_NONE;
skb_checksum_none_assert(skb);
if (!(netdev->features & NETIF_F_RXCSUM))
return;
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state))
ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
HNS3_RXD_PTYPE_S);
if (hns3_checksum_complete(ring, skb, ptype, csum))
return;
/* check if hardware has done checksum */
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
return;
if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
BIT(HNS3_RXD_OL3E_B) |
BIT(HNS3_RXD_OL4E_B)))) {
u64_stats_update_begin(&ring->syncp);
ring->stats.l3l4_csum_err++;
u64_stats_update_end(&ring->syncp);
return;
}
hns3_rx_handle_csum(skb, l234info, ol_info, ptype);
}
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
if (skb_has_frag_list(skb))
@@ -3235,8 +3526,10 @@ static int hns3_add_frag(struct hns3_enet_ring *ring)
static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
struct sk_buff *skb, u32 l234info,
u32 bd_base_info, u32 ol_info)
u32 bd_base_info, u32 ol_info, u16 csum)
{
struct net_device *netdev = ring_to_netdev(ring);
struct hns3_nic_priv *priv = netdev_priv(netdev);
u32 l3_type;
skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
@@ -3244,7 +3537,8 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
HNS3_RXD_GRO_SIZE_S);
/* if there is no HW GRO, do not set gro params */
if (!skb_shinfo(skb)->gso_size) {
hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info,
csum);
return 0;
}
@@ -3252,7 +3546,16 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S);
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
HNS3_RXD_PTYPE_S);
l3_type = hns3_rx_ptype_tbl[ptype].l3_type;
} else {
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
HNS3_RXD_L3ID_S);
}
if (l3_type == HNS3_L3_TYPE_IPV4)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
else if (l3_type == HNS3_L3_TYPE_IPV6)
@@ -3285,6 +3588,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
struct hns3_desc *desc;
unsigned int len;
int pre_ntc, ret;
u16 csum;
/* bdinfo handled below is only valid on the last BD of the
* current packet, and ring->next_to_clean indicates the first
@@ -3296,6 +3600,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
l234info = le32_to_cpu(desc->rx.l234_info);
ol_info = le32_to_cpu(desc->rx.ol_info);
csum = le16_to_cpu(desc->csum);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
@@ -3328,7 +3633,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
/* This is needed in order to enable forwarding support */
ret = hns3_set_gro_and_checksum(ring, skb, l234info,
bd_base_info, ol_info);
bd_base_info, ol_info, csum);
if (unlikely(ret)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.rx_err_cnt++;
@@ -4343,13 +4648,21 @@ static int hns3_client_init(struct hnae3_handle *handle)
hns3_dcbnl_setup(handle);
hns3_dbg_init(handle);
ret = hns3_dbg_init(handle);
if (ret) {
dev_err(priv->dev, "failed to init debugfs, ret = %d\n",
ret);
goto out_client_start;
}
netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
......
@@ -19,6 +19,7 @@ enum hns3_nic_state {
HNS3_NIC_STATE_SERVICE_SCHED,
HNS3_NIC_STATE2_RESET_REQUESTED,
HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE,
HNS3_NIC_STATE_MAX
};
@@ -82,12 +83,6 @@ enum hns3_nic_state {
#define HNS3_RXD_STRP_TAGP_S 13
#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)
#define HNS3_RXD_L2_CSUM_B 15
#define HNS3_RXD_L2_CSUM_L_S 4
#define HNS3_RXD_L2_CSUM_L_M (0xff << HNS3_RXD_L2_CSUM_L_S)
#define HNS3_RXD_L2_CSUM_H_S 24
#define HNS3_RXD_L2_CSUM_H_M (0xff << HNS3_RXD_L2_CSUM_H_S)
#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
@@ -114,6 +109,9 @@ enum hns3_nic_state {
#define HNS3_RXD_FBLI_S 14
#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S)
#define HNS3_RXD_PTYPE_S 4
#define HNS3_RXD_PTYPE_M GENMASK(11, 4)
#define HNS3_RXD_BDTYPE_S 0
#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B 4
@@ -238,7 +236,10 @@ enum hns3_pkt_tun_type {
/* hardware spec ring buffer format */
struct __packed hns3_desc {
__le64 addr;
union {
__le64 addr;
__le16 csum;
};
union {
struct {
__le16 vlan_tag;
@@ -366,6 +367,14 @@ enum hns3_pkt_ol4type {
HNS3_OL4_TYPE_UNKNOWN
};
struct hns3_rx_ptype {
u32 ptype:8;
u32 csum_level:2;
u32 ip_summed:2;
u32 l3_type:4;
u32 valid:1;
};
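/* Illustrative note, not part of this patch: with the HNS3_RX_PTYPE_ENTRY
 * macro from hns3_enet.c, an entry such as
 *	HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4)
 * expands to the positional initializer
 *	{ 17, 0, CHECKSUM_COMPLETE, HNS3_L3_TYPE_IPV4, 1 }
 * i.e. ptype 17, csum_level 0, ip_summed CHECKSUM_COMPLETE, l3_type
 * HNS3_L3_TYPE_IPV4, valid 1, while HNS3_RX_PTYPE_UNUSED_ENTRY() leaves
 * valid at 0 so the slot is ignored.
 */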
struct ring_stats {
u64 sw_err_cnt;
u64 seg_pkt_cnt;
@@ -397,6 +406,7 @@ struct ring_stats {
u64 rx_multicast;
u64 non_reuse_pg;
};
__le16 csum;
};
};
@@ -640,9 +650,10 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle);
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif
void hns3_dbg_init(struct hnae3_handle *handle);
int hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
u16 hns3_get_max_available_channels(struct hnae3_handle *h);
#endif
@@ -386,6 +386,8 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_PHY_IMP_B))
set_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_RXD_ADV_LAYOUT_B))
set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps);
}
static __le32 hclge_build_api_caps(void)
@@ -469,7 +471,7 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev)
struct hclge_desc desc;
u32 compat = 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
req = (struct hclge_firmware_compat_cmd *)desc.data;
......
@@ -267,10 +267,10 @@ enum hclge_opcode_type {
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
/* M7 stats command */
HCLGE_OPC_M7_STATS_BD = 0x7012,
HCLGE_OPC_M7_STATS_INFO = 0x7013,
HCLGE_OPC_M7_COMPAT_CFG = 0x701A,
/* IMP stats command */
HCLGE_OPC_IMP_STATS_BD = 0x7012,
HCLGE_OPC_IMP_STATS_INFO = 0x7013,
HCLGE_OPC_IMP_COMPAT_CFG = 0x701A,
/* SFP command */
HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
@@ -391,6 +391,7 @@ enum HCLGE_CAP_BITS {
HCLGE_CAP_UDP_TUNNEL_CSUM_B,
HCLGE_CAP_FEC_B = 13,
HCLGE_CAP_PAUSE_B = 14,
HCLGE_CAP_RXD_ADV_LAYOUT_B = 15,
};
enum HCLGE_API_CAP_BITS {
@@ -1100,7 +1101,7 @@ struct hclge_fd_user_def_cfg_cmd {
u8 rsv[12];
};
struct hclge_get_m7_bd_cmd {
struct hclge_get_imp_bd_cmd {
__le32 bd_num;
u8 rsv[20];
};
......
@@ -7,7 +7,6 @@
#include <linux/etherdevice.h>
#include "hclge_cmd.h"
#define HCLGE_DBG_BUF_LEN 256
#define HCLGE_DBG_MNG_TBL_MAX 64
#define HCLGE_DBG_MNG_VLAN_MASK_B BIT(0)
@@ -83,6 +82,11 @@ struct hclge_dbg_reg_type_info {
struct hclge_dbg_reg_common_msg reg_msg;
};
struct hclge_dbg_func {
enum hnae3_dbg_cmd cmd;
int (*dbg_dump)(struct hclge_dev *hdev, char *buf, int len);
};
static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
@@ -723,4 +727,13 @@ static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
{true, "RCB_CFG_TX_RING_EBDNUM"},
};
#define HCLGE_DBG_INFO_LEN 256
#define HCLGE_DBG_ID_LEN 16
#define HCLGE_DBG_ITEM_NAME_LEN 32
#define HCLGE_DBG_DATA_STR_LEN 32
struct hclge_dbg_item {
char name[HCLGE_DBG_ITEM_NAME_LEN];
u16 interval; /* blank numbers after the item */
};
#endif
@@ -3936,6 +3936,21 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
return ret;
}
static void hclge_show_rst_info(struct hclge_dev *hdev)
{
char *buf;
buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
if (!buf)
return;
hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
kfree(buf);
}
static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5
@@ -3966,7 +3981,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
dev_err(&hdev->pdev->dev, "Reset fail!\n");
hclge_dbg_dump_rst_info(hdev);
hclge_show_rst_info(hdev);
set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
@@ -11167,6 +11182,18 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
}
}
static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
{
if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
}
static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
{
if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -11339,6 +11366,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
}
hclge_init_rxd_adv_layout(hdev);
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
@@ -11720,6 +11749,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
return ret;
hclge_init_rxd_adv_layout(hdev);
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -11735,6 +11766,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_clear_vf_vlan(hdev);
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
hclge_uninit_rxd_adv_layout(hdev);
hclge_uninit_mac_table(hdev);
hclge_del_all_fd_entries(hdev);
......
@@ -53,6 +53,7 @@
/* bar registers for common func */
#define HCLGE_VECTOR0_OTER_EN_REG 0x20600
#define HCLGE_GRO_EN_REG 0x28000
#define HCLGE_RXD_ADV_LAYOUT_EN_REG 0x28008
/* bar registers for rcb */
#define HCLGE_RING_RX_ADDR_L_REG 0x80000
@@ -147,6 +148,8 @@
#define HCLGE_MAX_QSET_NUM 1024
#define HCLGE_DBG_RESET_INFO_LEN 1024
enum HLCGE_PORT_TYPE {
HOST_PORT,
NETWORK_PORT
@@ -1060,7 +1063,7 @@ int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
char *buf, int len);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
@@ -1088,6 +1091,6 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
void hclge_report_hw_error(struct hclge_dev *hdev,
enum hnae3_hw_error_type type);
void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
void hclge_dbg_dump_rst_info(struct hclge_dev *hdev);
int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len);
int hclge_push_vf_link_status(struct hclge_vport *vport);
#endif
@@ -359,6 +359,8 @@ static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_TUNNEL_CSUM_B))
set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGEVF_CAP_RXD_ADV_LAYOUT_B))
set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps);
}
static __le32 hclgevf_build_api_caps(void)
......
@@ -159,6 +159,7 @@ enum HCLGEVF_CAP_BITS {
HCLGEVF_CAP_HW_PAD_B,
HCLGEVF_CAP_STASH_B,
HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
HCLGEVF_CAP_RXD_ADV_LAYOUT_B = 15,
};
enum HCLGEVF_API_CAP_BITS {
......
@@ -3242,6 +3242,18 @@ static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev)
{
if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1);
}
static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev)
{
if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0);
}
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -3279,6 +3291,8 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
hclgevf_init_rxd_adv_layout(hdev);
dev_info(&hdev->pdev->dev, "Reset done\n");
return 0;
@@ -3379,6 +3393,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
hclgevf_init_rxd_adv_layout(hdev);
hdev->last_reset_time = jiffies;
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
HCLGEVF_DRIVER_NAME);
@@ -3405,6 +3421,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
struct hclge_vf_to_pf_msg send_msg;
hclgevf_state_uninit(hdev);
hclgevf_uninit_rxd_adv_layout(hdev);
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
......
@@ -47,6 +47,7 @@
/* bar registers for common func */
#define HCLGEVF_GRO_EN_REG 0x28000
#define HCLGEVF_RXD_ADV_LAYOUT_EN_REG 0x28008
/* bar registers for rcb */
#define HCLGEVF_RING_RX_ADDR_L_REG 0x80000
......