// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
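
/* Illustrative example (not in the original file): the two helpers above
 * combine to read one named counter out of struct hclge_mac_stats, e.g.
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * which is how hclge_comm_get_stats() walks the g_mac_stats_string table
 * defined below.
 */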

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
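
/* Illustrative arithmetic for the computation above (not in the original
 * source): desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4), i.e. one descriptor
 * is always reserved and one more is added for every four registers beyond
 * the first three. For example, reg_num = 23 yields 1 + 20 / 4 = 6.
 */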

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
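
/* Note (an inference, not from the original source): the (tqp->index & 0x1ff)
 * masking above presumably matches a 9-bit queue-id field in the RX/TX
 * status query commands, limiting the index to 0..511.
 */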

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has two queues: TX and RX */
	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
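
/* Illustrative layout note, restating the in-code comment above: with RoCE
 * supported, PF vectors are [0, roce_base_msix_offset) for NIC followed by
 * num_roce_msi RoCE vectors, so num_msi is the sum of the two.
 */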

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
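
/* Note on the address assembly above (illustrative): the low 32 bits of the
 * MAC address come from param[2] and the high bits from a field of param[3];
 * the (x << 31) << 1 idiom is just a 32-bit left shift written in two steps.
 */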

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the read length must be in units of 4 bytes when sent to
		 * hardware
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently does not support non-contiguous TCs */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
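
/* Note (an inference, not from the original source): both MSS values are
 * written through the HCLGE_TSO_MSS_MIN_M/HCLGE_TSO_MSS_MIN_S pair above,
 * presumably because the min and max registers share the same field layout;
 * only the destination field (tso_mss_min vs. tso_mss_max) differs.
 */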

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
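
/* Usage sketch (illustrative): hclge_config_gro(hdev, true) enables hardware
 * GRO where the device reports support; on devices without GRO support the
 * function returns 0 without touching the hardware.
 */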

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
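
/* Illustrative note: each TQP's ring registers occupy a fixed-stride window,
 * so queue i is addressed at io_base + HCLGE_TQP_REG_OFFSET +
 * i * HCLGE_TQP_REG_SIZE, as computed in the loop above.
 */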

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++)	{
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
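
/* Worked example for the TQP split above (assumed values, not from the
 * original source): with num_tqps = 16 and num_vport = 5, tqp_per_vport =
 * 16 / 5 = 3 and the main vport gets 3 + 16 % 5 = 4 queues.
 */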

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer is allocated in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

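/* Assign a private rx buffer to every enabled TC using either the larger
 * ("max" == true) or the smaller watermark scheme, then verify that the
 * resulting layout still fits via hclge_is_rx_buf_ok().
 */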
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear private buffers from the last TC first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear private buffers from the last TC first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

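/* Fall-back strategy that gives each enabled TC an equal, privately owned
 * share of the rx buffer and leaves the shared buffer empty; it succeeds
 * only if every share covers the minimum a TC needs.
 */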
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

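/* Program the calculated per-TC private buffer sizes and the shared buffer
 * size into hardware; sizes are written in HCLGE_BUF_UNIT_S units with the
 * per-TC enable bit set.
 */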
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

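/* Top-level buffer setup: calculate and program tx buffers, rx private
 * buffers, and (when DCB is supported) the rx waterlines and thresholds,
 * finishing with the common waterline.
 */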
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

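/* Allocate MSI/MSI-X vectors for this PF and set up the vector_status and
 * vector_irq bookkeeping arrays; getting fewer vectors than requested is
 * tolerated with a warning.
 */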
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

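/* Half duplex is only meaningful at 10M/100M; any other speed is forced to
 * full duplex.
 */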
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

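/* Build and send a CONFIG_SPEED_DUP command; the switch below maps each
 * link speed to its hardware field encoding (1G -> 0, 10G -> 1, 25G -> 2,
 * 40G -> 3, 50G -> 4, 100G -> 5, 10M -> 6, 100M -> 7).
 */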
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	if (duplex)
		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	if (enable)
		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.support_autoneg) {
		if (enable) {
			dev_err(&hdev->pdev->dev,
				"autoneg is not supported by current port\n");
			return -EOPNOTSUPP;
		} else {
			return 0;
		}
	}

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_restart_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;
	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
		return hclge_set_autoneg_en(hdev, !halt);

	return 0;
}

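/* Translate the HNAE3 FEC mode bits into a CONFIG_FEC_MODE command: AUTO
 * has its own enable bit, while RS and BaseR select the FEC mode field.
 */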
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
	struct hclge_config_fec_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);

	req = (struct hclge_config_fec_cmd *)desc.data;
	if (fec_mode & BIT(HNAE3_FEC_AUTO))
		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
	if (fec_mode & BIT(HNAE3_FEC_RS))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
	if (fec_mode & BIT(HNAE3_FEC_BASER))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);

	return ret;
}

static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	if (fec_mode && !(mac->fec_ability & fec_mode)) {
		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
		return -EINVAL;
	}

	ret = hclge_set_fec_hw(hdev, fec_mode);
	if (ret)
		return ret;

	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
	return 0;
}

static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
			  u8 *fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;

	if (fec_ability)
		*fec_ability = mac->fec_ability;
	if (fec_mode)
		*fec_mode = mac->fec_mode;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	if (hdev->hw.mac.support_autoneg) {
		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Config mac autoneg fail ret=%d\n", ret);
			return ret;
		}
	}

	mac->link = 0;

	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Fec mode init fail, ret = %d\n", ret);
			return ret;
		}
	}

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}

static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

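/* Re-arm the delayed service task (one-second cadence) and advance the
 * stats and aRFS expiry tick counters it uses to pace periodic work.
 */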
static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
		hdev->hw_stats.stats_timer++;
		hdev->fd_arfs_expire_timer++;
		mod_delayed_work(system_wq, &hdev->service_task,
				 round_jiffies_relative(HZ));
	}
}

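/* Query the MAC link status from firmware: returns 1 when the link is up,
 * 0 when it is down, or a negative errno if the query command fails.
 */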
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	unsigned int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;

	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

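/* On a link state change, notify the NIC client of every vport, reconfigure
 * the MAC tunnel interrupt, and notify the RoCE client when one is
 * registered.
 */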
static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *rclient = hdev->roce_client;
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *rhandle;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
			hclge_config_mac_tnl_int(hdev, state);
			rhandle = &hdev->vport[i].roce;
			if (rclient && rclient->ops->link_status_change)
				rclient->ops->link_status_change(rhandle,
								 state);
		}
		hdev->hw.mac.link = state;
	}
}

static void hclge_update_port_capability(struct hclge_mac *mac)
{
	/* update fec ability by speed */
	hclge_convert_setting_fec(mac);

	/* firmware cannot identify the backplane type; the media type read
	 * from the configuration helps to deal with it
	 */
	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
		mac->module_type = HNAE3_MODULE_TYPE_KR;
	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		mac->module_type = HNAE3_MODULE_TYPE_TP;

	if (mac->support_autoneg) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
		linkmode_copy(mac->advertising, mac->supported);
	} else {
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				   mac->supported);
		linkmode_zero(mac->advertising);
	}
}

static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}

	*speed = le32_to_cpu(resp->speed);

	return 0;
}

static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;

	resp->query_type = QUERY_ACTIVE_SPEED;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP info %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
		return ret;
	}

	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, it means it's an old version
	 * firmware, do not update these params
	 */
	if (resp->speed_ability) {
		mac->module_type = le32_to_cpu(resp->module_type);
		mac->speed_ability = le32_to_cpu(resp->speed_ability);
		mac->autoneg = resp->autoneg;
		mac->support_autoneg = resp->autoneg_ability;
		mac->speed_type = QUERY_ACTIVE_SPEED;
		if (!resp->active_fec)
			mac->fec_mode = 0;
		else
			mac->fec_mode = BIT(resp->active_fec);
	} else {
		mac->speed_type = QUERY_SFP_SPEED;
	}

	return 0;
}

static int hclge_update_port_info(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int speed = HCLGE_MAC_SPEED_UNKNOWN;
	int ret;

	/* get the port info from SFP cmd if not copper port */
	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		return 0;

	/* if IMP does not support get SFP/qSFP info, return directly */
	if (!hdev->support_sfp_query)
		return 0;

	if (hdev->pdev->revision >= 0x21)
		ret = hclge_get_sfp_info(hdev, mac);
	else
		ret = hclge_get_sfp_speed(hdev, &speed);

	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}

	if (hdev->pdev->revision >= 0x21) {
		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
			hclge_update_port_capability(mac);
			return 0;
		}
		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
					       HCLGE_MAC_FULL);
	} else {
		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
			return 0; /* do nothing if no SFP */

		/* must config full duplex for SFP */
		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
	}
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

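/* Decode the vector0 interrupt source. Priority order: IMP/global reset,
 * then MSI-X errors, then mailbox (CMDQ RX); *clearval is set to the bits
 * the caller must clear for the reported event type.
 */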
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw,
				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		hdev->rst_stats.imp_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		hdev->rst_stats.global_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		dev_info(&hdev->pdev->dev, "received event 0x%x\n",
			 msix_src_reg);
		*clearval = msix_src_reg;
		return HCLGE_VECTOR0_EVENT_ERR;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
		 cmdq_src_reg, msix_src_reg);
	*clearval = msix_src_reg;

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 clearval = 0;
	u32 event_cause;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This could
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do below for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have deferred type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
		 *    will fetch the correct type of reset. This would be done
		 *    by first decoding the types of errors.
		 */
		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
		/* fall through */
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	/* clear the source of interrupt if it is not caused by reset */
	if (!clearval ||
	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
		hclge_clear_event_cause(hdev, event_cause, clearval);
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}

int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify nic client failed %d(%d)\n", type, ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	int ret = 0;
	u16 i;

	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].roce;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)",
				type, ret);
			return ret;
		}
	}

	return ret;
}

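/* Poll the reset status register (or the FLR done flag) every 100 ms until
 * hardware reports completion, giving up after HCLGE_RESET_WAIT_CNT tries.
 */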
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WAIT_MS	100
#define HCLGE_RESET_WAIT_CNT	200
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	case HNAE3_FLR_RESET:
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	if (hdev->reset_type == HNAE3_FLR_RESET) {
		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
		       cnt++ < HCLGE_RESET_WAIT_CNT)
			msleep(HCLGE_RESET_WAIT_MS);

		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
			dev_err(&hdev->pdev->dev,
				"flr wait timeout: %d\n", cnt);
			return -EBUSY;
		}

		return 0;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}

static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

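/* Set or clear FUNC_RST_ING for all VFs; when asserting reset, also inform
 * each alive VF driver that a function reset is about to happen.
 */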
static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
	int i;

	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%d) rst failed %d!\n",
				vport->vport_id, ret);
			return ret;
		}

		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			continue;

		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "inform reset to vf(%d) failed %d!\n",
				 vport->vport_id, ret);
	}

	return 0;
}

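/* Ask the firmware to trigger a function-level reset for the given function
 * id; hclge_reset_prepare_wait() passes 0 here for the PF itself.
 */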
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);

	return ret;
}

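/* Kick off the requested reset: a global reset is triggered directly via
 * the reset register, while PF and FLR resets are queued on reset_pending
 * for the reset service task to handle.
 */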
static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "Hardware reset not finished\n");
		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
		return;
	}

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	case HNAE3_FLR_RESET:
		dev_info(&pdev->dev, "FLR requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}

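/* Resolve any UNKNOWN reset request, then return the highest-priority reset
 * level pending in @addr, clearing the lower-priority requests that the
 * chosen level supersedes.
 */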
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
	struct hclge_dev *hdev = ae_dev->priv;

	/* first, resolve any unknown reset type to the known type(s) */
	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
		hclge_handle_hw_msix_error(hdev, addr);
		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused
		 * interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced
		 * new UNKNOWN reset type). Now, the errors have been
		 * handled and cleared in hardware we can safely enable
		 * interrupts. This is an exception to the norm.
		 */
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	if (hdev->reset_type != HNAE3_NONE_RESET &&
	    rst_level < hdev->reset_type)
		return HNAE3_NONE_RESET;

	return rst_level;
}

static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);
}

static int hclge_reset_prepare_down(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, true);
		break;
	default:
		break;
	}

	return ret;
}

static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_SYNC_TIME 100

	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(HCLGE_RESET_SYNC_TIME);
		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		hdev->rst_stats.pf_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(HCLGE_RESET_SYNC_TIME);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		hdev->rst_stats.flr_rst_cnt++;
		break;
	case HNAE3_IMP_RESET:
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	/* inform hardware that preparatory work is done */
	msleep(HCLGE_RESET_SYNC_TIME);
	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
			HCLGE_NIC_CMQ_ENABLE);
	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}

static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
		    BIT(HCLGE_IMP_RESET_BIT))) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because IMP Reset is pending\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->reset_fail_cnt++;
		set_bit(hdev->reset_type, &hdev->reset_pending);
		dev_info(&hdev->pdev->dev,
			 "re-schedule reset task(%d)\n",
			 hdev->reset_fail_cnt);
		return true;
	}

	hclge_clear_reset_cause(hdev);
	dev_err(&hdev->pdev->dev, "Reset fail!\n");
	return false;
}

static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	default:
		break;
	}

	return ret;
}

static int hclge_reset_stack(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}

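/* Full reset flow: quiesce the RoCE and NIC clients, assert the reset and
 * wait for hardware completion, rebuild the stack, then bring the clients
 * back up; any failure is routed to the retry handler.
 */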
static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->rst_stats.reset_cnt++;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_reset_prepare_down(hdev);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	if (hclge_reset_wait(hdev))
		goto err_reset;

	hdev->rst_stats.hw_reset_done_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset;

	rtnl_lock();

	ret = hclge_reset_stack(hdev);
	if (ret)
		goto err_reset_lock;

	hclge_clear_reset_cause(hdev);

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
		goto err_reset;

	rtnl_lock();

	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset;

	hdev->last_reset_time = jiffies;
	hdev->reset_fail_cnt = 0;
	hdev->rst_stats.reset_done_cnt++;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	del_timer(&hdev->reset_timer);

	return;

err_reset_lock:
	rtnl_unlock();
err_reset:
	if (hclge_reset_err_handle(hdev))
		hclge_reset_task_schedule(hdev);
}

static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of the 2 cases below:
	 * 1. Recoverable error was conveyed through APEI and only way to bring
	 *    normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, error event might not have ae handle available.
	 * Check if this is a new reset request and we are not here just because
	 * last reset attempt did not succeed and watchdog hit us again. We will
	 * know this if last reset request did not occur very recently (watchdog
	 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
	 * In case of new request we reset the "reset level" to PF reset.
	 * And if it is a repeat reset request of the most recent one then we
	 * want to make sure we throttle the reset request. Therefore, we will
	 * not allow it again before 3*HZ times.
	 */
	if (!handle)
		handle = &hdev->vport[0].nic;

	if (time_before(jiffies, (hdev->last_reset_time +
				  HCLGE_RESET_INTERVAL)))
		return;
	else if (hdev->default_reset_request)
		hdev->reset_level =
			hclge_get_reset_level(ae_dev,
					      &hdev->default_reset_request);
	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
		hdev->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}

static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	dev_info(&hdev->pdev->dev,
		 "triggering reset in reset timer\n");
	hclge_reset_event(hdev->pdev, NULL);
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

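/* A vport is considered dead when its last_active_jiffies stamp (refreshed
 * when the VF talks to the PF over the mailbox, e.g. keep-alive messages)
 * has not been updated within the last 8 * HZ jiffies, i.e. roughly eight
 * seconds.
 */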
static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
	int i;

	/* start from vport 1, because the PF at vport 0 is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);

		/* If vf is not alive, set to default value */
		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
	}
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task.work);

	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_port_info(hdev);
	hclge_update_link_status(hdev);
	hclge_update_vport_alive(hdev);
	hclge_sync_vlan_filter(hdev);

	if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
		hclge_rfs_filter_expire(hdev);
		hdev->fd_arfs_expire_timer = 0;
	}

	hclge_task_schedule(hdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

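/* Vector 0 is used for the misc interrupt in this driver, so TQP vectors are
 * handed out starting from index 1; each one maps to an MSI-X irq and a
 * per-vport I/O address derived from HCLGE_VECTOR_REG_BASE below.
 */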
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}

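/* The RSS hash key is programmed in chunks of HCLGE_RSS_HASH_KEY_NUM bytes,
 * one command descriptor per chunk, with key_offset selecting the segment.
 * Illustrative numbers only: a 40-byte key split into 16-byte chunks would
 * need three descriptors carrying 16, 16 and 8 bytes respectively.
 */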
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclge_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGE_RSS_KEY_SIZE;
	req = (struct hclge_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}

static void hclge_get_rss_type(struct hclge_vport *vport)
{
	if (vport->rss_tuple_sets.ipv4_tcp_en ||
	    vport->rss_tuple_sets.ipv4_udp_en ||
	    vport->rss_tuple_sets.ipv4_sctp_en ||
	    vport->rss_tuple_sets.ipv6_tcp_en ||
	    vport->rss_tuple_sets.ipv6_udp_en ||
	    vport->rss_tuple_sets.ipv6_sctp_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
		 vport->rss_tuple_sets.ipv6_fragment_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	hclge_get_rss_type(&hdev->vport[0]);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc) {
		switch (vport->rss_algo) {
		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] =  vport->rss_indirection_tbl[i];

	return 0;
}

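/* Backs "ethtool -X", e.g. "ethtool -X <dev> hkey <key> hfunc toeplitz" to
 * set the hash key, or "ethtool -X <dev> equal 8" to spread the indirection
 * table over the first 8 queues.
 */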
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		switch (hfunc) {
		case ETH_RSS_HASH_TOP:
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
			break;
		case ETH_RSS_HASH_XOR:
			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
			break;
		case ETH_RSS_HASH_NO_CHANGE:
			hash_algo = vport->rss_algo;
			break;
		default:
			return -EINVAL;
		}

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}

static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}

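/* Backs "ethtool -N <dev> rx-flow-hash": for example the "sdfn" argument
 * sets RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, which
 * hclge_get_rss_hash_bits() above folds into HCLGE_S_IP_BIT |
 * HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT.
 */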
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	hclge_get_rss_type(vport);
	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}

int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	unsigned int i;
	int ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);
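	/* Illustrative values: rss_size = 24 gives roundup_pow_of_two(24) = 32
	 * and ilog2(32) = 5, so tc_size is programmed as 5 while TC i keeps
	 * queue offset 24 * i.
	 */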

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}

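/* Fill each vport's indirection table round-robin; for example, with
 * alloc_rss_size = 16 the table reads 0, 1, ..., 15, 0, 1, ... so flows
 * are spread evenly over the allocated RSS queues.
 */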
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}

static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;

	if (hdev->pdev->revision >= 0x21)
		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = rss_algo;

		memcpy(vport[i].rss_hash_key, hclge_hash_key,
		       HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}

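/* Ring-to-vector mappings are pushed in batches: each command descriptor
 * carries up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so a longer
 * chain is split across several ADD/DEL_RING_TO_VECTOR commands.
 */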
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}

static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
	 * on pdev revision 0x20; newer revisions support them. Setting these
	 * two fields does not cause an error when the driver sends the
	 * command to the firmware on revision 0x20.
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;
	bool en_bc_pmc = true;

	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode.
	 */
	if (handle->pdev->revision == 0x20)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
}

static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}

static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

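/* Stage-1 flow director keys are either 400 bits wide with a 2K-rule depth
 * or 200 bits wide with a 4K-rule depth, depending on the mode reported by
 * firmware; only the 400-bit mode leaves room for the MAC address tuples
 * (ETHER_FLOW).
 */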
static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %d\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	hdev->fd_cfg.proto_support =
		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If the max 400-bit key is used, we can support tuples for ether type */
	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
		hdev->fd_cfg.proto_support |= ETHER_FLOW;
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}

static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}

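/* TCAM tuples are written as an x/y bit pair derived from (value, mask) by
 * the calc_x()/calc_y() helpers defined earlier in this file; a tuple whose
 * bit is set in rule->unused_tuple is left zeroed, which encodes a
 * don't-care match.
 */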
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}

static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	unsigned int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}

/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region; unused bits will be filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	unsigned int i;
	int ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->queue_id, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->queue_id, ret);
	return ret;
}

static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}

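/* hclge_fd_check_spec() below validates an ethtool flow spec and builds a
 * bitmap of tuples left unspecified (hence wildcarded); e.g. a TCP/IPv4
 * rule with no source port set gets BIT(INNER_SRC_PORT) marked unused.
 */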
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs, u32 *unused)
{
	struct ethtool_tcpip4_spec *tcp_ip4_spec;
	struct ethtool_usrip4_spec *usr_ip4_spec;
	struct ethtool_tcpip6_spec *tcp_ip6_spec;
	struct ethtool_usrip6_spec *usr_ip6_spec;
	struct ethhdr *ether_spec;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
		return -EOPNOTSUPP;

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

		if (!tcp_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip4_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip4_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (!tcp_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		break;
	case IP_USER_FLOW:
		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

		if (!usr_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		if (!usr_ip4_spec->proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip4_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
			return -EOPNOTSUPP;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);

		/* check whether src/dst ip address is used */
		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip6_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip6_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (tcp_ip6_spec->tclass)
			return -EOPNOTSUPP;

		break;
	case IPV6_USER_FLOW:
		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
			BIT(INNER_DST_PORT);

		/* check whether src/dst ip address is used */
		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip6_spec->l4_proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip6_spec->tclass)
			return -EOPNOTSUPP;

		if (usr_ip6_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		break;
	case ETHER_FLOW:
		ether_spec = &fs->h_u.ether_spec;
		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

		if (is_zero_ether_addr(ether_spec->h_source))
			*unused |= BIT(INNER_SRC_MAC);

		if (is_zero_ether_addr(ether_spec->h_dest))
			*unused |= BIT(INNER_DST_MAC);

		if (!ether_spec->h_proto)
			*unused |= BIT(INNER_ETH_TYPE);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		if (fs->h_ext.vlan_etype)
			return -EOPNOTSUPP;
		if (!fs->h_ext.vlan_tci)
			*unused |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci) {
			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	} else {
		*unused |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
			return -EOPNOTSUPP;

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused |= BIT(INNER_DST_MAC);
		else
			*unused &= ~(BIT(INNER_DST_MAC));
	}

	return 0;
}

static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return rule && rule->location == location;
}

/* make sure to be called with fd_rule_lock held */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add) {
			if (!hdev->hclge_fd_rule_num)
				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
			clear_bit(location, hdev->fd_bmap);

			return 0;
		}
	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d is nonexistent\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	set_bit(location, hdev->fd_bmap);
	hdev->hclge_fd_rule_num++;
	hdev->fd_active_type = new_rule->rule_type;

	return 0;
}

static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}

/* make sure to be called with fd_rule_lock held */
static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	if (!rule) {
		dev_err(&hdev->pdev->dev,
			"The flow director rule is NULL\n");
		return -EINVAL;
	}

	/* it will never fail here, so no need to check the return value */
	hclge_fd_update_rule_list(hdev, rule, rule->location, true);

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	return 0;

clear_rule:
	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
	return ret;
}

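/* Entry point for the ethtool ntuple interface, e.g. "ethtool -N <dev>
 * flow-type tcp4 src-ip 192.168.1.1 action 3"; action -1 (RX_CLS_FLOW_DISC)
 * drops the packet, and the upper bits of ring_cookie select a destination
 * VF.
 */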
static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	if (!hdev->fd_en) {
		dev_warn(&hdev->pdev->dev,
			 "Please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
		return ret;
	}

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%d) > max vf num (%d)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%d) > max tqp num (%d)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;

	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
5305
	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5306

J
Jian Shen 已提交
5307 5308 5309 5310 5311
	/* to avoid rule conflicts, clear all aRFS rules when the user
	 * configures a rule via ethtool
	 */
	hclge_clear_arfs_rules(handle);

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}
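/* A worked example of the ring_cookie decoding above (values are
 * hypothetical): in the ethtool ABI the upper bits of ring_cookie select
 * the function and the lower bits the queue, so a cookie of
 * (2ULL << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | 5 requests queue 5 of
 * vport 2 (a VF field of 0 targets the PF itself), while
 * RX_CLS_FLOW_DISC instead marks the flow for dropping.
 */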

static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %d does not exist\n", fs->location);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		return ret;

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}

static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
				     bool clear_list)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	spin_lock_bh(&hdev->fd_rule_lock);
	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
}

static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	/* Return ok here, because reset error handling will check this
	 * return value. If error is returned here, the reset process will
	 * fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	/* if fd is disabled, the rules should not be restored during reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
		if (!ret)
			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);

		if (ret) {
			dev_warn(&hdev->pdev->dev,
				 "Restore rule %d failed, remove it\n",
				 rule->location);
			clear_bit(rule->location, hdev->fd_bmap);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	}

	if (hdev->hclge_fd_rule_num)
		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}

static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		fs->h_u.tcp_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip4_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip4_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.tcp_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		break;
	case IP_USER_FLOW:
		fs->h_u.usr_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.usr_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip4_spec.proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip6_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip6_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		break;
	case IPV6_USER_FLOW:
		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip6_spec.l4_proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		break;
	case ETHER_FLOW:
		ether_addr_copy(fs->h_u.ether_spec.h_source,
				rule->tuples.src_mac);
		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_source);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_source,
					rule->tuples_mask.src_mac);

		ether_addr_copy(fs->h_u.ether_spec.h_dest,
				rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);

		fs->h_u.ether_spec.h_proto =
				cpu_to_be16(rule->tuples.ether_proto);
		fs->m_u.ether_spec.h_proto =
				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
				0 : cpu_to_be16(rule->tuples_mask.ether_proto);

		break;
	default:
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
				cpu_to_be16(VLAN_VID_MASK) :
				cpu_to_be16(rule->tuples_mask.vlan_tag1);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
	}

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
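/* Note on the conversion above: a bit set in rule->unused_tuple means the
 * corresponding tuple did not take part in matching, so its mask is
 * reported back to ethtool as 0 (wildcard). For example, a rule that only
 * matched the destination port reports zero masks for the IP addresses,
 * the source port and the TOS field.
 */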

static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}

static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		memcpy(tuples->src_ip,
		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
		       sizeof(tuples->src_ip));
		memcpy(tuples->dst_ip,
		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
		       sizeof(tuples->dst_ip));
	}
}
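/* src_ip/dst_ip are sized for IPv6 (four u32 words), so an IPv4 address
 * is stored in the last word, index 3, matching IPV4_INDEX used elsewhere
 * in this file; e.g. 192.168.1.1 becomes tuples->src_ip[3] = 0xc0a80101
 * with words 0..2 left zero (address chosen for illustration).
 */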

/* traverse all rules and check whether an existing rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}

static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}
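/* The memset of tuples_mask to 0xFF above makes an aRFS rule an exact
 * match on every tuple not listed in unused_tuple: the MAC addresses,
 * VLAN tag, TOS and source port are wildcarded, while the ether proto,
 * IP proto, IP addresses and destination port filled in by flow
 * dissection must match exactly.
 */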

static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples;
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 tmp_queue_id;
	u16 bit_id;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	memset(&new_tuples, 0, sizeof(new_tuples));
	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	spin_lock_bh(&hdev->fd_rule_lock);

	/* aRFS must not take effect when an fd rule has already been
	 * added by the user
	 */
	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -EOPNOTSUPP;
	}

	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOSPC;
		}

		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOMEM;
		}

		set_bit(bit_id, hdev->fd_bmap);
		rule->location = bit_id;
		rule->flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		ret = hclge_fd_config_rule(hdev, rule);

		spin_unlock_bh(&hdev->fd_rule_lock);

		if (ret)
			return ret;

		return rule->location;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (rule->queue_id == queue_id)
		return rule->location;

	tmp_queue_id = rule->queue_id;
	rule->queue_id = queue_id;
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret) {
		rule->queue_id = tmp_queue_id;
		return ret;
	}

	return rule->location;
}

static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}

static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}

static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}

static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
	if (!enable)
		hclge_del_all_fd_entries(handle, clear);
	else
		hclge_restore_fd_entries(handle);
}

static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);

	if (enable) {
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
	}

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}

static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100
#define HCLGE_MAC_LINK_STATUS_MS   10
#define HCLGE_MAC_LINK_STATUS_NUM  100
#define HCLGE_MAC_LINK_STATUS_DOWN 0
#define HCLGE_MAC_LINK_STATUS_UP   1

	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int mac_link_ret = 0;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_serdes_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported serdes loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
	} else {
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	hclge_cfg_mac_mode(hdev, en);

	i = 0;
	do {
		/* serdes internal loopback, independent of the network cable */
		msleep(HCLGE_MAC_LINK_STATUS_MS);
		ret = hclge_get_mac_link_status(hdev);
		if (ret == mac_link_ret)
			return 0;
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);

	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");

	return -EBUSY;
}
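/* Polling budget of the function above, for reference: both the serdes
 * completion poll and the MAC link-status poll retry up to 100 times with
 * a 10 ms sleep between attempts, i.e. each waits at most roughly
 * 100 * 10 ms = 1 s before giving up with -EBUSY.
 */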

static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev);
	} else {
		/* Set the DOWN flag here to disable the service to be
		 * scheduled again
		 */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);
		cancel_delayed_work_sync(&hdev->service_task);
		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_clear_arfs_rules(handle);

	/* If it is not PF reset, the firmware will disable the MAC,
	 * so it only need to stop phy here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}

int hclge_vport_start(struct hclge_vport *vport)
{
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;
	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}

static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8  resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}

static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}
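/* A worked example for hclge_update_desc_vfid() (vfids hypothetical):
 * vfid 200 is beyond the 192 ids covered by desc[1], so
 * word_num = (200 - 192) / 32 = 0 and bit_num = 200 % 32 = 8, i.e. bit 8
 * of desc[2].data[0] is set or cleared. A small vfid such as 50 lands in
 * desc[1]: word_num = 1, bit_num = 18.
 */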

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
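/* Byte-packing example for hclge_prepare_mac_addr() (address chosen for
 * illustration): for 00:11:22:33:44:55,
 *   high_val = 0x22 << 16 | 0x33 << 24 | 0x00 | 0x11 << 8 = 0x33221100
 *   low_val  = 0x44 | 0x55 << 8 = 0x5544
 * and both values are then converted to the little-endian form the
 * hardware expects.
 */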

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}

static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
				  true);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "Alloc umv space failed, want %d, get %d\n",
			 hdev->wanted_umv_size, allocated_size);

	mutex_init(&hdev->umv_mutex);
	hdev->max_umv_size = allocated_size;
	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
	 * preserve some unicast mac vlan table entries shared by pf
	 * and its vfs.
	 */
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);

	return 0;
}
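/* A numeric sketch of the split above (values hypothetical): with
 * max_umv_size = 3072 and num_req_vfs = 8 there are num_req_vfs + 2 = 10
 * slices (one per VF, one for the PF and one extra that seeds the shared
 * pool), so priv_umv_size = 3072 / 10 = 307 entries per function and
 * share_umv_size = 307 + 3072 % 10 = 309 entries in the shared pool.
 */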

static int hclge_uninit_umv_space(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->max_umv_size > 0) {
		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
					  false);
		if (ret)
			return ret;
		hdev->max_umv_size = 0;
	}
	mutex_destroy(&hdev->umv_mutex);

	return 0;
}

static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
	if (!is_alloc)
		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"%s umv space failed for cmd_send, ret =%d\n",
			is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->umv_mutex);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);
	mutex_unlock(&hdev->umv_mutex);
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	mutex_lock(&hdev->umv_mutex);
	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);
	mutex_unlock(&hdev->umv_mutex);

	return is_full;
}

static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->umv_mutex);
	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
	mutex_unlock(&hdev->umv_mutex);
}
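/* Accounting example for the helper above (values hypothetical): with
 * priv_umv_size = 307, a vport consuming its 308th address draws one
 * entry from share_umv_size instead; when that address is freed later,
 * one entry is returned to the shared pool before the per-vport counter
 * is decremented.
 */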

static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr, is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		if (!hclge_is_umv_space_full(vport)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			return ret;
		}

		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
			hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}

static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret)
		hclge_update_umv_space(vport, true);

	return ret;
}

static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	if (status == -ENOSPC)
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}

static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all vfids are zero, update the vfid list */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	} else {
		/* Maybe this mac address is in the mta table, but it cannot
		 * be deleted here because an mta entry represents an address
		 * range rather than a specific address. The delete action for
		 * all entries will take effect in update_mta_status, called
		 * by hns3_nic_set_rx_mode.
		 */
		status = 0;
	}

	return status;
}

void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg;
	struct list_head *list;

	if (!vport->vport_id)
		return;

	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
	if (!mac_cfg)
		return;

	mac_cfg->hd_tbl_status = true;
	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_add_tail(&mac_cfg->node, list);
}

void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;
	bool uc_flag, mc_flag;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
			if (uc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_uc_addr_common(vport, mac_addr);

			if (mc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_mc_addr_common(vport, mac_addr);

			list_del(&mac_cfg->node);
			kfree(mac_cfg);
			break;
		}
	}
}

void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);

		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);

		mac_cfg->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&mac_cfg->node);
			kfree(mac_cfg);
		}
	}
}

void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport_mac_addr_cfg *mac, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}

		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}

static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%d.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}

static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%p.\n",
			 new_addr);
		return -EINVAL;
	}

	if ((!is_first || is_kdump_kernel()) &&
	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
		dev_warn(&hdev->pdev->dev,
			 "remove old uc mac address fail.\n");

	ret = hclge_add_uc_addr(handle, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add uc mac address fail, ret =%d.\n",
			ret);

		if (!is_first &&
		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
			dev_err(&hdev->pdev->dev,
				"restore uc mac address fail.\n");

		return -EIO;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure mac pause address fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

	return 0;
}

static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}

static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}

#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)

static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->pdev->revision >= 0x21) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable, 0);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable, 0);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
					   0);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7103 7104
}

static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
				    bool is_kill, u16 vlan, u8 qos,
				    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	/* if the vf vlan table is full, firmware will close the vf vlan
	 * filter, so it is invalid and unnecessary to add a new vlan id
	 * to the vf vlan filter
	 */
	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
		return 0;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

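	/* each descriptor carries HCLGE_MAX_VF_BYTES (16) bytes of VF bitmap,
	 * so vfid selects a byte and bit in desc[0], or in desc[1] once
	 * vfid >= 128 (e.g. vfid 10 -> byte 1, bit 2 of desc[0])
	 */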
	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id  = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		/* the vf vlan filter is disabled when the vf vlan table is
		 * full, so new vlan ids are never added into the vf vlan
		 * table. Just return 0 without warning, to avoid a flood of
		 * verbose logs on unload.
		 */
		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}

static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

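	/* the PF VLAN filter table is programmed 160 vlan ids per command:
	 * pick the 160-id block, then the byte and bit within its bitmap
	 * (e.g. vlan_id 100 -> block 0, byte 12, bit 4)
	 */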
	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}

static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

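	/* the port VLAN filter table is shared by all vports, so only
	 * program it when the first vport adds this vlan or the last
	 * vport using it removes it
	 */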
	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}

static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

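	/* with port based VLAN disabled, tag1 from the stack is accepted
	 * and passed through unchanged; with it enabled, stack tag1 is
	 * refused and the configured port VLAN tag is inserted as tag1
	 */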
	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		vport->txvlan_cfg.accept_tag1 = false;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20); newer revisions support them, but
	 * these two fields cannot be configured by the user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE		0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}

static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, 0, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}

static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id, 0,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id, 0,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}

static void hclge_restore_vlan_table(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	u16 vlan_proto, qos;
	u16 state, vlan_id;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
		qos = vport->port_base_vlan_cfg.vlan_info.qos;
		state = vport->port_base_vlan_cfg.state;

		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
						 vport->vport_id, vlan_id, qos,
						 false);
			continue;
		}

		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			if (vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan->vlan_id, 0,
							 false);
		}
	}

	mutex_unlock(&hdev->vport_cfg_mutex);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						 htons(new_info->vlan_proto),
						 vport->vport_id,
						 new_info->vlan_tag,
						 new_info->qos, false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       old_info->qos, true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}

int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       vlan_info->qos, false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       old_vlan_info->qos, true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}

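/* Map a requested vlan onto a port based VLAN state transition:
 * currently disabled: vlan 0 keeps it off, a non-zero vlan enables it;
 * currently enabled: vlan 0 disables it, the same tag is a no-op, and a
 * different tag is a modify.
 */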
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_ENABLE;
	} else {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_DISABLE;
		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_MODIFY;
	}
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	/* qos is a 3-bit value, so it cannot be bigger than 7 */
	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vport = &hdev->vport[vfid];
	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	/* update port based VLAN for PF */
	if (!vfid) {
		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);

		return ret;
	}

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);

	return hclge_push_vf_port_base_vlan_info(&hdev->vport[0], (u8)vfid,
						 state, vlan, qos,
						 ntohs(proto));
}

int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When the device is resetting, firmware is unable to handle
	 * mailbox messages. Just record the vlan id, and remove it
	 * after the reset has finished.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
		return -EBUSY;
	}

	/* When port based vlan is enabled, we use the port based vlan as the
	 * vlan filter entry. In this case, we don't update the vlan filter
	 * table when the user adds a new vlan or removes an existing one;
	 * we just update the vport vlan list. The vlan ids in the vlan list
	 * will not be written into the vlan filter table until port based
	 * vlan is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, 0, is_kill);
		writen_to_tbl = true;
	}

	if (!ret) {
		if (is_kill)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
		else
			hclge_add_vport_vlan_table(vport, vlan_id,
						   writen_to_tbl);
	} else if (is_kill) {
		/* When removing the hw vlan filter failed, record the vlan
		 * id and try to remove it from hw later, to stay consistent
		 * with the stack
		 */
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
	}
	return ret;
}

static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT	60

	int i, ret, sync_cnt = 0;
	u16 vlan_id;

	/* retry the vlan deletions that failed earlier, for every vport */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
					 VLAN_N_VID);
		while (vlan_id != VLAN_N_VID) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id, vlan_id,
						       0, true);
			if (ret && ret != -EINVAL)
				return;

			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
			hclge_rm_vport_vlan_table(vport, vlan_id, false);

			sync_cnt++;
			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
				return;

			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
						 VLAN_N_VID);
		}
	}
}

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}

int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

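	/* the max frame must hold the MTU plus the Ethernet header, FCS and
	 * two VLAN tags, so a double tagged frame at this MTU still fits
	 */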
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}

static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

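	/* poll up to HCLGE_TQP_RESET_TRY_TIMES times, 20ms apart, until the
	 * hw reports the queue as ready to reset
	 */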
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return -ETIME;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}

void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}

int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

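	/* resolve the effective TX/RX pause setting from the local and the
	 * link partner's advertisement
	 */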
	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	if (phydev) {
		fc_autoneg = hclge_get_autoneg(handle);
		if (auto_neg != fc_autoneg) {
			dev_info(&hdev->pdev->dev,
				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
			return -EOPNOTSUPP;
		}
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!auto_neg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	return -EOPNOTSUPP;
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
8257 8258
	int mdix_ctrl, mdix, is_resolved;
	unsigned int retval;
8259 8260 8261 8262 8263 8264 8265 8266 8267 8268

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}

static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");

	dev_info(dev, "PF info end.\n");
}

static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt;
	int ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
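	/* a reset during init_instance leaves the new instance stale, so
	 * unwind it and let the reset handling re-create it later
	 */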
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_nic_err;
	}

	/* Enable nic hw error interrupts */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto init_nic_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return ret;

init_nic_err:
	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	client->ops->uninit_instance(&vport->nic, 0);

	return ret;
}

static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->roce.client;
	struct hclge_dev *hdev = ae_dev->priv;
8377
	int rst_cnt;
8378 8379 8380 8381 8382 8383 8384 8385 8386 8387 8388
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}

static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:

			hdev->nic_client = client;
			vport->nic.client = client;
			ret = hclge_init_nic_client_instance(ae_dev, vport);
			if (ret)
				goto clear_nic;

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		default:
			return -EINVAL;
		}
	}

	return ret;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}

static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA\n");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}

static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	u16 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vf(%d) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}

static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);
	spin_lock_init(&hdev->fd_rule_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors those already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* request a delayed reset for the error recovery, because an
	 * immediate global reset on a PF would affect the pending
	 * initialization of other PFs
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

8866
	hclge_stats_clear(hdev);
8867
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8868
	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8869

8870 8871 8872 8873 8874 8875 8876 8877 8878 8879 8880 8881
	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

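	/* Each combined channel consumes one TQP per enabled TC, and the
	 * RSS engine cannot spread traffic wider than rss_size_max.
	 */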
	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

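	/* hclge_tm_vport_map_update() recomputes the vport's actual
	 * rss_size and per-TC queue mapping from the req_rss_size
	 * recorded above.
	 */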
	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

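	/* The hardware takes each TC's RSS size as a power-of-two
	 * exponent, so round rss_size up and program its log2.
	 */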
	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the RSS indirection table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

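	/* The first descriptor returns HCLGE_32_BIT_DESC_NODATA_LEN fewer
	 * register words than the rest of the chain, so account for that
	 * when computing the number of descriptors needed.
	 */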
	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

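	/* As in the 32 bit query, the first descriptor carries
	 * HCLGE_64_BIT_DESC_NODATA_LEN fewer register values than the
	 * following ones; size the descriptor chain accordingly.
	 */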
	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

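	/* Each register block is padded out to a full line with separator
	 * values by hclge_get_regs(), hence the extra ("+ 1") line counted
	 * per block.
	 */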
	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetch per-PF register values from the PF's PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

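	/* Ring registers are replicated per TQP at a 0x200 byte stride in
	 * the BAR; dump each queue's copy and pad the line with separators.
	 */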
	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

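	/* TQP interrupt registers repeat per MSI-X vector at a 4 byte
	 * stride; vector 0 is the misc vector, hence num_msi_used - 1.
	 */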
	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetch PF common register values from the firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}

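/* Callback table through which the hnae3 framework and the hns3 netdev
 * driver operate this PF device.
 */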
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);