hns3_enet.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gre.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/geneve.h>

#include "hnae3.h"
#include "hns3_enet.h"
/* All hns3 tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "hns3_trace.h"

#define hns3_set_field(origin, shift, val)	((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S)	DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

#define hns3_rl_err(fmt, ...)						\
	do {								\
		if (net_ratelimit())					\
			netdev_err(fmt, ##__VA_ARGS__);			\
	} while (0)

static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);

static const char hns3_driver_name[] = "hns3";
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");

#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			   NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

#define HNS3_INNER_VLAN_TAG	1
#define HNS3_OUTER_VLAN_TAG	2

#define HNS3_MIN_TX_LEN		33U

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

	napi_schedule_irqoff(&tqp_vector->napi);

	return IRQ_HANDLED;
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* clear the affinity mask */
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

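/* Request a named IRQ for each TQP vector that has at least one TX or RX
 * ring and set its CPU affinity hint. The IRQ is requested with
 * IRQ_NOAUTOEN, so it stays disabled until hns3_vector_enable() calls
 * enable_irq().
 */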
static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "TxRx", txrx_int_idx++);
			txrx_int_idx++;
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Rx", rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Tx", tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			hns3_nic_uninit_irq(priv);
			return ret;
		}

		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);
	enable_irq(tqp_vector->vector_irq);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* This defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of interrupts
	 * per second. GL and RL are two ways to achieve interrupt coalescing.
	 */

	if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
	    !tqp_vector->rx_group.coal.adapt_enable)
216 217 218 219 220 221 222 223 224 225 226
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |=  HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->rx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->tx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
}

void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
}

static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;

	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 * 3. QL (Interrupt Quantity Limiter)
	 *
	 * Default: enable interrupt coalescing self-adaptive and GL
	 */
	tx_coal->adapt_enable = 1;
	rx_coal->adapt_enable = 1;

	tx_coal->int_gl = HNS3_INT_GL_50K;
	rx_coal->int_gl = HNS3_INT_GL_50K;

	rx_coal->flow_level = HNS3_FLOW_LOW;
	tx_coal->flow_level = HNS3_FLOW_LOW;

	/* For device versions V3 and above, GL can be configured in 1us
	 * units, so use the 1us unit.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
		tx_coal->unit_1us = 1;
		rx_coal->unit_1us = 1;
	}

	if (ae_dev->dev_specs.int_ql_max) {
		tx_coal->ql_enable = 1;
		rx_coal->ql_enable = 1;
		tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
		rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
		tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
		rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
	}
}

static void
hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
			     struct hns3_nic_priv *priv)
{
	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);

	if (tx_coal->ql_enable)
		hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql);

	if (rx_coal->ql_enable)
		hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql);
}

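/* Propagate the TC configuration to the stack (number of TCs and the
 * per-TC queue ranges) and set the real number of TX/RX queues in use.
 */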
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	unsigned int queue_size = kinfo->num_tqps;
	int i, ret;

	if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, tc_info->num_tc);
		if (ret) {
			netdev_err(netdev,
				   "netdev_set_num_tc fail, ret=%d!\n", ret);
			return ret;
		}

		for (i = 0; i < HNAE3_MAX_TC; i++) {
			if (!test_bit(i, &tc_info->tc_en))
				continue;

			netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
					    tc_info->tqp_offset[i]);
		}
	}

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

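/* The maximum channel count is limited by both the number of TQPs
 * allocated to this handle (spread evenly across the TCs) and the
 * hardware RSS size.
 */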
static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 alloc_tqps, max_rss_size, rss_size;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
	rss_size = alloc_tqps / h->kinfo.tc_info.num_tc;

	return min_t(u16, rss_size, max_rss_size);
}

static void hns3_tqp_enable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg |= BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_tqp_disable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg &= ~BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
	netdev->rx_cpu_rmap = NULL;
#endif
}

static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_enet_tqp_vector *tqp_vector;
	int i, ret;

	if (!netdev->rx_cpu_rmap) {
		netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
		if (!netdev->rx_cpu_rmap)
			return -ENOMEM;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
				       tqp_vector->vector_irq);
		if (ret) {
			hns3_free_rx_cpu_rmap(netdev);
			return ret;
		}
	}
#endif
	return 0;
}

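/* Bring the data path up: reset all rings, enable the per-vector NAPI and
 * IRQs, enable the RCB queues, then start the ae_dev. On failure, the
 * queues and vectors that were already enabled are rolled back in reverse
 * order.
 */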
static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns3_nic_reset_all_ring(h);
	if (ret)
		return ret;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* enable rcb */
	for (j = 0; j < h->kinfo.num_tqps; j++)
		hns3_tqp_enable(h->kinfo.tqp[j]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret) {
		set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
		while (j--)
			hns3_tqp_disable(h->kinfo.tqp[j]);

		for (j = i - 1; j >= 0; j--)
			hns3_vector_disable(&priv->tqp_vector[j]);
	}

	return ret;
}

static void hns3_config_xps(struct hns3_nic_priv *priv)
{
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
		struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;

		while (ring) {
			int ret;

			ret = netif_set_xps_queue(priv->netdev,
						  &tqp_vector->affinity_mask,
						  ring->tqp->tqp_index);
			if (ret)
				netdev_warn(priv->netdev,
					    "set xps queue failed: %d", ret);

			ring = ring->next;
		}
	}
}

static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo;
	int i, ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev, "net up fail, ret=%d!\n", ret);
		return ret;
	}

	kinfo = &h->kinfo;
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]);

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, true);

	hns3_config_xps(priv);

	netif_dbg(h, drv, netdev, "net open\n");

	return 0;
}

static void hns3_reset_tx_queue(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct netdev_queue *dev_queue;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring[i].queue_index);
		netdev_tx_reset_queue(dev_queue);
	}
}

static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* disable rcb */
	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_tqp_disable(h->kinfo.tqp[i]);

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
	 * during reset process, because driver may not be able
	 * to disable the ring through firmware when downing the netdev.
	 */
	if (!hns3_nic_resetting(netdev))
		hns3_clear_all_ring(priv->ae_handle, false);

	hns3_reset_tx_queue(priv->ae_handle);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return 0;

	netif_dbg(h, drv, netdev, "net stop\n");

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, false);

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	/* Ignore the request to remove the device address, because we store
	 * the device address and the other addresses of the uc list in the
	 * function's mac filter list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
	u8 flags = 0;

	if (netdev->flags & IFF_PROMISC) {
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
	} else {
		flags |= HNAE3_VLAN_FLTR;
		if (netdev->flags & IFF_ALLMULTI)
			flags |= HNAE3_USER_MPE;
	}

	return flags;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u8 new_flags;

	new_flags = hns3_get_netdev_flags(netdev);

	__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
	__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);

	/* When the user enables promiscuous mode, vlan filtering is disabled
	 * to let all packets in.
	 */
	h->netdev_flags = new_flags;
	hns3_request_update_promisc_mode(h);
}

void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (ops->request_update_promisc_mode)
		ops->request_update_promisc_mode(handle);
}

void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
	bool last_state;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
	    h->ae_algo->ops->enable_vlan_filter) {
		last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
		if (enable != last_state) {
			netdev_info(netdev,
				    "%s vlan filter\n",
				    enable ? "enable" : "disable");
			h->ae_algo->ops->enable_vlan_filter(h, enable);
		}
	}
}

static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (unlikely(ret < 0))
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(l4_paylen));
	} else {
		hdr_len = (l4.tcp->doff << 2) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(l4_paylen));
	}

	/* find the txbd field values */
	*paylen_fdop_ol4cs = skb->len - hdr_len;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);

	/* offload outer UDP header checksum */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
		hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	trace_hns3_tso(skb);

	return 0;
}

static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union l3_hdr_info l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}

/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and the
 * packet is a UDP packet whose dest port is one of the IANA-assigned tunnel
 * ports, the hardware is expected to do the checksum offload, but it will
 * not do so when the UDP dest port is 4789 or 6081.
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(skb->dev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	union l4_hdr_info l4;

	/* For device versions V3 and above, the hardware can do this
	 * checksum offload.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		return false;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation &&
	      (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
	      l4.udp->dest == htons(GENEVE_UDP_PORT))))
		return false;

	skb_checksum_help(skb);

	return true;
}

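/* Fill the outer (tunnel) L2/L3/L4 header lengths and types into the
 * ol_type_vlan_len_msec descriptor field. Lengths are encoded in units of
 * 2 bytes for L2 and 4 bytes for L3/L4, as the descriptor requires.
 */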
static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
				  u32 *ol_type_vlan_len_msec)
{
	u32 l2_len, l3_len, l4_len;
	unsigned char *il2_hdr;
	union l3_hdr_info l3;
	union l4_hdr_info l4;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute OL2 header size, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* compute OL3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);

	il2_hdr = skb_inner_mac_header(skb);
	/* compute OL4 header size, defined in 4 Bytes */
	l4_len = il2_hdr - l4.hdr;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);

	/* define outer network header type */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (skb_is_gso(skb))
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_CSUM);
		else
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_NO_CSUM);

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
			       HNS3_OL3T_IPV6);
	}

	if (ol4_proto == IPPROTO_UDP)
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_MAC_IN_UDP);
	else if (ol4_proto == IPPROTO_GRE)
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_NVGRE);
}

static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
			   u8 il4_proto, u32 *type_cs_vlan_tso,
			   u32 *ol_type_vlan_len_msec)
{
	unsigned char *l2_hdr = skb->data;
	u32 l4_proto = ol4_proto;
	union l4_hdr_info l4;
	union l3_hdr_info l3;
	u32 l2_len, l3_len;

	l4.hdr = skb_transport_header(skb);
	l3.hdr = skb_network_header(skb);

	/* handle encapsulation skb */
	if (skb->encapsulation) {
		/* If this is a not UDP/GRE encapsulation skb */
		if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
			/* drop the skb tunnel packet if hardware doesn't
			 * support it, because hardware can't calculate csum
			 * when TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already, the
			 * driver calculates the l4 checksum when not TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);

		/* switch to inner header */
		l2_hdr = skb_inner_mac_header(skb);
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
	} else if (l3.v6->version == 6) {
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV6);
	}

	/* compute inner(/normal) L2 header size, defined in 2 Bytes */
	l2_len = l3.hdr - l2_hdr;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       l4.tcp->doff);
		break;
	case IPPROTO_UDP:
		if (hns3_tunnel_csum_bug(skb))
			break;

		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       (sizeof(struct udphdr) >> 2));
		break;
	case IPPROTO_SCTP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       (sizeof(struct sctphdr) >> 2));
		break;
	default:
		/* drop the skb tunnel packet if hardware doesn't support it,
		 * because hardware can't calculate csum when TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already, the driver
		 * calculates the l4 checksum when not TSO.
		 */
		skb_checksum_help(skb);
		return 0;
	}

	return 0;
}

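/* Decide how the VLAN tag of this skb is handled: returns
 * HNS3_INNER_VLAN_TAG or HNS3_OUTER_VLAN_TAG when a HW-inserted tag should
 * be used, 0 when no tag handling is needed, or a negative errno on error.
 */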
static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
			     struct sk_buff *skb)
{
	struct hnae3_handle *handle = tx_ring->tqp->handle;
	struct hnae3_ae_dev *ae_dev;
	struct vlan_ethhdr *vhdr;
	int rc;

	if (!(skb->protocol == htons(ETH_P_8021Q) ||
	      skb_vlan_tag_present(skb)))
		return 0;

	/* Due to a HW limitation on HNAE3_DEVICE_VERSION_V2, if port-based
	 * VLAN insertion is enabled, only one VLAN header is allowed in the
	 * skb, otherwise it will cause a RAS error.
	 */
	ae_dev = pci_get_drvdata(handle->pdev);
	if (unlikely(skb_vlan_tagged_multi(skb) &&
		     ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		     handle->port_base_vlan_state ==
		     HNAE3_PORT_BASE_VLAN_ENABLE))
		return -EINVAL;

	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off and the stack
		 * sets the protocol to 802.1q, the driver just needs to
		 * set the protocol to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	if (skb_vlan_tag_present(skb)) {
		/* Based on hw strategy, use out_vtag in two layer tag case,
		 * and use inner_vtag in one tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) &&
		    handle->port_base_vlan_state ==
		    HNAE3_PORT_BASE_VLAN_DISABLE)
			rc = HNS3_OUTER_VLAN_TAG;
		else
			rc = HNS3_INNER_VLAN_TAG;

		skb->protocol = vlan_get_protocol(skb);
		return rc;
	}

	rc = skb_cow_head(skb, 0);
	if (unlikely(rc < 0))
		return rc;

	vhdr = (struct vlan_ethhdr *)skb->data;
	vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
					 & VLAN_PRIO_MASK);

	skb->protocol = vlan_get_protocol(skb);
	return 0;
}

/* check if the hardware is capable of checksum offloading */
static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(skb->dev);

	/* Note that, due to backward compatibility of the TX descriptor,
	 * HW checksum of non-IP packets and GSO packets is handled in a
	 * different place in the code below.
	 */
	if (skb->csum_not_inet || skb_is_gso(skb) ||
	    !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
		return false;

	return true;
}

static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
			      struct sk_buff *skb, struct hns3_desc *desc)
{
	u32 ol_type_vlan_len_msec = 0;
	u32 paylen_ol4cs = skb->len;
	u32 type_cs_vlan_tso = 0;
	u16 mss_hw_csum = 0;
	u16 inner_vtag = 0;
	u16 out_vtag = 0;
	int ret;

	ret = hns3_handle_vtags(ring, skb);
	if (unlikely(ret < 0)) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_vlan_err++;
		u64_stats_update_end(&ring->syncp);
		return ret;
	} else if (ret == HNS3_INNER_VLAN_TAG) {
		inner_vtag = skb_vlan_tag_get(skb);
		inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
				VLAN_PRIO_MASK;
		hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
	} else if (ret == HNS3_OUTER_VLAN_TAG) {
		out_vtag = skb_vlan_tag_get(skb);
		out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
				VLAN_PRIO_MASK;
		hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
			       1);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 ol4_proto, il4_proto;

		if (hns3_check_hw_tx_csum(skb)) {
			/* set checksum start and offset, defined in 2 Bytes */
			hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
				       skb_checksum_start_offset(skb) >> 1);
			hns3_set_field(ol_type_vlan_len_msec,
				       HNS3_TXD_CSUM_OFFSET_S,
				       skb->csum_offset >> 1);
			mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
			goto out_hw_tx_csum;
		}

		skb_reset_mac_len(skb);

		ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
		if (unlikely(ret < 0)) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.tx_l4_proto_err++;
			u64_stats_update_end(&ring->syncp);
			return ret;
		}

		ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
				      &type_cs_vlan_tso,
				      &ol_type_vlan_len_msec);
		if (unlikely(ret < 0)) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.tx_l2l3l4_err++;
			u64_stats_update_end(&ring->syncp);
			return ret;
		}

		ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
				   &type_cs_vlan_tso);
		if (unlikely(ret < 0)) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.tx_tso_err++;
			u64_stats_update_end(&ring->syncp);
			return ret;
		}
	}

out_hw_tx_csum:
	/* Set txbd */
	desc->tx.ol_type_vlan_len_msec =
		cpu_to_le32(ol_type_vlan_len_msec);
	desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
	desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs);
	desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum);
	desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
	desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);

	return 0;
}

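/* DMA-map one buffer (linear skb data or a page frag) and fill one or more
 * TX BDs for it, splitting buffers larger than HNS3_MAX_BD_SIZE across
 * several BDs. Returns the number of BDs used or a negative errno.
 */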
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
			  unsigned int size, enum hns_desc_type type)
{
#define HNS3_LIKELY_BD_NUM	1

	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	struct device *dev = ring_to_dev(ring);
	skb_frag_t *frag;
	unsigned int frag_buf_num;
	int k, sizeoflast;
	dma_addr_t dma;

	if (type == DESC_TYPE_FRAGLIST_SKB ||
	    type == DESC_TYPE_SKB) {
		struct sk_buff *skb = (struct sk_buff *)priv;

		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	} else {
		frag = (skb_frag_t *)priv;
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
	}

	if (unlikely(dma_mapping_error(dev, dma))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		return -ENOMEM;
	}

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	if (likely(size <= HNS3_MAX_BD_SIZE)) {
		desc->addr = cpu_to_le64(dma);
		desc->tx.send_size = cpu_to_le16(size);
		desc->tx.bdtp_fe_sc_vld_ra_ri =
			cpu_to_le16(BIT(HNS3_TXD_VLD_B));

		trace_hns3_tx_desc(ring, ring->next_to_use);
		ring_ptr_move_fw(ring, next_to_use);
		return HNS3_LIKELY_BD_NUM;
	}

	frag_buf_num = hns3_tx_bd_count(size);
	sizeoflast = size % HNS3_MAX_BD_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When frag size is bigger than hardware limit, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		/* now, fill the descriptor */
		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
				     (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
		desc->tx.bdtp_fe_sc_vld_ra_ri =
				cpu_to_le16(BIT(HNS3_TXD_VLD_B));

		trace_hns3_tx_desc(ring, ring->next_to_use);
		/* move ring pointer to next */
		ring_ptr_move_fw(ring, next_to_use);

		desc = &ring->desc[ring->next_to_use];
	}

	return frag_buf_num;
}

static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
				    unsigned int bd_num)
{
	unsigned int size;
	int i;

	size = skb_headlen(skb);
	while (size > HNS3_MAX_BD_SIZE) {
		bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
		size -= HNS3_MAX_BD_SIZE;

		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	if (size) {
		bd_size[bd_num++] = size;
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		if (!size)
			continue;

		while (size > HNS3_MAX_BD_SIZE) {
			bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
			size -= HNS3_MAX_BD_SIZE;

			if (bd_num > HNS3_MAX_TSO_BD_NUM)
				return bd_num;
		}

		bd_size[bd_num++] = size;
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	return bd_num;
}

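/* Compute how many TX BDs this skb (including its frag list) needs and
 * record each BD size in bd_size. A return value above HNS3_MAX_TSO_BD_NUM
 * tells the caller the skb cannot be sent as-is.
 */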
static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
				   u8 max_non_tso_bd_num)
{
	struct sk_buff *frag_skb;
	unsigned int bd_num = 0;

	/* If the total len is within the max bd limit */
	if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
		   skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
		return skb_shinfo(skb)->nr_frags + 1U;

	/* The cases below will always be linearized; return
	 * HNS3_MAX_TSO_BD_NUM + 1U to make sure linearization happens.
	 */
	if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
		     (!skb_is_gso(skb) && skb->len >
		      HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
		return HNS3_MAX_TSO_BD_NUM + 1U;

	bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);

	if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
		return bd_num;

	skb_walk_frags(skb, frag_skb) {
		bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}

	return bd_num;
}

static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
{
	if (!skb->encapsulation)
		return skb_transport_offset(skb) + tcp_hdrlen(skb);

	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}

/* The HW needs every continuous run of max_non_tso_bd_num buffers to hold
 * more data than the MSS. We simplify this by ensuring that skb_headlen plus
 * the first continuous max_non_tso_bd_num - 1 frags are larger than the GSO
 * header len plus the MSS, and that every other continuous run of
 * max_non_tso_bd_num - 1 frags is larger than the MSS, except for the last
 * max_non_tso_bd_num - 1 frags.
 */
static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
				     unsigned int bd_num, u8 max_non_tso_bd_num)
{
	unsigned int tot_len = 0;
	int i;

	for (i = 0; i < max_non_tso_bd_num - 1U; i++)
		tot_len += bd_size[i];

	/* ensure the first max_non_tso_bd_num frags are greater than
	 * mss + header
	 */
	if (tot_len + bd_size[max_non_tso_bd_num - 1U] <
	    skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
		return true;

	/* ensure every continuous run of max_non_tso_bd_num - 1 buffers
	 * is greater than mss except the last one.
	 */
	for (i = 0; i < bd_num - max_non_tso_bd_num; i++) {
		tot_len -= bd_size[i];
		tot_len += bd_size[i + max_non_tso_bd_num - 1U];

		if (tot_len < skb_shinfo(skb)->gso_size)
			return true;
	}

	return false;
}

void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
	int i;

	for (i = 0; i < MAX_SKB_FRAGS; i++)
		size[i] = skb_frag_size(&shinfo->frags[i]);
}

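/* Check whether the skb fits within the BD limits, linearizing it when
 * necessary, and stop the subqueue when the ring does not have enough free
 * descriptors. Returns the number of BDs needed or a negative errno.
 */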
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
				  struct net_device *netdev,
				  struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	u8 max_non_tso_bd_num = priv->max_non_tso_bd_num;
	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
	unsigned int bd_num;

	bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
	if (unlikely(bd_num > max_non_tso_bd_num)) {
		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
		    !hns3_skb_need_linearized(skb, bd_size, bd_num,
					      max_non_tso_bd_num)) {
			trace_hns3_over_max_bd(skb);
			goto out;
		}

		if (__skb_linearize(skb))
			return -ENOMEM;

		bd_num = hns3_tx_bd_count(skb->len);
		if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
		    (!skb_is_gso(skb) &&
		     bd_num > max_non_tso_bd_num)) {
			trace_hns3_over_max_bd(skb);
			return -ENOMEM;
		}

		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_copy++;
		u64_stats_update_end(&ring->syncp);
	}

out:
	if (likely(ring_space(ring) >= bd_num))
		return bd_num;

	netif_stop_subqueue(netdev, ring->queue_index);
	smp_mb(); /* Memory barrier before checking ring_space */

	/* Start queue in case hns3_clean_tx_ring has just made room
	 * available and has not seen the queue stopped state performed
	 * by netif_stop_subqueue above.
	 */
	if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
	    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
		netif_start_subqueue(netdev, ring->queue_index);
		return bd_num;
	}

	return -EBUSY;
}

static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
{
	struct device *dev = ring_to_dev(ring);
	unsigned int i;

	for (i = 0; i < ring->desc_num; i++) {
		struct hns3_desc *desc = &ring->desc[ring->next_to_use];

		memset(desc, 0, sizeof(*desc));

		/* check if this is where we started */
		if (ring->next_to_use == next_to_use_orig)
			break;

		/* rollback one */
		ring_ptr_move_bw(ring, next_to_use);

		if (!ring->desc_cb[ring->next_to_use].dma)
			continue;

		/* unmap the descriptor dma address */
		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
		    ring->desc_cb[ring->next_to_use].type ==
		    DESC_TYPE_FRAGLIST_SKB)
			dma_unmap_single(dev,
					 ring->desc_cb[ring->next_to_use].dma,
					ring->desc_cb[ring->next_to_use].length,
					DMA_TO_DEVICE);
		else if (ring->desc_cb[ring->next_to_use].length)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);

		ring->desc_cb[ring->next_to_use].length = 0;
		ring->desc_cb[ring->next_to_use].dma = 0;
		ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN;
	}
}

static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
				 struct sk_buff *skb, enum hns_desc_type type)
{
	unsigned int size = skb_headlen(skb);
	int i, ret, bd_num = 0;

	if (size) {
		ret = hns3_fill_desc(ring, skb, size, type);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		size = skb_frag_size(frag);
		if (!size)
			continue;

		ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	return bd_num;
}

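/* Ring the TX doorbell for the descriptors queued so far. When the
 * doorbell is deferred (xmit_more), only account the pending buffers and
 * update the tx_more statistic.
 */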
static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
			     bool doorbell)
{
	ring->pending_buf += num;

	if (!doorbell) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_more++;
		u64_stats_update_end(&ring->syncp);
		return;
	}

	if (!ring->pending_buf)
		return;

	writel(ring->pending_buf,
	       ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
	ring->pending_buf = 0;
	WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}

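/* Main transmit entry point: pad short frames, reserve ring space, fill
 * the skb and its frag list into TX BDs, set the FE bit on the last BD and
 * ring the doorbell unless more frames are pending (xmit_more).
 */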
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
	struct netdev_queue *dev_queue;
	int pre_ntu, next_to_use_head;
	struct sk_buff *frag_skb;
	int bd_num = 0;
	bool doorbell;
	int ret;

	/* Hardware can only handle short frames above 32 bytes */
	if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
		hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
		return NETDEV_TX_OK;
	}

	/* Prefetch the data used later */
	prefetch(skb->data);

	ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
	if (unlikely(ret <= 0)) {
		if (ret == -EBUSY) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.tx_busy++;
			u64_stats_update_end(&ring->syncp);
			hns3_tx_doorbell(ring, 0, true);
			return NETDEV_TX_BUSY;
		} else if (ret == -ENOMEM) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.sw_err_cnt++;
			u64_stats_update_end(&ring->syncp);
		}

		hns3_rl_err(netdev, "xmit error: %d!\n", ret);
		goto out_err_tx_ok;
	}

	next_to_use_head = ring->next_to_use;

	ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
	if (unlikely(ret < 0))
		goto fill_err;

	ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
	if (unlikely(ret < 0))
		goto fill_err;

	bd_num += ret;

	skb_walk_frags(skb, frag_skb) {
		ret = hns3_fill_skb_to_desc(ring, frag_skb,
					    DESC_TYPE_FRAGLIST_SKB);
		if (unlikely(ret < 0))
			goto fill_err;

		bd_num += ret;
	}

	pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
					(ring->desc_num - 1);
	ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
				cpu_to_le16(BIT(HNS3_TXD_FE_B));
	trace_hns3_tx_desc(ring, pre_ntu);

	/* Complete translate all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
	doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
					  netdev_xmit_more());
	hns3_tx_doorbell(ring, bd_num, doorbell);

	return NETDEV_TX_OK;

fill_err:
	hns3_clear_desc(ring, next_to_use_head);

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
	return NETDEV_TX_OK;
}

static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    mac_addr->sa_data);
		return 0;
	}

	/* For VF device, if there is a perm_addr, then the user will not
	 * be allowed to change the address.
	 */
	if (!hns3_is_phys_func(h->pdev) &&
	    !is_zero_ether_addr(netdev->perm_addr)) {
		netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n",
			   netdev->perm_addr, mac_addr->sa_data);
		return -EPERM;
	}

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

	return 0;
}

static int hns3_nic_do_ioctl(struct net_device *netdev,
			     struct ifreq *ifr, int cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!h->ae_algo->ops->do_ioctl)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
}

static int hns3_nic_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	bool enable;
	int ret;

	if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
		enable = !!(features & NETIF_F_GRO_HW);
		ret = h->ae_algo->ops->set_gro_en(h, enable);
		if (ret)
			return ret;
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
		enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
		ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
		if (ret)
			return ret;
	}

	if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
		enable = !!(features & NETIF_F_NTUPLE);
		h->ae_algo->ops->enable_fd(h, enable);
	}

	netdev->features = features;
	return 0;
}

static netdev_features_t hns3_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
#define HNS3_MAX_HDR_LEN	480U
#define HNS3_MAX_L4_HDR_LEN	60U

	size_t len;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	if (skb->encapsulation)
		len = skb_inner_transport_header(skb) - skb->data;
	else
		len = skb_transport_header(skb) - skb->data;

	/* Assume L4 is 60 bytes, as TCP is the only protocol with a
	 * flexible header length, and its max len is 60 bytes.
	 */
	len += HNS3_MAX_L4_HDR_LEN;

	/* Hardware only supports checksum on the skb with a max header
	 * len of 480 bytes.
	 */
	if (len > HNS3_MAX_HDR_LEN)
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

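/* Aggregate the per-ring TX/RX counters (read under u64_stats syncp)
 * together with the netdev stats maintained by the ae_dev into
 * rtnl_link_stats64.
 */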
static void hns3_nic_get_stats64(struct net_device *netdev,
				 struct rtnl_link_stats64 *stats)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hnae3_handle *handle = priv->ae_handle;
	struct hns3_enet_ring *ring;
	u64 rx_length_errors = 0;
	u64 rx_crc_errors = 0;
	u64 rx_multicast = 0;
	unsigned int start;
	u64 tx_errors = 0;
	u64 rx_errors = 0;
	unsigned int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	u64 tx_drop = 0;
	u64 rx_drop = 0;

	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	handle->ae_algo->ops->update_stats(handle, &netdev->stats);

	for (idx = 0; idx < queue_num; idx++) {
		/* fetch the tx stats */
		ring = &priv->ring[idx];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			tx_bytes += ring->stats.tx_bytes;
			tx_pkts += ring->stats.tx_pkts;
			tx_drop += ring->stats.sw_err_cnt;
			tx_drop += ring->stats.tx_vlan_err;
			tx_drop += ring->stats.tx_l4_proto_err;
			tx_drop += ring->stats.tx_l2l3l4_err;
			tx_drop += ring->stats.tx_tso_err;
			tx_errors += ring->stats.sw_err_cnt;
			tx_errors += ring->stats.tx_vlan_err;
			tx_errors += ring->stats.tx_l4_proto_err;
			tx_errors += ring->stats.tx_l2l3l4_err;
			tx_errors += ring->stats.tx_tso_err;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		/* fetch the rx stats */
		ring = &priv->ring[idx + queue_num];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			rx_bytes += ring->stats.rx_bytes;
			rx_pkts += ring->stats.rx_pkts;
			rx_drop += ring->stats.l2_err;
			rx_errors += ring->stats.l2_err;
			rx_errors += ring->stats.l3l4_csum_err;
			rx_crc_errors += ring->stats.l2_err;
			rx_multicast += ring->stats.rx_multicast;
			rx_length_errors += ring->stats.err_pkt_len;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = rx_errors;
	stats->multicast = rx_multicast;
	stats->rx_length_errors = rx_length_errors;
	stats->rx_crc_errors = rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	stats->tx_errors = tx_errors;
	stats->rx_dropped = rx_drop;
	stats->tx_dropped = tx_drop;
	stats->collisions = netdev->stats.collisions;
	stats->rx_over_errors = netdev->stats.rx_over_errors;
	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = netdev->stats.tx_window_errors;
	stats->rx_compressed = netdev->stats.rx_compressed;
	stats->tx_compressed = netdev->stats.tx_compressed;
}

static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct hnae3_knic_private_info *kinfo;
	u8 tc = mqprio_qopt->qopt.num_tc;
	u16 mode = mqprio_qopt->mode;
	u8 hw = mqprio_qopt->qopt.hw;
	struct hnae3_handle *h;

	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
	       mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
		return -EOPNOTSUPP;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!netdev)
		return -EINVAL;

1812 1813 1814
	h = hns3_get_handle(netdev);
	kinfo = &h->kinfo;

1815 1816
	netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);

1817
	return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1818
		kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP;
1819 1820
}

1821
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1822
			     void *type_data)
1823
{
1824
	if (type != TC_SETUP_QDISC_MQPRIO)
1825
		return -EOPNOTSUPP;
1826

1827
	return hns3_setup_tc(dev, type_data);
1828 1829 1830 1831 1832
}

static int hns3_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);

	return ret;
}

static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);

	return ret;
}

static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
				u8 qos, __be16 vlan_proto)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	netif_dbg(h, drv, netdev,
		  "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
		  vf, vlan, qos, ntohs(vlan_proto));

	if (h->ae_algo->ops->set_vf_vlan_filter)
		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
							  qos, vlan_proto);

	return ret;
}

static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (!handle->ae_algo->ops->set_vf_spoofchk)
		return -EOPNOTSUPP;

	return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable);
}

static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);

	if (!handle->ae_algo->ops->set_vf_trust)
		return -EOPNOTSUPP;

	return handle->ae_algo->ops->set_vf_trust(handle, vf, enable);
}

static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (!h->ae_algo->ops->set_mtu)
		return -EOPNOTSUPP;

	netif_dbg(h, drv, netdev,
		  "change mtu from %u to %d\n", netdev->mtu, new_mtu);

	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
	if (ret)
		netdev_err(netdev, "failed to change MTU in hardware %d\n",
			   ret);
	else
		netdev->mtu = new_mtu;

	return ret;
}

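/* Locate the TX queue that triggered the watchdog, then dump its software
 * state, ring statistics, MAC pause counters and hardware ring registers to
 * help diagnose the timeout. Returns false if no stopped queue is found.
 */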
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = hns3_get_handle(ndev);
	struct hns3_enet_ring *tx_ring;
	struct napi_struct *napi;
	int timeout_queue = 0;
	int hw_head, hw_tail;
	int fbd_num, fbd_oft;
	int ebd_num, ebd_oft;
	int bd_num, bd_err;
	int ring_en, tc;
	int i;

	/* Find the stopped queue the same way the stack does */
	for (i = 0; i < ndev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(ndev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + ndev->watchdog_timeo))) {
			timeout_queue = i;
			netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
				    q->state,
				    jiffies_to_msecs(jiffies - trans_start));
			break;
		}
	}

	if (i == ndev->num_tx_queues) {
		netdev_info(ndev,
			    "no netdev TX timeout queue found, timeout count: %llu\n",
			    priv->tx_timeout_count);
		return false;
	}

	priv->tx_timeout_count++;

	tx_ring = &priv->ring[timeout_queue];
	napi = &tx_ring->tqp_vector->napi;

	netdev_info(ndev,
		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
		    priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
		    tx_ring->next_to_clean, napi->state);

	netdev_info(ndev,
		    "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
		    tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
		    tx_ring->stats.sw_err_cnt, tx_ring->pending_buf);

	netdev_info(ndev,
		    "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
		    tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
		    tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);

	/* When the MAC has received many continuous pause frames, it cannot
	 * send packets, which may cause a TX timeout
	 */
	if (h->ae_algo->ops->get_mac_stats) {
		struct hns3_mac_stats mac_stats;

		h->ae_algo->ops->get_mac_stats(h, &mac_stats);
		netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
			    mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
	}

	hw_head = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_HEAD_REG);
	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_TAIL_REG);
	fbd_num = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_FBDNUM_REG);
	fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_OFFSET_REG);
	ebd_num = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_EBDNUM_REG);
	ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_EBD_OFFSET_REG);
	bd_num = readl_relaxed(tx_ring->tqp->io_base +
			       HNS3_RING_TX_RING_BD_NUM_REG);
	bd_err = readl_relaxed(tx_ring->tqp->io_base +
			       HNS3_RING_TX_RING_BD_ERR_REG);
	ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
	tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);

	netdev_info(ndev,
		    "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
		    bd_num, hw_head, hw_tail, bd_err,
		    readl(tx_ring->tqp_vector->mask_addr));
	netdev_info(ndev,
		    "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
		    ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);

	return true;
}

static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;

	if (!hns3_get_tx_timeo_queue_info(ndev))
		return;

	/* request the reset, and let hclge determine
	 * which reset level should be done
	 */
	if (h->ae_algo->ops->reset_event)
		h->ae_algo->ops->reset_event(h->pdev, h);
}

#ifdef CONFIG_RFS_ACCEL
static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			      u16 rxq_index, u32 flow_id)
{
	struct hnae3_handle *h = hns3_get_handle(dev);
	struct flow_keys fkeys;

	if (!h->ae_algo->ops->add_arfs_entry)
		return -EOPNOTSUPP;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
		return -EPROTONOSUPPORT;

	if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
	     fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
	    (fkeys.basic.ip_proto != IPPROTO_TCP &&
	     fkeys.basic.ip_proto != IPPROTO_UDP))
		return -EPROTONOSUPPORT;

	return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
}
#endif

static int hns3_nic_get_vf_config(struct net_device *ndev, int vf,
				  struct ifla_vf_info *ivf)
{
	struct hnae3_handle *h = hns3_get_handle(ndev);

	if (!h->ae_algo->ops->get_vf_config)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->get_vf_config(h, vf, ivf);
}

static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf,
				      int link_state)
{
	struct hnae3_handle *h = hns3_get_handle(ndev);

	if (!h->ae_algo->ops->set_vf_link_state)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->set_vf_link_state(h, vf, link_state);
}

static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
				int min_tx_rate, int max_tx_rate)
{
	struct hnae3_handle *h = hns3_get_handle(ndev);

	if (!h->ae_algo->ops->set_vf_rate)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate,
					    false);
}

static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo->ops->set_vf_mac)
		return -EOPNOTSUPP;

	if (is_multicast_ether_addr(mac)) {
		netdev_err(netdev,
			   "Invalid MAC:%pM specified. Could not set MAC\n",
			   mac);
		return -EINVAL;
	}

	return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
}

static const struct net_device_ops hns3_nic_netdev_ops = {
	.ndo_open		= hns3_nic_net_open,
	.ndo_stop		= hns3_nic_net_stop,
	.ndo_start_xmit		= hns3_nic_net_xmit,
	.ndo_tx_timeout		= hns3_nic_net_timeout,
	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
	.ndo_do_ioctl		= hns3_nic_do_ioctl,
	.ndo_change_mtu		= hns3_nic_change_mtu,
	.ndo_set_features	= hns3_nic_set_features,
	.ndo_features_check	= hns3_features_check,
	.ndo_get_stats64	= hns3_nic_get_stats64,
	.ndo_setup_tc		= hns3_nic_setup_tc,
	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
	.ndo_set_vf_spoofchk	= hns3_set_vf_spoofchk,
	.ndo_set_vf_trust	= hns3_set_vf_trust,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= hns3_rx_flow_steer,
#endif
	.ndo_get_vf_config	= hns3_nic_get_vf_config,
	.ndo_set_vf_link_state	= hns3_nic_set_vf_link_state,
	.ndo_set_vf_rate	= hns3_nic_set_vf_rate,
	.ndo_set_vf_mac		= hns3_nic_set_vf_mac,
};

bool hns3_is_phys_func(struct pci_dev *pdev)
{
	u32 dev_id = pdev->device;

	switch (dev_id) {
	case HNAE3_DEV_ID_GE:
	case HNAE3_DEV_ID_25GE:
	case HNAE3_DEV_ID_25GE_RDMA:
	case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_50GE_RDMA:
	case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_100G_RDMA_MACSEC:
	case HNAE3_DEV_ID_200G_RDMA:
		return true;
	case HNAE3_DEV_ID_VF:
	case HNAE3_DEV_ID_RDMA_DCB_PFC_VF:
		return false;
	default:
		dev_warn(&pdev->dev, "un-recognized pci device-id %u",
			 dev_id);
	}

	return false;
}

static void hns3_disable_sriov(struct pci_dev *pdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(pdev);
}

/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hnae3_ae_dev *ae_dev;
	int ret;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
	if (!ae_dev)
		return -ENOMEM;

	ae_dev->pdev = pdev;
	ae_dev->flag = ent->driver_data;
	pci_set_drvdata(pdev, ae_dev);

	ret = hnae3_register_ae_dev(ae_dev);
	if (ret)
		pci_set_drvdata(pdev, NULL);

	return ret;
}

/* hns3_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void hns3_remove(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
		hns3_disable_sriov(pdev);

	hnae3_unregister_ae_dev(ae_dev);
	pci_set_drvdata(pdev, NULL);
}

/**
 * hns3_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int ret;

	if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
		dev_warn(&pdev->dev, "Can not config SRIOV\n");
		return -EINVAL;
	}

	if (num_vfs) {
		ret = pci_enable_sriov(pdev, num_vfs);
		if (ret)
			dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
		else
			return num_vfs;
	} else if (!pci_vfs_assigned(pdev)) {
		pci_disable_sriov(pdev);
	} else {
		dev_warn(&pdev->dev,
			 "Unable to free VFs because some are assigned to VMs.\n");
	}

	return 0;
}

static void hns3_shutdown(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	hnae3_unregister_ae_dev(ae_dev);
	pci_set_drvdata(pdev, NULL);

	if (system_state == SYSTEM_POWER_OFF)
		pci_set_power_state(pdev, PCI_D3hot);
}

static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	pci_ers_result_t ret;

	dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (!ae_dev || !ae_dev->ops) {
		dev_err(&pdev->dev,
			"Can't recover - error happened before device initialized\n");
		return PCI_ERS_RESULT_NONE;
	}

	if (ae_dev->ops->handle_hw_ras_error)
		ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
	else
		return PCI_ERS_RESULT_NONE;

	return ret;
}

static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	const struct hnae3_ae_ops *ops;
	enum hnae3_reset_type reset_type;
	struct device *dev = &pdev->dev;

	if (!ae_dev || !ae_dev->ops)
		return PCI_ERS_RESULT_NONE;

	ops = ae_dev->ops;
	/* request the reset */
	if (ops->reset_event && ops->get_reset_level &&
	    ops->set_default_reset_request) {
		if (ae_dev->hw_err_reset_req) {
			reset_type = ops->get_reset_level(ae_dev,
						&ae_dev->hw_err_reset_req);
			ops->set_default_reset_request(ae_dev, reset_type);
			dev_info(dev, "requesting reset due to PCI error\n");
			ops->reset_event(pdev, NULL);
		}

		return PCI_ERS_RESULT_RECOVERED;
	}

	return PCI_ERS_RESULT_DISCONNECT;
}

static void hns3_reset_prepare(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "FLR prepare\n");
	if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
		ae_dev->ops->flr_prepare(ae_dev);
}

static void hns3_reset_done(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "FLR done\n");
	if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
		ae_dev->ops->flr_done(ae_dev);
}

static const struct pci_error_handlers hns3_err_handler = {
	.error_detected = hns3_error_detected,
	.slot_reset     = hns3_slot_reset,
	.reset_prepare	= hns3_reset_prepare,
	.reset_done	= hns3_reset_done,
};

static struct pci_driver hns3_driver = {
	.name     = hns3_driver_name,
	.id_table = hns3_pci_tbl,
	.probe    = hns3_probe,
	.remove   = hns3_remove,
	.shutdown = hns3_shutdown,
	.sriov_configure = hns3_pci_sriov_configure,
	.err_handler    = &hns3_err_handler,
};

/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct pci_dev *pdev = h->pdev;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;

	netdev->vlan_features |= NETIF_F_RXCSUM |
		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;

	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		netdev->hw_features |= NETIF_F_GRO_HW;
		netdev->features |= NETIF_F_GRO_HW;

		if (!(h->flags & HNAE3_SUPPORT_VF)) {
			netdev->hw_features |= NETIF_F_NTUPLE;
			netdev->features |= NETIF_F_NTUPLE;
		}
	}

	if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) {
		netdev->hw_features |= NETIF_F_GSO_UDP_L4;
		netdev->features |= NETIF_F_GSO_UDP_L4;
		netdev->vlan_features |= NETIF_F_GSO_UDP_L4;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
	}

	if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) {
		netdev->hw_features |= NETIF_F_HW_CSUM;
		netdev->features |= NETIF_F_HW_CSUM;
		netdev->vlan_features |= NETIF_F_HW_CSUM;
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	} else {
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	}

	if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) {
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}
}

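/* Allocate one rx buffer page and take a large page reference up front, so
 * later recycling only needs to adjust cb->pagecnt_bias instead of touching
 * the atomic page refcount for every received buffer.
 */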
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	unsigned int order = hns3_page_order(ring);
	struct page *p;

	p = dev_alloc_pages(order);
	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf  = page_address(p);
	cb->length = hns3_page_size(ring);
	cb->type = DESC_TYPE_PAGE;
	page_ref_add(p, USHRT_MAX - 1);
	cb->pagecnt_bias = USHRT_MAX;

	return 0;
}

static void hns3_free_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb, int budget)
{
	if (cb->type == DESC_TYPE_SKB)
		napi_consume_skb(cb->priv, budget);
	else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
		__page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
	memset(cb, 0, sizeof(*cb));
}

static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
		return -EIO;

	return 0;
}

static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
			      struct hns3_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else if (cb->length)
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc[i].addr = 0;
}

static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
				    int budget)
{
	struct hns3_desc_cb *cb = &ring->desc_cb[i];

	if (!ring->desc_cb[i].dma)
		return;

	hns3_buffer_detach(ring, i);
	hns3_free_buffer(ring, cb, budget);
}

static void hns3_free_buffers(struct hns3_enet_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hns3_free_buffer_detach(ring, i, 0);
}

/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	hns3_free_buffers(ring);

	if (ring->desc) {
		dma_free_coherent(ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
				     struct hns3_desc_cb *cb)
{
	int ret;

	ret = hns3_alloc_buffer(ring, cb);
	if (ret)
		goto out;

	ret = hns3_map_buffer(ring, cb);
	if (ret)
		goto out_with_buf;

	return 0;

out_with_buf:
	hns3_free_buffer(ring, cb, 0);
out:
	return ret;
}

static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
{
	int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]);

	if (ret)
		return ret;

	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);

	return 0;
}

/* Allocate memory for the raw packets and map them for DMA */
static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hns3_alloc_and_attach_buffer(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hns3_free_buffer_detach(ring, j, 0);
	return ret;
}

/* detach an in-use buffer and replace it with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
				struct hns3_desc_cb *res_cb)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc_cb[i] = *res_cb;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
	ring->desc[i].rx.bd_base_info = 0;
}

static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
					 ring->desc_cb[i].page_offset);
	ring->desc[i].rx.bd_base_info = 0;

	dma_sync_single_for_device(ring_to_dev(ring),
			ring->desc_cb[i].dma + ring->desc_cb[i].page_offset,
			hns3_buf_size(ring),
			DMA_FROM_DEVICE);
}

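/* Reclaim completed TX descriptors: walk from next_to_clean towards
 * last_to_use, stop at the first descriptor the hardware still owns (VLD
 * bit set), free the attached buffers, and finally publish the new
 * next_to_clean for the xmit path.
 */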
static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
				  int *bytes, int *pkts, int budget)
{
	/* pair with ring->last_to_use update in hns3_tx_doorbell(),
	 * smp_store_release() is not used in hns3_tx_doorbell() because
	 * the doorbell operation already has the needed barrier operation.
	 */
	int ltu = smp_load_acquire(&ring->last_to_use);
	int ntc = ring->next_to_clean;
	struct hns3_desc_cb *desc_cb;
	bool reclaimed = false;
	struct hns3_desc *desc;

	while (ltu != ntc) {
		desc = &ring->desc[ntc];

		if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) &
				BIT(HNS3_TXD_VLD_B))
			break;

		desc_cb = &ring->desc_cb[ntc];
		(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
		(*bytes) += desc_cb->length;
		/* desc_cb will be cleaned, after hns3_free_buffer_detach */
		hns3_free_buffer_detach(ring, ntc, budget);

		if (++ntc == ring->desc_num)
			ntc = 0;

		/* Issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ntc]);
		reclaimed = true;
	}

	if (unlikely(!reclaimed))
		return false;

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * ring_space called by hns3_nic_net_xmit.
	 */
	smp_store_release(&ring->next_to_clean, ntc);
	return true;
}

void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
{
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct netdev_queue *dev_queue;
	int bytes, pkts;

	bytes = 0;
	pkts = 0;

	if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget)))
		return;

	ring->tqp_vector->tx_group.total_bytes += bytes;
	ring->tqp_vector->tx_group.total_packets += pkts;

	u64_stats_update_begin(&ring->syncp);
	ring->stats.tx_bytes += bytes;
	ring->stats.tx_pkts += pkts;
	u64_stats_update_end(&ring->syncp);

	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(netif_carrier_ok(netdev) &&
		     ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
}

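/* Number of RX descriptors that have been cleaned but not yet refilled with
 * fresh buffers, i.e. the gap between next_to_use and next_to_clean.
 */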
static int hns3_desc_unused(struct hns3_enet_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}

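/* Refill cleand_count RX descriptors starting at next_to_use: reuse the
 * attached page when it was flagged reusable, otherwise allocate and map a
 * new one, then notify hardware through the RX head register.
 */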
static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
				      int cleand_count)
{
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc_cb res_cbs;
	int i, ret;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.reuse_pg_cnt++;
			u64_stats_update_end(&ring->syncp);

			hns3_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);

				hns3_rl_err(ring_to_netdev(ring),
					    "alloc rx buffer failed: %d\n",
					    ret);
				break;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);

			u64_stats_update_begin(&ring->syncp);
			ring->stats.non_reuse_pg++;
			u64_stats_update_end(&ring->syncp);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}

static bool hns3_page_is_reusable(struct page *page)
{
	return page_to_nid(page) == numa_mem_id() &&
		!page_is_pfmemalloc(page);
}

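/* The page can be recycled only when the driver holds the one remaining
 * reference to it, i.e. the total page refcount minus the bias still owned
 * by the driver equals one.
 */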
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
{
	return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
}

static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
				struct hns3_enet_ring *ring, int pull_len,
				struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
	int size = le16_to_cpu(desc->rx.size);
	u32 truesize = hns3_buf_size(ring);

	desc_cb->pagecnt_bias--;
	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize);

	/* Avoid reusing pages from a remote NUMA node, and pages the stack
	 * may still be using when page_offset rolls back to zero; in those
	 * cases default to not reusing the page.
	 */
	if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
	    (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
		return;
	}

	/* Move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
		desc_cb->reuse_flag = 1;
	} else if (hns3_can_reuse_page(desc_cb)) {
		desc_cb->reuse_flag = 1;
		desc_cb->page_offset = 0;
	} else if (desc_cb->pagecnt_bias) {
		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
		return;
	}

	if (unlikely(!desc_cb->pagecnt_bias)) {
		page_ref_add(desc_cb->priv, USHRT_MAX);
		desc_cb->pagecnt_bias = USHRT_MAX;
	}
}

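/* Finish a packet that hardware GRO has already merged: locate the TCP
 * header behind any VLAN tags, recompute the pseudo-header checksum and
 * fill in the GSO metadata so the stack can resegment or forward the skb.
 */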
static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
	__be16 type = skb->protocol;
	struct tcphdr *th;
	int depth = 0;

	while (eth_type_vlan(type)) {
		struct vlan_hdr *vh;

		if ((depth + VLAN_HLEN) > skb_headlen(skb))
			return -EFAULT;

		vh = (struct vlan_hdr *)(skb->data + depth);
		type = vh->h_vlan_encapsulated_proto;
		depth += VLAN_HLEN;
	}

	skb_set_network_header(skb, depth);

	if (type == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		depth += sizeof(struct iphdr);
		skb_set_transport_header(skb, depth);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
					  iph->daddr, 0);
	} else if (type == htons(ETH_P_IPV6)) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		depth += sizeof(struct ipv6hdr);
		skb_set_transport_header(skb, depth);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
					  &iph->daddr, 0);
	} else {
		hns3_rl_err(skb->dev,
			    "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
			    be16_to_cpu(type), depth);
		return -EFAULT;
	}

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	trace_hns3_gro(skb);

	return 0;
}

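/* Hardware reports the full L2 checksum split into low/high fields of
 * l234info; reassemble it and hand it to the stack as CHECKSUM_COMPLETE.
 */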
static void hns3_checksum_complete(struct hns3_enet_ring *ring,
				   struct sk_buff *skb, u32 l234info)
{
	u32 lo, hi;

	u64_stats_update_begin(&ring->syncp);
	ring->stats.csum_complete++;
	u64_stats_update_end(&ring->syncp);
	skb->ip_summed = CHECKSUM_COMPLETE;
	lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M,
			     HNS3_RXD_L2_CSUM_L_S);
	hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M,
			     HNS3_RXD_L2_CSUM_H_S);
	skb->csum = csum_unfold((__force __sum16)(lo | hi << 8));
}

static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
			     u32 l234info, u32 bd_base_info, u32 ol_info)
{
	struct net_device *netdev = ring_to_netdev(ring);
	int l3_type, l4_type;
	int ol4_type;

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) {
		hns3_checksum_complete(ring, skb, l234info);
		return;
	}

	/* check if hardware has done checksum */
	if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
		return;

	if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
				 BIT(HNS3_RXD_OL3E_B) |
				 BIT(HNS3_RXD_OL4E_B)))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l3l4_csum_err++;
		u64_stats_update_end(&ring->syncp);

		return;
	}

	ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
				   HNS3_RXD_OL4ID_S);
	switch (ol4_type) {
	case HNS3_OL4_TYPE_MAC_IN_UDP:
	case HNS3_OL4_TYPE_NVGRE:
		skb->csum_level = 1;
		fallthrough;
	case HNS3_OL4_TYPE_NO_TUN:
		l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
					  HNS3_RXD_L3ID_S);
		l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
					  HNS3_RXD_L4ID_S);

		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
		if ((l3_type == HNS3_L3_TYPE_IPV4 ||
		     l3_type == HNS3_L3_TYPE_IPV6) &&
		    (l4_type == HNS3_L4_TYPE_UDP ||
		     l4_type == HNS3_L4_TYPE_TCP ||
		     l4_type == HNS3_L4_TYPE_SCTP))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
}

static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	if (skb_has_frag_list(skb))
		napi_gro_flush(&ring->tqp_vector->napi, false);

	napi_gro_receive(&ring->tqp_vector->napi, skb);
}

static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
				struct hns3_desc *desc, u32 l234info,
				u16 *vlan_tag)
{
	struct hnae3_handle *handle = ring->tqp->handle;
	struct pci_dev *pdev = ring->tqp->handle->pdev;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) {
		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		if (!(*vlan_tag & VLAN_VID_MASK))
			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);

		return (*vlan_tag != 0);
	}

#define HNS3_STRP_OUTER_VLAN	0x1
#define HNS3_STRP_INNER_VLAN	0x2
#define HNS3_STRP_BOTH		0x3

	/* Hardware always inserts the VLAN tag into the RX descriptor when
	 * it strips the tag from the packet, so the driver needs to decide
	 * which tag to report to the stack.
	 */
	switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
				HNS3_RXD_STRP_TAGP_S)) {
	case HNS3_STRP_OUTER_VLAN:
		if (handle->port_base_vlan_state !=
				HNAE3_PORT_BASE_VLAN_DISABLE)
			return false;

		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		return true;
	case HNS3_STRP_INNER_VLAN:
		if (handle->port_base_vlan_state !=
				HNAE3_PORT_BASE_VLAN_DISABLE)
			return false;

		*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
		return true;
	case HNS3_STRP_BOTH:
		if (handle->port_base_vlan_state ==
				HNAE3_PORT_BASE_VLAN_DISABLE)
			*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		else
			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);

		return true;
	default:
		return false;
	}
}

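/* Clear the VLD bit of the descriptor just handed to the stack and advance
 * next_to_clean, wrapping around at the end of the ring.
 */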
static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
{
	ring->desc[ring->next_to_clean].rx.bd_base_info &=
		cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
	ring->next_to_clean += 1;

	if (unlikely(ring->next_to_clean == ring->desc_num))
		ring->next_to_clean = 0;
}

static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
			  unsigned char *va)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
	struct net_device *netdev = ring_to_netdev(ring);
	struct sk_buff *skb;

	ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
	skb = ring->skb;
	if (unlikely(!skb)) {
		hns3_rl_err(netdev, "alloc rx skb fail\n");

		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);

		return -ENOMEM;
	}

	trace_hns3_rx_desc(ring);
	prefetchw(skb->data);

	ring->pending_buf = 1;
	ring->frag_num = 0;
	ring->tail_skb = NULL;
	if (length <= HNS3_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* We can reuse buffer as-is, just make sure it is local */
		if (likely(hns3_page_is_reusable(desc_cb->priv)))
			desc_cb->reuse_flag = 1;
		else /* This page cannot be reused so discard it */
			__page_frag_cache_drain(desc_cb->priv,
						desc_cb->pagecnt_bias);

		hns3_rx_ring_move_fw(ring);
		return 0;
	}
	u64_stats_update_begin(&ring->syncp);
	ring->stats.seg_pkt_cnt++;
	u64_stats_update_end(&ring->syncp);

	ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
	__skb_put(skb, ring->pull_len);
	hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
			    desc_cb);
	hns3_rx_ring_move_fw(ring);

	return 0;
}

static int hns3_add_frag(struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = ring->skb;
	struct sk_buff *head_skb = skb;
	struct sk_buff *new_skb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	u32 bd_base_info;

	do {
		desc = &ring->desc[ring->next_to_clean];
		desc_cb = &ring->desc_cb[ring->next_to_clean];
		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
		/* make sure HW write desc complete */
		dma_rmb();
		if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
			return -ENXIO;

		if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
			new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0);
			if (unlikely(!new_skb)) {
				hns3_rl_err(ring_to_netdev(ring),
					    "alloc rx fraglist skb fail\n");
				return -ENXIO;
			}
			ring->frag_num = 0;

			if (ring->tail_skb) {
				ring->tail_skb->next = new_skb;
				ring->tail_skb = new_skb;
			} else {
				skb_shinfo(skb)->frag_list = new_skb;
				ring->tail_skb = new_skb;
			}
		}

		if (ring->tail_skb) {
			head_skb->truesize += hns3_buf_size(ring);
			head_skb->data_len += le16_to_cpu(desc->rx.size);
			head_skb->len += le16_to_cpu(desc->rx.size);
			skb = ring->tail_skb;
		}

		dma_sync_single_for_cpu(ring_to_dev(ring),
				desc_cb->dma + desc_cb->page_offset,
				hns3_buf_size(ring),
				DMA_FROM_DEVICE);

		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
		trace_hns3_rx_desc(ring);
		hns3_rx_ring_move_fw(ring);
		ring->pending_buf++;
	} while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));

	return 0;
}

static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
				     struct sk_buff *skb, u32 l234info,
				     u32 bd_base_info, u32 ol_info)
{
	u32 l3_type;

	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
						    HNS3_RXD_GRO_SIZE_M,
						    HNS3_RXD_GRO_SIZE_S);
	/* if there is no HW GRO, do not set gro params */
	if (!skb_shinfo(skb)->gso_size) {
		hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
		return 0;
	}

	NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
						  HNS3_RXD_GRO_COUNT_M,
						  HNS3_RXD_GRO_COUNT_S);

	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
	if (l3_type == HNS3_L3_TYPE_IPV4)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	else if (l3_type == HNS3_L3_TYPE_IPV6)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		return -EFAULT;

	return hns3_gro_complete(skb, l234info);
}

static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
				     struct sk_buff *skb, u32 rss_hash)
{
	struct hnae3_handle *handle = ring->tqp->handle;
	enum pkt_hash_types rss_type;

	if (rss_hash)
		rss_type = handle->kinfo.rss_type;
	else
		rss_type = PKT_HASH_TYPE_NONE;

	skb_set_hash(skb, rss_hash, rss_type);
}

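/* Process the metadata carried in the last BD of the received packet:
 * extract the stripped VLAN tag, drop packets with L2/length errors, apply
 * HW GRO/checksum results, update per-ring statistics and set the RSS hash.
 */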
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	struct net_device *netdev = ring_to_netdev(ring);
	enum hns3_pkt_l2t_type l2_frame_type;
	u32 bd_base_info, l234info, ol_info;
	struct hns3_desc *desc;
	unsigned int len;
	int pre_ntc, ret;

	/* bdinfo handled below is only valid on the last BD of the
	 * current packet, and ring->next_to_clean indicates the first
	 * descriptor of the next packet, so we need the "- 1" below.
	 */
	pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
					(ring->desc_num - 1);
	desc = &ring->desc[pre_ntc];
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);
	ol_info = le32_to_cpu(desc->rx.ol_info);

	/* Based on the hardware strategy, the offloaded tag is stored in
	 * ot_vlan_tag for the two-layer-tag case and in vlan_tag for the
	 * one-layer-tag case.
	 */
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		u16 vlan_tag;

		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vlan_tag);
	}

	if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
				  BIT(HNS3_RXD_L2E_B))))) {
		u64_stats_update_begin(&ring->syncp);
		if (l234info & BIT(HNS3_RXD_L2E_B))
			ring->stats.l2_err++;
		else
			ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		return -EFAULT;
	}

	len = skb->len;

	/* set the protocol before handing the skb to the IP stack */
	skb->protocol = eth_type_trans(skb, netdev);

	/* This is needed in order to enable forwarding support */
	ret = hns3_set_gro_and_checksum(ring, skb, l234info,
					bd_base_info, ol_info);
	if (unlikely(ret)) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.rx_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		return ret;
	}

	l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
					HNS3_RXD_DMAC_S);

	u64_stats_update_begin(&ring->syncp);
	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += len;

	if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
		ring->stats.rx_multicast++;

	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += len;

	hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
	return 0;
}

static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = ring->skb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	unsigned int length;
	u32 bd_base_info;
	int ret;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	if (!skb) {
		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);

		/* Check valid BD */
		if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
			return -ENXIO;

		dma_rmb();
		length = le16_to_cpu(desc->rx.size);

		ring->va = desc_cb->buf + desc_cb->page_offset;

		dma_sync_single_for_cpu(ring_to_dev(ring),
				desc_cb->dma + desc_cb->page_offset,
				hns3_buf_size(ring),
				DMA_FROM_DEVICE);

		/* Prefetch the first cache line of the first page.
		 * The idea is to cache a few bytes of the packet header. Our
		 * L1 cache line size is 64B, so we need to prefetch twice to
		 * cover 128B. On CPUs with 128B L1 cache lines a single
		 * prefetch would suffice to cache the relevant part of the
		 * header.
		 */
		net_prefetch(ring->va);

		ret = hns3_alloc_skb(ring, length, ring->va);
		skb = ring->skb;

		if (ret < 0) /* alloc buffer fail */
			return ret;
		if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
			ret = hns3_add_frag(ring);
			if (ret)
				return ret;
		}
	} else {
		ret = hns3_add_frag(ring);
		if (ret)
			return ret;
	}

	/* As the head data may be changed when HW GRO is enabled, copy
	 * the head data in after the rest of the packet has been received.
	 */
	if (skb->len > HNS3_RX_HEAD_SIZE)
		memcpy(skb->data, ring->va,
		       ALIGN(ring->pull_len, sizeof(long)));

	ret = hns3_handle_bdinfo(ring, skb);
	if (unlikely(ret)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	skb_record_rx_queue(skb, ring->tqp->tqp_index);
	return 0;
}

int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int unused_count = hns3_desc_unused(ring);
	int recv_pkts = 0;
	int err;

	unused_count -= ring->pending_buf;

	while (recv_pkts < budget) {
		/* Reuse or realloc buffers */
		if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns3_nic_alloc_rx_buffers(ring, unused_count);
			unused_count = hns3_desc_unused(ring) -
					ring->pending_buf;
		}

		/* Poll one pkt */
		err = hns3_handle_rx_bd(ring);
		/* Do not get FE for the packet or failed to alloc skb */
		if (unlikely(!ring->skb || err == -ENXIO)) {
			goto out;
		} else if (likely(!err)) {
			rx_fn(ring, ring->skb);
			recv_pkts++;
		}

		unused_count += ring->pending_buf;
		ring->skb = NULL;
		ring->pending_buf = 0;
	}

out:
	/* Make sure all data has been written before submitting buffers */
	if (unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring, unused_count);

	return recv_pkts;
}

static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)
{
#define HNS3_RX_LOW_BYTE_RATE 10000
#define HNS3_RX_MID_BYTE_RATE 20000
#define HNS3_RX_ULTRA_PACKET_RATE 40

	enum hns3_flow_level_range new_flow_level;
	struct hns3_enet_tqp_vector *tqp_vector;
	int packets_per_msecs, bytes_per_msecs;
	u32 time_passed_ms;

	tqp_vector = ring_group->ring->tqp_vector;
	time_passed_ms =
		jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
	if (!time_passed_ms)
		return false;

	do_div(ring_group->total_packets, time_passed_ms);
	packets_per_msecs = ring_group->total_packets;

	do_div(ring_group->total_bytes, time_passed_ms);
	bytes_per_msecs = ring_group->total_bytes;

	new_flow_level = ring_group->coal.flow_level;

	/* Simple throttle rate management
	 * 0-10MB/s   lower     (50000 ints/s)
	 * 10-20MB/s   middle    (20000 ints/s)
	 * 20-1249MB/s high      (18000 ints/s)
	 * > 40000pps  ultra     (8000 ints/s)
	 */
	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	case HNS3_FLOW_MID:
		if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_HIGH;
		else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_LOW;
		break;
	case HNS3_FLOW_HIGH:
	case HNS3_FLOW_ULTRA:
	default:
		if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	}

	if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
	    &tqp_vector->rx_group == ring_group)
		new_flow_level = HNS3_FLOW_ULTRA;

	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->coal.flow_level = new_flow_level;

	return true;
}

static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
	struct hns3_enet_tqp_vector *tqp_vector;
	u16 new_int_gl;

	if (!ring_group->ring)
		return false;

	tqp_vector = ring_group->ring->tqp_vector;
	if (!tqp_vector->last_jiffies)
		return false;

	if (ring_group->total_packets == 0) {
		ring_group->coal.int_gl = HNS3_INT_GL_50K;
		ring_group->coal.flow_level = HNS3_FLOW_LOW;
		return true;
	}

	if (!hns3_get_new_flow_lvl(ring_group))
		return false;

	new_int_gl = ring_group->coal.int_gl;
	switch (ring_group->coal.flow_level) {
	case HNS3_FLOW_LOW:
		new_int_gl = HNS3_INT_GL_50K;
		break;
	case HNS3_FLOW_MID:
		new_int_gl = HNS3_INT_GL_20K;
		break;
	case HNS3_FLOW_HIGH:
		new_int_gl = HNS3_INT_GL_18K;
		break;
	case HNS3_FLOW_ULTRA:
		new_int_gl = HNS3_INT_GL_8K;
		break;
	default:
		break;
	}

	if (new_int_gl != ring_group->coal.int_gl) {
		ring_group->coal.int_gl = new_int_gl;
		return true;
	}
	return false;
}

static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
	bool rx_update, tx_update;

	/* update param every 1000ms */
	if (time_before(jiffies,
			tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
		return;

	if (rx_group->coal.adapt_enable) {
		rx_update = hns3_get_new_int_gl(rx_group);
		if (rx_update)
			hns3_set_vector_coalesce_rx_gl(tqp_vector,
						       rx_group->coal.int_gl);
	}

	if (tx_group->coal.adapt_enable) {
		tx_update = hns3_get_new_int_gl(tx_group);
		if (tx_update)
			hns3_set_vector_coalesce_tx_gl(tqp_vector,
						       tx_group->coal.int_gl);
	}

	tqp_vector->last_jiffies = jiffies;
}

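/* NAPI poll handler: clean all TX rings of this vector with the full budget,
 * split the RX budget across the vector's RX rings, and only re-enable the
 * interrupt once all rings are fully cleaned.
 */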
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_nic_priv *priv = netdev_priv(napi->dev);
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget = budget;

	if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group)
		hns3_clean_tx_ring(ring, budget);

	/* make sure rx ring budget not smaller than 1 */
	if (tqp_vector->num_tqps > 1)
		rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);

		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	if (napi_complete(napi) &&
	    likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		hns3_update_new_int_gl(tqp_vector);
		hns3_mask_vector_irq(tqp_vector, 1);
	}

	return rx_pkt_total;
}

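/* Build a linked list of chain nodes describing every TX and RX ring
 * attached to this vector, so the whole set can be mapped to the hardware
 * interrupt vector in a single map_ring_to_vector() call.
 */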
static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = head;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *tx_ring;
	struct hns3_enet_ring *rx_ring;

	tx_ring = tqp_vector->tx_group.ring;
	if (tx_ring) {
		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
P
Peng Li 已提交
3532 3533 3534 3535
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_TX);
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
3536 3537 3538 3539 3540 3541 3542 3543 3544

		cur_chain->next = NULL;

		while (tx_ring->next) {
			tx_ring = tx_ring->next;

			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
					     GFP_KERNEL);
			if (!chain)
3545
				goto err_free_chain;
3546 3547 3548

			cur_chain->next = chain;
			chain->tqp_index = tx_ring->tqp->tqp_index;
P
Peng Li 已提交
3549 3550 3551 3552 3553 3554
			hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
				      HNAE3_RING_TYPE_TX);
			hnae3_set_field(chain->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S,
					HNAE3_RING_GL_TX);
3555 3556 3557 3558 3559 3560 3561 3562 3563

			cur_chain = chain;
		}
	}

	rx_ring = tqp_vector->rx_group.ring;
	if (!tx_ring && rx_ring) {
		cur_chain->next = NULL;
		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
P
Peng Li 已提交
3564 3565 3566 3567
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3568 3569 3570 3571 3572 3573 3574

		rx_ring = rx_ring->next;
	}

	while (rx_ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
3575
			goto err_free_chain;
3576 3577 3578

		cur_chain->next = chain;
		chain->tqp_index = rx_ring->tqp->tqp_index;
P
Peng Li 已提交
3579 3580 3581 3582
		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3583

3584 3585 3586 3587 3588 3589
		cur_chain = chain;

		rx_ring = rx_ring->next;
	}

	return 0;
3590 3591 3592 3593 3594

err_free_chain:
	cur_chain = head->next;
	while (cur_chain) {
		chain = cur_chain->next;
3595
		devm_kfree(&pdev->dev, cur_chain);
3596 3597
		cur_chain = chain;
	}
	head->next = NULL;

	return -ENOMEM;
}

static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}

static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;

	group->count++;
}

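/* Give each vector an affinity hint, spreading the vectors over the
 * CPUs and preferring those local to the device's NUMA node.
 */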
static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
{
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_tqp_vector *tqp_vector;
	int num_vectors = priv->vector_num;
	int numa_node;
	int vector_i;

	numa_node = dev_to_node(&pdev->dev);

	for (vector_i = 0; vector_i < num_vectors; vector_i++) {
		tqp_vector = &priv->tqp_vector[vector_i];
		cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
				&tqp_vector->affinity_mask);
	}
}

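/* Distribute the TX/RX ring pairs over the allocated vectors, program
 * the initial interrupt coalesce settings, map each ring chain to its
 * vector in hardware and register the NAPI contexts.
 */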
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int ret;
	int i;

	hns3_nic_set_cpumask(priv);

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		hns3_vector_coalesce_init_hw(tqp_vector, priv);
		tqp_vector->num_tqps = 0;
	}

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		u16 vector_i = i % priv->vector_num;
		u16 tqp_num = h->kinfo.num_tqps;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       &priv->ring[i]);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       &priv->ring[i + tqp_num]);

		priv->ring[i].tqp_vector = tqp_vector;
		priv->ring[i + tqp_num].tqp_vector = tqp_vector;
		tqp_vector->num_tqps++;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		tqp_vector->handle = h;

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			goto map_ring_fail;

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (ret)
			goto map_ring_fail;

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
	}

	return 0;

map_ring_fail:
	while (i--)
		netif_napi_del(&priv->tqp_vector[i].napi);

	return ret;
}

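/* Ask the AE layer for interrupt vectors (at most one per online CPU
 * and per TQP) and allocate the matching tqp_vector array.
 */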
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* RSS size, cpu online and vector_num should be the same */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);

	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	/* save the actual available vector number */
	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		tqp_vector->idx = i;
		tqp_vector->mask_addr = vector[i].io_addr;
		tqp_vector->vector_irq = vector[i].vector;
		hns3_vector_coalesce_init(tqp_vector, priv);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}

static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
{
	group->ring = NULL;
	group->count = 0;
}

static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
			continue;

		/* Since the mapping can be overwritten, when we fail to get
		 * the chain between vector and ring, we should go on to deal
		 * with the remaining vectors.
		 */
		if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain))
			dev_warn(priv->dev, "failed to get ring chain\n");

		h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		hns3_clear_ring_group(&tqp_vector->rx_group);
		hns3_clear_ring_group(&tqp_vector->tx_group);
		netif_napi_del(&priv->tqp_vector[i].napi);
	}
}

static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector;

		tqp_vector = &priv->tqp_vector[i];
		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
		if (ret)
			return;
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);
}

static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			      unsigned int ring_type)
{
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hns3_enet_ring *ring;
	int desc_num;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring = &priv->ring[q->tqp_index];
		desc_num = priv->ae_handle->kinfo.num_tx_desc;
		ring->queue_index = q->tqp_index;
	} else {
		ring = &priv->ring[q->tqp_index + queue_num];
		desc_num = priv->ae_handle->kinfo.num_rx_desc;
		ring->queue_index = q->tqp_index;
	}

	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
	ring->last_to_use = 0;
}

static void hns3_queue_to_ring(struct hnae3_queue *tqp,
			       struct hns3_nic_priv *priv)
{
	hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
}

static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i;

	priv->ring = devm_kzalloc(&pdev->dev,
				  array3_size(h->kinfo.num_tqps,
					      sizeof(*priv->ring), 2),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_queue_to_ring(h->kinfo.tqp[i], priv);

	return 0;
}

static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	if (!priv->ring)
		return;

	devm_kfree(priv->dev, priv->ring);
	priv->ring = NULL;
}

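/* Allocate the descriptor control blocks and descriptors of a ring;
 * RX rings also get their receive buffers allocated and mapped here.
 */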
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
				     sizeof(ring->desc_cb[0]), GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	devm_kfree(ring_to_dev(ring), ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	devm_kfree(ring_to_dev(ring), ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->last_to_use = 0;
	ring->pending_buf = 0;
	if (ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
	}
}

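/* Map the RX buffer size to the hardware BD size type; unsupported
 * sizes fall back to the 2048 byte type.
 */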
static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}

static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

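	/* The ring base address is split across two registers; the high part
	 * is written as (dma >> 31) >> 1 so the shift stays well defined even
	 * when dma_addr_t is only 32 bits wide.
	 */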
	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);

	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}

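/* Program each TX queue's traffic class index into the ring TC register
 * for every TC that is enabled in the kinfo TC configuration.
 */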
static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	int i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		int j;

		if (!test_bit(i, &tc_info->tc_en))
			continue;

		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q;

			q = priv->ring[tc_info->tqp_offset[i] + j].tqp;
			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i);
		}
	}
}

int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(&priv->ring[i]);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		u64_stats_init(&priv->ring[i].syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(&priv->ring[j]);

	return -ENOMEM;
}

int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		hns3_fini_ring(&priv->ring[i]);
		hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
	}
	return 0;
}

/* Set mac addr if it is configured, or leave it to the AE driver */
static int hns3_init_mac_addr(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];
	int ret = 0;

	if (h->ae_algo->ops->get_mac_addr)
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(mac_addr_temp)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
		ether_addr_copy(netdev->perm_addr, mac_addr_temp);
	} else {
		return 0;
	}

	if (h->ae_algo->ops->set_mac_addr)
		ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);

	return ret;
}

static int hns3_init_phy(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = 0;

	if (h->ae_algo->ops->mac_connect_phy)
		ret = h->ae_algo->ops->mac_connect_phy(h);

	return ret;
}

static void hns3_uninit_phy(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->mac_disconnect_phy)
		h->ae_algo->ops->mac_disconnect_phy(h);
}

static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->del_all_fd_entries)
		h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}

static int hns3_client_start(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_start)
		return 0;

	return handle->ae_algo->ops->client_start(handle);
}

static void hns3_client_stop(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_stop)
		return;

	handle->ae_algo->ops->client_stop(handle);
}

static void hns3_info_show(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;

	dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
	dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
	dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
	dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
	dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
	dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
	dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
	dev_info(priv->dev, "Total number of enabled TCs: %u\n",
		 kinfo->tc_info.num_tc);
	dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
}

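/* Called by the AE layer when the KNIC client is attached to a handle:
 * allocate the netdev, set up rings, vectors and IRQs, and register the
 * net device.
 */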
static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	u16 alloc_tqps, max_rss_size;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
						    &max_rss_size);
	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->tx_timeout_count = 0;
	priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_alloc_vector_data;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring;
	}

	ret = hns3_init_phy(netdev);
	if (ret)
		goto out_init_phy;

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	/* the device can work without cpu rmap, only aRFS needs it */
	ret = hns3_set_rx_cpu_rmap(netdev);
	if (ret)
		dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);

	ret = hns3_nic_init_irq(priv);
	if (ret) {
		dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
		hns3_free_rx_cpu_rmap(netdev);
		goto out_init_irq_fail;
	}

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto out_client_start;
	}

	hns3_dcbnl_setup(handle);

	hns3_dbg_init(handle);

	/* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
	netdev->max_mtu = HNS3_MAX_MTU;

	if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
		set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);

	if (netif_msg_drv(handle))
		hns3_info_show(priv);

	return ret;

out_client_start:
	hns3_free_rx_cpu_rmap(netdev);
	hns3_nic_uninit_irq(priv);
out_init_irq_fail:
	unregister_netdev(netdev);
out_reg_netdev_fail:
	hns3_uninit_phy(netdev);
out_init_phy:
	hns3_uninit_all_ring(priv);
out_init_ring:
	hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
	hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
	priv->ring = NULL;
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}

static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	hns3_client_stop(handle);

	hns3_uninit_phy(netdev);

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		goto out_netdev_free;
	}

	hns3_free_rx_cpu_rmap(netdev);

	hns3_nic_uninit_irq(priv);

	hns3_del_all_fd_rules(netdev, true);

	hns3_clear_all_ring(handle, true);

	hns3_nic_uninit_vector_data(priv);

	hns3_nic_dealloc_vector_data(priv);

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

out_netdev_free:
	hns3_dbg_uninit(handle);
	free_netdev(netdev);
}

static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		if (netif_msg_link(handle))
			netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		if (netif_msg_link(handle))
			netdev_info(netdev, "link down\n");
	}
}

static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!ndev)
		return -ENODEV;

	return hns3_nic_set_real_num_queue(ndev);
}

static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_clean != ring->next_to_use) {
		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
		hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
		ring_ptr_move_fw(ring, next_to_clean);
	}

	ring->pending_buf = 0;
}

static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
	struct hns3_desc_cb res_cbs;
	int ret;

	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so we need to replace the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);
				/* if allocating a new buffer fails, exit
				 * directly and re-clear in the up flow.
				 */
				netdev_warn(ring_to_netdev(ring),
					    "reserve buffer map failed, ret = %d\n",
					    ret);
				return ret;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}
		ring_ptr_move_fw(ring, next_to_use);
	}

	/* Free the pending skb in rx ring */
	if (ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
		ring->pending_buf = 0;
	}

	return 0;
}

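/* Unlike hns3_clear_rx_ring(), this only unmaps the buffers that were
 * not reused and does not try to allocate replacements.
 */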
static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so only need to unmap the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			hns3_unmap_buffer(ring,
					  &ring->desc_cb[ring->next_to_use]);
			ring->desc_cb[ring->next_to_use].dma = 0;
		}

		ring_ptr_move_fw(ring, next_to_use);
	}
}

static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct hns3_enet_ring *ring;

		ring = &priv->ring[i];
		hns3_clear_tx_ring(ring);

		ring = &priv->ring[i + h->kinfo.num_tqps];
		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */
		if (force)
			hns3_force_clear_rx_ring(ring);
		else
			hns3_clear_rx_ring(ring);
	}
}

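/* Reset every queue in hardware and re-initialize the rings so that
 * they can be reused after a reset or a self test.
 */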
int hns3_nic_reset_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *rx_ring;
	int i, j;
	int ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = h->ae_algo->ops->reset_queue(h, i);
		if (ret)
			return ret;

		hns3_init_ring_hw(&priv->ring[i]);

		/* We need to clear the tx ring here because the self test
		 * uses the ring and does not run the down path before up
		 */
		hns3_clear_tx_ring(&priv->ring[i]);
		priv->ring[i].next_to_clean = 0;
		priv->ring[i].next_to_use = 0;
		priv->ring[i].last_to_use = 0;

		rx_ring = &priv->ring[i + h->kinfo.num_tqps];
		hns3_init_ring_hw(rx_ring);
		ret = hns3_clear_rx_ring(rx_ring);
		if (ret)
			return ret;

		/* We cannot know the hardware head and tail when this
		 * function is called in the reset flow, so we reuse all desc.
		 */
		for (j = 0; j < rx_ring->desc_num; j++)
			hns3_reuse_buffer(rx_ring, j);

		rx_ring->next_to_clean = 0;
		rx_ring->next_to_use = 0;
	}

	hns3_init_tx_ring_tc(priv);

	return 0;
}

static void hns3_store_coal(struct hns3_nic_priv *priv)
{
	/* ethtool only supports setting and querying one coal
	 * configuration for now, so save vector 0's coal
	 * configuration here in order to restore it.
	 */
	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
}

static void hns3_restore_coal(struct hns3_nic_priv *priv)
{
	u16 vector_num = priv->vector_num;
	int i;

	for (i = 0; i < vector_num; i++) {
		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
		       sizeof(struct hns3_enet_coalesce));
		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
		       sizeof(struct hns3_enet_coalesce));
	}
}

static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
		return 0;

	if (!netif_running(ndev))
		return 0;

	return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
	int ret = 0;

	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_open(kinfo->netdev);
		if (ret) {
			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
			netdev_err(kinfo->netdev,
				   "net up fail, ret=%d!\n", ret);
			return ret;
		}
	}

	return ret;
}

static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_put_ring;

	hns3_restore_coal(priv);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_dealloc_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_uninit_vector;

	/* the device can work without cpu rmap, only aRFS needs it */
	ret = hns3_set_rx_cpu_rmap(netdev);
	if (ret)
		dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);

	ret = hns3_nic_init_irq(priv);
	if (ret) {
		dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
		hns3_free_rx_cpu_rmap(netdev);
		goto err_init_irq_fail;
	}

	if (!hns3_is_phys_func(handle->pdev))
		hns3_init_mac_addr(netdev);

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto err_client_start_fail;
	}

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	return ret;

err_client_start_fail:
	hns3_free_rx_cpu_rmap(netdev);
	hns3_nic_uninit_irq(priv);
err_init_irq_fail:
	hns3_uninit_all_ring(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
err_dealloc_vector:
	hns3_nic_dealloc_vector_data(priv);
err_put_ring:
	hns3_put_ring_config(priv);

	return ret;
}

static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		return 0;
	}

	hns3_free_rx_cpu_rmap(netdev);
	hns3_nic_uninit_irq(priv);
	hns3_clear_all_ring(handle, true);
	hns3_reset_tx_queue(priv->ae_handle);

	hns3_nic_uninit_vector_data(priv);

	hns3_store_coal(priv);

	hns3_nic_dealloc_vector_data(priv);

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	return ret;
}

static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}

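/* Apply a new TQP number: let the AE layer change the channels, then
 * re-run the INIT and UP reset notifications to rebuild the enet side.
 */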
static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num,
				bool rxfh_configured)
{
	int ret;

	ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num,
						 rxfh_configured);
	if (ret) {
		dev_err(&handle->pdev->dev,
			"Change tqp num(%u) fail.\n", new_tqp_num);
		return ret;
	}

	ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	ret =  hns3_reset_notify(handle, HNAE3_UP_CLIENT);
	if (ret)
		hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT);

	return ret;
}

int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	bool rxfh_configured = netif_is_rxfh_configured(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (ch->rx_count || ch->tx_count)
		return -EINVAL;

	if (kinfo->tc_info.mqprio_active) {
		dev_err(&netdev->dev,
			"it's not allowed to set channels via ethtool when MQPRIO mode is on\n");
		return -EINVAL;
	}

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < 1) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from 1 to %u",
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	if (kinfo->rss_size == new_tqp_num)
		return 0;

	netif_dbg(h, drv, netdev,
		  "set channels: tqp_num=%u, rxfh=%d\n",
		  new_tqp_num, rxfh_configured);

	ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	org_tqp_num = h->kinfo.num_tqps;
	ret = hns3_change_channels(h, new_tqp_num, rxfh_configured);
	if (ret) {
		int ret1;

		netdev_warn(netdev,
			    "Change channels fail, revert to old value\n");
		ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured);
		if (ret1) {
			netdev_err(netdev,
				   "revert to old channel fail\n");
			return ret1;
		}

		return ret;
	}

	return 0;
}

static const struct hns3_hw_error_info hns3_hw_err[] = {
	{ .type = HNAE3_PPU_POISON_ERROR,
	  .msg = "PPU poison" },
	{ .type = HNAE3_CMDQ_ECC_ERROR,
	  .msg = "IMP CMDQ error" },
	{ .type = HNAE3_IMP_RD_POISON_ERROR,
	  .msg = "IMP RD poison" },
	{ .type = HNAE3_ROCEE_AXI_RESP_ERROR,
	  .msg = "ROCEE AXI RESP error" },
};

static void hns3_process_hw_error(struct hnae3_handle *handle,
				  enum hnae3_hw_error_type type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
		if (hns3_hw_err[i].type == type) {
			dev_err(&handle->pdev->dev, "Detected %s!\n",
				hns3_hw_err[i].msg);
			break;
		}
	}
}

static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
	.process_hw_error = hns3_process_hw_error,
};

/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	hns3_dbg_register_debugfs(hns3_driver_name);

	ret = hnae3_register_client(&client);
	if (ret)
		goto err_reg_client;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		goto err_reg_driver;

	return ret;

err_reg_driver:
	hnae3_unregister_client(&client);
err_reg_client:
	hns3_dbg_unregister_debugfs();
	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
	hns3_dbg_unregister_debugfs();
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");