// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_macsec.h"
#include "aq_main.h"
#include "aq_phy.h"
#include "aq_ptp.h"
#include "aq_filters.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/pkt_cls.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params;
	int i = 0;

	rss_params = &cfg->aq_rss;

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

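	/* Fill the indirection table round-robin; the masking below assumes
	 * num_rss_queues is a power of two, which aq_nic_cfg_start() enforces.
	 */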
	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	int i;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->fc.req = AQ_CFG_FC_MODE;
	cfg->wol = AQ_CFG_WOL_MODES;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;
	cfg->is_ptp = true;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	/* rss rings */
	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);

	aq_nic_rss_init(self, cfg->num_rss_queues);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

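	/* Up to four vectors per TC fit the 8-TC HW layout; more vectors
	 * require falling back to the wider 4-TC mode.
	 */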
	if (cfg->vecs <= 4)
		cfg->tc_mode = AQ_TC_MODE_8TCS;
	else
		cfg->tc_mode = AQ_TC_MODE_4TCS;

	/* Check if we have enough vectors allocated for
	 * link status IRQ. If not, the link state will be picked up
	 * by the slower service task.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
	cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
	cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
	cfg->is_vlan_force_promisc = true;

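	/* Map the eight 802.1p priorities evenly across the enabled TCs */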
	for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
		cfg->prio_tc_map[i] = cfg->tcs * i / 8;
}

static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->aq_fw_ops->get_flow_control)
		self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
	self->aq_nic_cfg.fc.cur = fc;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		netdev_info(self->ndev, "%s: link change old %d new %d\n",
			    AQ_CFG_DRV_NAME, self->link_status.mbps,
			    self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		if (self->aq_ptp) {
			aq_ptp_clock_init(self);
			aq_ptp_tm_offset_set(self,
					     self->aq_hw->aq_link_status.mbps);
			aq_ptp_link_change(self);
		}

		/* Driver has to update flow control settings on RX block
		 * on any link event, so query the FW for the negotiated
		 * flow control mode.
		 */
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
#if IS_ENABLED(CONFIG_MACSEC)
		aq_macsec_enable(self);
#endif
		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}

	return 0;
}

static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_NONE;

	aq_nic_update_link_status(self);

	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));

	return IRQ_HANDLED;
}

static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	aq_ptp_service_task(self);

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_work(self);
#endif

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

static int aq_nic_hw_prepare(struct aq_nic_s *self)
{
	int err = 0;

	err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
	if (err)
		goto exit;

	err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);

exit:
	return err;
}

static bool aq_nic_is_valid_ether_addr(const u8 *addr)
{
	/* Some engineering samples of Aquantia NICs are provisioned with a
	 * partially populated MAC, which is still invalid.
	 */
	return !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0);
}

int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = aq_nic_hw_prepare(self);
	if (err)
		goto err_exit;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_init(self);
#endif

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
			    self->ndev->dev_addr);
	mutex_unlock(&self->fwreq_mutex);
	if (err)
		goto err_exit;

	if (!is_valid_ether_addr(self->ndev->dev_addr) ||
	    !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
		netdev_warn(self->ndev, "MAC is invalid, will use random.");
		eth_hw_addr_random(self->ndev);
	}

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
#if IS_ENABLED(CONFIG_MACSEC)
	if (err)
		aq_macsec_free(self);
#endif
	return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG |
				     NETIF_F_LRO | NETIF_F_TSO;
	self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
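	/* cfg->mtu counts the L2 header; ndev->mtu does not */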
	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ATLANTIC) &&
	    self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
		self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
		err = aq_phy_init(self->aq_hw);
	}

	for (i = 0U; i < self->aq_vecs; i++) {
		aq_vec = self->aq_vec[i];
		err = aq_vec_ring_alloc(aq_vec, self, i,
					aq_nic_get_cfg(self));
		if (err)
			goto err_exit;

		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
	}

	if (aq_nic_get_cfg(self)->is_ptp) {
		err = aq_ptp_init(self, self->irqvecs - 1);
		if (err < 0)
			goto err_exit;

		err = aq_ptp_ring_alloc(self);
		if (err < 0)
			goto err_exit;

		err = aq_ptp_ring_init(self);
		if (err < 0)
			goto err_exit;
	}

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}

int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	struct aq_nic_cfg_s *cfg;
	unsigned int i = 0U;
	int err = 0;

	cfg = aq_nic_get_cfg(self);

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = aq_ptp_ring_start(self);
	if (err < 0)
		goto err_exit;

	aq_nic_set_loopback(self);

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
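	/* Kick off the first service cycle right away; the callback re-arms
	 * the timer and schedules the service work itself.
	 */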
	aq_nic_service_timer_cb(&self->service_timer);

	if (cfg->is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		err = aq_ptp_irq_alloc(self);
		if (err < 0)
			goto err_exit;

		if (cfg->link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    cfg->link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED | IRQF_ONESHOT,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << cfg->link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev,
					   self->aq_vecs * cfg->tcs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev,
					   self->aq_vecs * cfg->tcs);
	if (err < 0)
		goto err_exit;

	for (i = 0; i < cfg->tcs; i++) {
		u16 offset = self->aq_vecs * i;

		netdev_set_tc_queue(self->ndev, i, self->aq_vecs, offset);
	}
	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}

unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
			    struct aq_ring_s *ring)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
	struct device *dev = aq_nic_get_dev(self);
	struct aq_ring_buff_s *first = NULL;
	u8 ipver = ip_hdr(skb)->version;
	struct aq_ring_buff_s *dx_buff;
	bool need_context_tag = false;
	unsigned int frag_count = 0U;
	unsigned int ret = 0U;
	unsigned int dx;
	u8 l4proto = 0;

	if (ipver == 4)
		l4proto = ip_hdr(skb)->protocol;
	else if (ipver == 6)
		l4proto = ipv6_hdr(skb)->nexthdr;

	dx = ring->sw_tail;
	dx_buff = &ring->buff_ring[dx];
	dx_buff->flags = 0U;

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		if (l4proto == IPPROTO_TCP) {
			dx_buff->is_gso_tcp = 1U;
			dx_buff->len_l4 = tcp_hdrlen(skb);
		} else if (l4proto == IPPROTO_UDP) {
			dx_buff->is_gso_udp = 1U;
			dx_buff->len_l4 = sizeof(struct udphdr);
			/* UDP GSO: the hardware does not replace the packet length */
			udp_hdr(skb)->len = htons(dx_buff->mss +
						  dx_buff->len_l4);
		} else {
			WARN_ONCE(true, "Bad GSO mode");
			goto exit;
		}
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = skb_network_header_len(skb);
		dx_buff->eop_index = 0xffffU;
		dx_buff->is_ipv6 = (ipver == 6);
		need_context_tag = true;
	}

	if (cfg->is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
		dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
		dx_buff->len_pkt = skb->len;
		dx_buff->is_vlan = 1U;
		need_context_tag = true;
	}

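	/* TSO and VLAN offload parameters are passed to HW in a separate
	 * context descriptor, which takes one extra slot in the ring.
	 */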
	if (need_context_tag) {
		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		dx_buff->flags = 0U;
		++ret;
	}

	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(dev,
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, dx_buff->pa))) {
		ret = 0;
		goto exit;
	}

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
		dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
		dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(dev,
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(dev,
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
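	/* Unwind every DMA mapping made so far; context slots used for
	 * GSO/VLAN hold no mapping and are skipped.
	 */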
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
		    !dx_buff->is_vlan && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(dev,
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev,
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
	unsigned int vec = skb->queue_mapping % cfg->vecs;
	unsigned int tc = skb->queue_mapping / cfg->vecs;
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	int err = NETDEV_TX_OK;

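	/* +1 accounts for the linear (headlen) part of the skb */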
	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(cfg, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	if (cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	/* The above status update may have stopped the queue; re-check it */
	if (__netif_subqueue_stopped(self->ndev,
				     AQ_NIC_RING2QMAP(self, ring->idx))) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	unsigned int packet_filter = ndev->flags;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;
	int err = 0;

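	/* Unicast and multicast addresses share one HW filter list, so
	 * fall back to PROMISC/ALLMULTI when the list overflows.
	 */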
	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);
		}
	}

	cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
	if (cfg->is_mc_list_enabled) {
		if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
			packet_filter |= IFF_ALLMULTI;
		} else {
			netdev_for_each_mc_addr(ha, ndev) {
				ether_addr_copy(self->mc_list.ar[i++],
						ha->addr);
			}
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		self->mc_list.count = i;
		err = hw_ops->hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
		if (err < 0)
			return err;
	}

	return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	if (unlikely(!self->aq_hw_ops->hw_get_regs))
		return -EOPNOTSUPP;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	if (unlikely(!self->aq_hw_ops->hw_get_regs))
		return 0;

	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	struct aq_vec_s *aq_vec = NULL;
	struct aq_stats_s *stats;
	unsigned int count = 0U;
	unsigned int i = 0U;
	unsigned int tc;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

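	/* Step past the fixed HW counters; per-ring stats are appended next */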
	i++;

	data += i;

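	/* aq_vec_get_sw_stats() reports the number of counters written via
	 * 'count'; the data cursor is advanced before each following chunk.
	 */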
	for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
		for (i = 0U, aq_vec = self->aq_vec[0];
		     aq_vec && self->aq_vecs > i;
		     ++i, aq_vec = self->aq_vec[i]) {
			data += count;
			aq_vec_get_sw_stats(aq_vec, tc, data, &count);
		}
	}

	data += count;

err_exit:;
	return data;
}

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
	struct net_device *ndev = self->ndev;

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}

void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Asym_Pause);
	}

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Full);

	if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_10:
			rate = AQ_NIC_RATE_10M;
			break;

		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;

		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2G5;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -EINVAL;
			goto err_exit;
		}
		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -EINVAL;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
}

int aq_nic_set_loopback(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (!self->aq_hw_ops->hw_set_loopback ||
	    !self->aq_fw_ops->set_phyloopback)
		return -EOPNOTSUPP;

	mutex_lock(&self->fwreq_mutex);
	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PKT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PKT_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_NET,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_NET)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYINT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYEXT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
	mutex_unlock(&self->fwreq_mutex);

	return 0;
}

int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	aq_ptp_irq_free(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	aq_ptp_ring_stop(self);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}

void aq_nic_set_power(struct aq_nic_s *self)
{
	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}
}

void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; i < self->aq_vecs; i++) {
		aq_vec = self->aq_vec[i];
		aq_vec_deinit(aq_vec);
		aq_vec_ring_free(aq_vec);
	}

	aq_ptp_unregister(self);
	aq_ptp_ring_deinit(self);
	aq_ptp_ring_free(self);
	aq_ptp_free(self);

	if (likely(self->aq_fw_ops->deinit) && link_down) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
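	/* Keep the link alive when a WoL mode is configured; FW deinit
	 * (and thus link down) happens only without WoL.
	 */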
	aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
	aq_nic_set_power(self);

err_exit:
	rtnl_unlock();
}

u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
{
	u8 location = 0xFF;
	u32 fltr_cnt;
	u32 n_bit;

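	/* Filter locations are handed out from the top of each range down */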
	switch (type) {
	case aq_rx_filter_ethertype:
		location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
			   self->aq_hw_rx_fltrs.fet_reserved_count;
		self->aq_hw_rx_fltrs.fet_reserved_count++;
		break;
	case aq_rx_filter_l3l4:
		fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
		n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;

		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
		self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
		location = n_bit;
		break;
	default:
		break;
	}

	return location;
}

void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
			   u32 location)
{
	switch (type) {
	case aq_rx_filter_ethertype:
		self->aq_hw_rx_fltrs.fet_reserved_count--;
		break;
	case aq_rx_filter_l3l4:
		self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
		break;
	default:
		break;
	}
}

int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	bool ndev_running;
	int err = 0;
	int i;

	/* Bail out if the configuration is unchanged, or if this is a
	 * disable request (tcs == 0) and QoS is already disabled.
	 */
	if (tcs == cfg->tcs || (tcs == 0 && !cfg->is_qos))
		return 0;

	ndev_running = netif_running(self->ndev);
	if (ndev_running)
		dev_close(self->ndev);

	cfg->tcs = tcs;
	if (cfg->tcs == 0)
		cfg->tcs = 1;
	if (prio_tc_map)
		memcpy(cfg->prio_tc_map, prio_tc_map, sizeof(cfg->prio_tc_map));
	else
		for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
			cfg->prio_tc_map[i] = cfg->tcs * i / 8;

	cfg->is_qos = !!tcs;
	cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
	if (!cfg->is_ptp)
		netdev_warn(self->ndev, "%s\n",
			    "PTP is auto disabled due to requested TC count.");

	netdev_set_num_tc(self->ndev, cfg->tcs);

	if (ndev_running)
		err = dev_open(self->ndev, NULL);

	return err;
}

int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
			     const u32 max_rate)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (tc >= AQ_CFG_TCS_MAX)
		return -EINVAL;

	if (max_rate && max_rate < 10) {
		netdev_warn(self->ndev,
			"Setting %s to the minimum usable value of %dMbps.\n",
			"max rate", 10);
		cfg->tc_max_rate[tc] = 10;
	} else {
		cfg->tc_max_rate[tc] = max_rate;
	}

	return 0;
}