// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_macsec.h"
#include "aq_main.h"
#include "aq_phy.h"
#include "aq_ptp.h"
#include "aq_filters.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/pkt_cls.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

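/* Seed the default RSS state: a fixed hash key and an indirection table
 * that spreads flows across the RSS queues. The mask-based fill below
 * assumes num_rss_queues is a power of two; with four queues, for example,
 * the table repeats the pattern 0, 1, 2, 3.
 */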
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params;
	int i = 0;

	rss_params = &cfg->aq_rss;

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Recalculate the number of vectors */
static void aq_nic_cfg_update_num_vecs(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	cfg->vecs = rounddown_pow_of_two(cfg->vecs);

	if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
		if (cfg->tcs > 2)
			cfg->vecs = min(cfg->vecs, 4U);
	}

	if (cfg->vecs <= 4)
		cfg->tc_mode = AQ_TC_MODE_8TCS;
	else
		cfg->tc_mode = AQ_TC_MODE_4TCS;

	/* RSS rings */
	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
	aq_nic_rss_init(self, cfg->num_rss_queues);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	int i;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->fc.req = AQ_CFG_FC_MODE;
	cfg->wol = AQ_CFG_WOL_MODES;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;
	cfg->is_ptp = true;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	aq_nic_cfg_update_num_vecs(self);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for
	 * link status IRQ. If not, we'll learn the link state from
	 * the slower service task.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
	cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
	cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
	cfg->is_vlan_force_promisc = true;

	for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
		cfg->prio_tc_map[i] = cfg->tcs * i / 8;
}

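/* Query firmware for the current link state and react to changes: refresh
 * interrupt moderation and flow control, notify PTP, and toggle the
 * carrier and TX queues to match.
 */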
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->aq_fw_ops->get_flow_control)
		self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
	self->aq_nic_cfg.fc.cur = fc;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		netdev_info(self->ndev, "%s: link change old %d new %d\n",
			    AQ_CFG_DRV_NAME, self->link_status.mbps,
			    self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		if (self->aq_ptp) {
			aq_ptp_clock_init(self);
			aq_ptp_tm_offset_set(self,
					     self->aq_hw->aq_link_status.mbps);
			aq_ptp_link_change(self);
		}

		/* Driver has to update flow control settings on RX block
		 * on any link event.
		 * We should query FW whether it negotiated FC.
		 */
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
#if IS_ENABLED(CONFIG_MACSEC)
		aq_macsec_enable(self);
#endif
		if (self->aq_hw_ops->hw_tc_rate_limit_set)
			self->aq_hw_ops->hw_tc_rate_limit_set(self->aq_hw);

		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}

	return 0;
}

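/* Threaded handler for the dedicated link-state IRQ vector: refresh the
 * link status, then re-arm the vector.
 */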
static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_NONE;

	aq_nic_update_link_status(self);

	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));

	return IRQ_HANDLED;
}

static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	aq_ptp_service_task(self);

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_work(self);
#endif

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

static int aq_nic_hw_prepare(struct aq_nic_s *self)
{
	int err = 0;

	err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
	if (err)
		goto exit;

	err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);

exit:
	return err;
}

static bool aq_nic_is_valid_ether_addr(const u8 *addr)
{
	/* Some engineering samples of Aquantia NICs are provisioned with a
	 * partially populated MAC, which is still invalid.
	 */
	return !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0);
}

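/* Bring-up path: prepare HW/FW, read and validate the permanent MAC
 * address, allocate the interrupt vectors and register the net_device.
 */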
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = aq_nic_hw_prepare(self);
	if (err)
		goto err_exit;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_init(self);
#endif

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
			    self->ndev->dev_addr);
	mutex_unlock(&self->fwreq_mutex);
	if (err)
		goto err_exit;

	if (!is_valid_ether_addr(self->ndev->dev_addr) ||
	    !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
		netdev_warn(self->ndev, "MAC is invalid, will use random.");
		eth_hw_addr_random(self->ndev);
	}

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
#if IS_ENABLED(CONFIG_MACSEC)
	if (err)
		aq_macsec_free(self);
#endif
	return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG |
				     NETIF_F_LRO | NETIF_F_TSO;
	self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

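/* (Re)initialize the hardware: soft reset, MAC programming, optional PHY
 * and PTP setup, and per-vector ring allocation.
 */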
int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ATLANTIC) &&
	    self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
		self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
		err = aq_phy_init(self->aq_hw);
	}

	for (i = 0U; i < self->aq_vecs; i++) {
		aq_vec = self->aq_vec[i];
		err = aq_vec_ring_alloc(aq_vec, self, i,
					aq_nic_get_cfg(self));
		if (err)
			goto err_exit;

		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
	}

	if (aq_nic_get_cfg(self)->is_ptp) {
		err = aq_ptp_init(self, self->irqvecs - 1);
		if (err < 0)
			goto err_exit;

		err = aq_ptp_ring_alloc(self);
		if (err < 0)
			goto err_exit;

		err = aq_ptp_ring_init(self);
		if (err < 0)
			goto err_exit;
	}

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}

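/* Start the datapath: program the RX filters, start the rings and the HW,
 * request IRQs (or arm the polling timer) and open the TX queues.
 */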
int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	struct aq_nic_cfg_s *cfg;
	unsigned int i = 0U;
	int err = 0;

	cfg = aq_nic_get_cfg(self);

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = aq_ptp_ring_start(self);
	if (err < 0)
		goto err_exit;

	aq_nic_set_loopback(self);

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

	if (cfg->is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		err = aq_ptp_irq_alloc(self);
		if (err < 0)
			goto err_exit;

		if (cfg->link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    cfg->link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED | IRQF_ONESHOT,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << cfg->link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev,
					   self->aq_vecs * cfg->tcs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev,
					   self->aq_vecs * cfg->tcs);
	if (err < 0)
		goto err_exit;

	for (i = 0; i < cfg->tcs; i++) {
		u16 offset = self->aq_vecs * i;

		netdev_set_tc_queue(self->ndev, i, self->aq_vecs, offset);
	}
	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}

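/* Map an skb into TX descriptors: an optional context descriptor (for
 * TSO/VLAN), the linear part, then each paged fragment. Returns the number
 * of descriptors consumed, or 0 on a DMA mapping error (anything already
 * mapped is unwound).
 */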
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
			    struct aq_ring_s *ring)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
	struct device *dev = aq_nic_get_dev(self);
	struct aq_ring_buff_s *first = NULL;
	u8 ipver = ip_hdr(skb)->version;
	struct aq_ring_buff_s *dx_buff;
	bool need_context_tag = false;
	unsigned int frag_count = 0U;
	unsigned int ret = 0U;
	unsigned int dx;
	u8 l4proto = 0;

	if (ipver == 4)
		l4proto = ip_hdr(skb)->protocol;
	else if (ipver == 6)
		l4proto = ipv6_hdr(skb)->nexthdr;

	dx = ring->sw_tail;
	dx_buff = &ring->buff_ring[dx];
	dx_buff->flags = 0U;

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		if (l4proto == IPPROTO_TCP) {
			dx_buff->is_gso_tcp = 1U;
			dx_buff->len_l4 = tcp_hdrlen(skb);
		} else if (l4proto == IPPROTO_UDP) {
			dx_buff->is_gso_udp = 1U;
			dx_buff->len_l4 = sizeof(struct udphdr);
			/* UDP GSO Hardware does not replace packet length. */
			udp_hdr(skb)->len = htons(dx_buff->mss +
						  dx_buff->len_l4);
		} else {
			WARN_ONCE(true, "Bad GSO mode");
			goto exit;
		}
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = skb_network_header_len(skb);
		dx_buff->eop_index = 0xffffU;
		dx_buff->is_ipv6 = (ipver == 6);
		need_context_tag = true;
	}

	if (cfg->is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
		dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
		dx_buff->len_pkt = skb->len;
		dx_buff->is_vlan = 1U;
		need_context_tag = true;
	}

	if (need_context_tag) {
		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		dx_buff->flags = 0U;
		++ret;
	}

	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(dev,
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, dx_buff->pa))) {
		ret = 0;
		goto exit;
	}

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
		dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
		dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(dev,
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(dev,
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
		    !dx_buff->is_vlan && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(dev,
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev,
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

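/* Transmit entry point: pick the TX ring from the skb queue mapping, map
 * the skb and hand the resulting descriptors to the hardware.
 */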
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
	unsigned int vec = skb->queue_mapping % cfg->vecs;
	unsigned int tc = skb->queue_mapping / cfg->vecs;
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	int err = NETDEV_TX_OK;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(cfg, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	if (cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev,
				     AQ_NIC_RING2QMAP(self, ring->idx))) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

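/* Sync the HW unicast/multicast address table with the netdev, falling
 * back to promiscuous or all-multicast mode when the table would overflow.
 */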
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	unsigned int packet_filter = ndev->flags;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);
		}
	}

	cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
	if (cfg->is_mc_list_enabled) {
		if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
			packet_filter |= IFF_ALLMULTI;
		} else {
			netdev_for_each_mc_addr(ha, ndev) {
				ether_addr_copy(self->mc_list.ar[i++],
						ha->addr);
			}
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		self->mc_list.count = i;
		err = hw_ops->hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
		if (err < 0)
			return err;
	}

	return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	if (unlikely(!self->aq_hw_ops->hw_get_regs))
		return -EOPNOTSUPP;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	if (unlikely(!self->aq_hw_ops->hw_get_regs))
		return 0;

	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

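/* Fill the ethtool statistics buffer: MAC/firmware counters first, then
 * the per-TC, per-vector software ring counters. Returns a pointer just
 * past the last element written.
 */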
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	struct aq_vec_s *aq_vec = NULL;
	struct aq_stats_s *stats;
	unsigned int count = 0U;
	unsigned int i = 0U;
	unsigned int tc;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
		for (i = 0U, aq_vec = self->aq_vec[0];
		     aq_vec && self->aq_vecs > i;
		     ++i, aq_vec = self->aq_vec[i]) {
			data += count;
			aq_vec_get_sw_stats(aq_vec, tc, data, &count);
		}
	}

	data += count;

err_exit:;
	return data;
}

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
	struct net_device *ndev = self->ndev;

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}

void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	if (self->link_status.mbps)
		cmd->base.duplex = self->link_status.full_duplex ?
				   DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Half);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Half);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Half);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Asym_Pause);
	}

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Half);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Half);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Half);

	if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

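/* Translate the requested ethtool settings into a link rate mask and push
 * it to firmware. Half duplex is only accepted at 1G and below.
 */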
int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	int fduplex = (cmd->base.duplex == DUPLEX_FULL);
	u32 speed = cmd->base.speed;
	u32 rate = 0U;
	int err = 0;

	if (!fduplex && speed > SPEED_1000) {
		err = -EINVAL;
		goto err_exit;
	}

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		switch (speed) {
		case SPEED_10:
			rate = fduplex ? AQ_NIC_RATE_10M : AQ_NIC_RATE_10M_HALF;
			break;

		case SPEED_100:
			rate = fduplex ? AQ_NIC_RATE_100M
				       : AQ_NIC_RATE_100M_HALF;
			break;

		case SPEED_1000:
			rate = fduplex ? AQ_NIC_RATE_1G : AQ_NIC_RATE_1G_HALF;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2G5;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -1;
			goto err_exit;
		}
		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -1;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
}

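/* Apply all loopback modes at once from cfg->priv_flags: DMA and packet
 * loopbacks through hw_ops, PHY loopbacks through firmware.
 */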
int aq_nic_set_loopback(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (!self->aq_hw_ops->hw_set_loopback ||
	    !self->aq_fw_ops->set_phyloopback)
		return -ENOTSUPP;

	mutex_lock(&self->fwreq_mutex);
	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PKT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PKT_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_NET,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_NET)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYINT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYEXT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
	mutex_unlock(&self->fwreq_mutex);

	return 0;
}

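/* Tear down the datapath in roughly the reverse order of aq_nic_start():
 * quiesce the netdev, stop timers and IRQs, then the rings and the HW.
 */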
int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	aq_ptp_irq_free(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	aq_ptp_ring_stop(self);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}

void aq_nic_set_power(struct aq_nic_s *self)
{
	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}
}

void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; i < self->aq_vecs; i++) {
		aq_vec = self->aq_vec[i];
		aq_vec_deinit(aq_vec);
		aq_vec_ring_free(aq_vec);
	}

	aq_ptp_unregister(self);
	aq_ptp_ring_deinit(self);
	aq_ptp_ring_free(self);
	aq_ptp_free(self);

	if (likely(self->aq_fw_ops->deinit) && link_down) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

int aq_nic_realloc_vectors(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);

	aq_nic_free_vectors(self);

	for (self->aq_vecs = 0; self->aq_vecs < cfg->vecs; self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] = aq_vec_alloc(self, self->aq_vecs,
							   cfg);
		if (unlikely(!self->aq_vec[self->aq_vecs]))
			return -ENOMEM;
	}

	return 0;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
	aq_nic_set_power(self);

err_exit:
	rtnl_unlock();
}

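/* Reserve a HW RX filter slot of the given type and return its location
 * (0xFF if the type is not handled here).
 */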
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
{
	u8 location = 0xFF;
	u32 fltr_cnt;
	u32 n_bit;

	switch (type) {
	case aq_rx_filter_ethertype:
		location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
			   self->aq_hw_rx_fltrs.fet_reserved_count;
		self->aq_hw_rx_fltrs.fet_reserved_count++;
		break;
	case aq_rx_filter_l3l4:
		fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
		n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;

		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
		self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
		location = n_bit;
		break;
	default:
		break;
	}

	return location;
}

void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
			   u32 location)
{
	switch (type) {
	case aq_rx_filter_ethertype:
		self->aq_hw_rx_fltrs.fet_reserved_count--;
		break;
	case aq_rx_filter_l3l4:
		self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
		break;
	default:
		break;
	}
}

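/* Reconfigure the number of traffic classes (mqprio). This may briefly
 * close the device and reallocate the interrupt vectors.
 */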
int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	const unsigned int prev_vecs = cfg->vecs;
	bool ndev_running;
	int err = 0;
	int i;

	/* Bail out if this is already the current configuration, or if it is
	 * a disable request (tcs == 0) and QoS is already disabled.
	 */
	if (tcs == cfg->tcs || (tcs == 0 && !cfg->is_qos))
		return 0;

	ndev_running = netif_running(self->ndev);
	if (ndev_running)
		dev_close(self->ndev);

	cfg->tcs = tcs;
	if (cfg->tcs == 0)
		cfg->tcs = 1;
	if (prio_tc_map)
		memcpy(cfg->prio_tc_map, prio_tc_map, sizeof(cfg->prio_tc_map));
	else
		for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
			cfg->prio_tc_map[i] = cfg->tcs * i / 8;

	cfg->is_qos = (tcs != 0 ? true : false);
	cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
	if (!cfg->is_ptp)
		netdev_warn(self->ndev, "%s\n",
			    "PTP is auto disabled due to requested TC count.");

	netdev_set_num_tc(self->ndev, cfg->tcs);

	/* Changing the number of TCs might change the number of vectors */
	aq_nic_cfg_update_num_vecs(self);
	if (prev_vecs != cfg->vecs) {
		err = aq_nic_realloc_vectors(self);
		if (err)
			goto err_exit;
	}

	if (ndev_running)
		err = dev_open(self->ndev, NULL);

err_exit:
	return err;
}

int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
			     const u32 max_rate)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (tc >= AQ_CFG_TCS_MAX)
		return -EINVAL;

	if (max_rate && max_rate < 10) {
		netdev_warn(self->ndev,
			"Setting %s to the minimum usable value of %dMbps.\n",
			"max rate", 10);
		cfg->tc_max_rate[tc] = 10;
	} else {
		cfg->tc_max_rate[tc] = max_rate;
	}

	return 0;
}

int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
			     const u32 min_rate)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (tc >= AQ_CFG_TCS_MAX)
		return -EINVAL;

	if (min_rate)
		set_bit(tc, &cfg->tc_min_rate_msk);
	else
		clear_bit(tc, &cfg->tc_min_rate_msk);

	if (min_rate && min_rate < 20) {
		netdev_warn(self->ndev,
			"Setting %s to the minimum usable value of %dMbps.\n",
			"min rate", 20);
		cfg->tc_min_rate[tc] = 20;
	} else {
		cfg->tc_min_rate[tc] = min_rate;
	}

	return 0;
}