// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_macsec.h"
#include "aq_main.h"
#include "aq_phy.h"
#include "aq_ptp.h"
#include "aq_filters.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/pkt_cls.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
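
/* Usage sketch (illustrative, assuming the driver module is loaded under
 * its usual "atlantic" name): the throttling knobs above can be set at
 * module load time, e.g.
 *
 *   modprobe atlantic aq_itr=1 aq_itr_tx=100 aq_itr_rx=100
 *
 * and, given the 0644 mode, read back via
 * /sys/module/atlantic/parameters/aq_itr.
 */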

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params;
	int i = 0;

	rss_params = &cfg->aq_rss;

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}
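
/* Illustration of the fill loop above: with num_rss_queues == 4 the mask
 * (num_rss_queues - 1) == 3, so the indirection table gets the repeating
 * pattern 0, 1, 2, 3, 0, 1, ... - i.e. i % num_rss_queues. This is only
 * an even spread because the caller rounds the vector count down to a
 * power of two first.
 */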

/* Recalculate the number of vectors */
static void aq_nic_cfg_update_num_vecs(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	cfg->vecs = rounddown_pow_of_two(cfg->vecs);

	if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
		if (cfg->tcs > 2)
			cfg->vecs = min(cfg->vecs, 4U);
	}

	if (cfg->vecs <= 4)
		cfg->tc_mode = AQ_TC_MODE_8TCS;
	else
		cfg->tc_mode = AQ_TC_MODE_4TCS;

	/* RSS rings */
	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
	aq_nic_rss_init(self, cfg->num_rss_queues);
}
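
/* Worked example (illustrative numbers): with 6 online CPUs, 8 hardware
 * vectors and no service-IRQ shortage, the min() chain above yields 6,
 * which rounddown_pow_of_two() trims to 4 vectors; num_rss_queues then
 * becomes min(4, AQ_CFG_NUM_RSS_QUEUES_DEF). The real bounds come from
 * aq_cfg.h and the per-chip capability table.
 */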

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	int i;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->fc.req = AQ_CFG_FC_MODE;
	cfg->wol = AQ_CFG_WOL_MODES;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;
	cfg->is_ptp = true;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	aq_nic_cfg_update_num_vecs(self);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for
	 * the link status IRQ. If not, the link state will be
	 * picked up by the slower service task instead.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
	cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
	cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
	cfg->is_vlan_force_promisc = true;

	for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
		cfg->prio_tc_map[i] = cfg->tcs * i / 8;
}
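
/* The prio_tc_map fill above spreads the eight 802.1p priorities evenly
 * over the configured TCs: with cfg->tcs == 4, "cfg->tcs * i / 8" maps
 * priorities 0..7 to TCs 0, 0, 1, 1, 2, 2, 3, 3.
 */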

static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->aq_fw_ops->get_flow_control)
		self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
	self->aq_nic_cfg.fc.cur = fc;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		netdev_info(self->ndev, "%s: link change old %d new %d\n",
			    AQ_CFG_DRV_NAME, self->link_status.mbps,
			    self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		if (self->aq_ptp) {
			aq_ptp_clock_init(self);
			aq_ptp_tm_offset_set(self,
					     self->aq_hw->aq_link_status.mbps);
			aq_ptp_link_change(self);
		}

		/* The driver has to update the flow control settings on the
		 * RX block on any link event, so query the FW for the flow
		 * control mode it negotiated.
		 */
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
#if IS_ENABLED(CONFIG_MACSEC)
		aq_macsec_enable(self);
#endif
		if (self->aq_hw_ops->hw_tc_rate_limit_set)
			self->aq_hw_ops->hw_tc_rate_limit_set(self->aq_hw);

		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}

	return 0;
}

static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_NONE;

	aq_nic_update_link_status(self);

	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));

	return IRQ_HANDLED;
}

static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	aq_ptp_service_task(self);

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_work(self);
#endif

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	unsigned int i = 0U;

	for (i = 0U; self->aq_vecs > i; ++i)
		aq_vec_isr(i, (void *)self->aq_vec[i]);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

static int aq_nic_hw_prepare(struct aq_nic_s *self)
{
	int err = 0;

	err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
	if (err)
		goto exit;

	err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);

exit:
	return err;
}

static bool aq_nic_is_valid_ether_addr(const u8 *addr)
{
	/* Some engineering samples of Aquantia NICs are provisioned with a
	 * partially populated MAC, which is still invalid.
	 */
	return !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0);
}

299 300 301 302 303 304 305 306
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = aq_nic_hw_prepare(self);
	if (err)
		goto err_exit;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_init(self);
#endif

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
			    self->ndev->dev_addr);
	mutex_unlock(&self->fwreq_mutex);
	if (err)
		goto err_exit;

	if (!is_valid_ether_addr(self->ndev->dev_addr) ||
	    !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
		netdev_warn(self->ndev, "MAC is invalid, will use random.");
		eth_hw_addr_random(self->ndev);
	}

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
#if IS_ENABLED(CONFIG_MACSEC)
	if (err)
		aq_macsec_free(self);
#endif
	return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG |
				     NETIF_F_LRO | NETIF_F_TSO | NETIF_F_TSO6;
	self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}
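
/* Sketch of the MTU math above, assuming a hardware frame limit
 * (aq_hw_caps->mtu) of 16352 bytes as on AQC chips: max_mtu ends up as
 * 16352 - 4 (ETH_FCS_LEN) - 14 (ETH_HLEN) = 16334 bytes of L3 payload.
 */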

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;
	/* Restore default settings */
	aq_nic_set_downshift(self, self->aq_nic_cfg.downshift_counter);
	aq_nic_set_media_detect(self, self->aq_nic_cfg.is_media_detect ?
				AQ_HW_MEDIA_DETECT_CNT : 0);

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ATLANTIC) &&
	    self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
		self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
		err = aq_phy_init(self->aq_hw);

		/* Disable PTP on NICs where it's known to cause datapath
		 * problems.
		 * Ideally this should have been done by PHY provisioning,
		 * but many units have already shipped with the PTP block
		 * enabled.
		 */
		if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP)
			if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX)
				aq_phy_disable_ptp(self->aq_hw);
	}

	for (i = 0U; i < self->aq_vecs; i++) {
		aq_vec = self->aq_vec[i];
		err = aq_vec_ring_alloc(aq_vec, self, i,
					aq_nic_get_cfg(self));
		if (err)
			goto err_exit;

		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
	}

	if (aq_nic_get_cfg(self)->is_ptp) {
		err = aq_ptp_init(self, self->irqvecs - 1);
		if (err < 0)
			goto err_exit;

		err = aq_ptp_ring_alloc(self);
		if (err < 0)
			goto err_exit;

		err = aq_ptp_ring_init(self);
		if (err < 0)
			goto err_exit;
	}

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}

int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	struct aq_nic_cfg_s *cfg;
	unsigned int i = 0U;
	int err = 0;

	cfg = aq_nic_get_cfg(self);

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U; self->aq_vecs > i; ++i) {
		aq_vec = self->aq_vec[i];
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = aq_ptp_ring_start(self);
	if (err < 0)
		goto err_exit;

	aq_nic_set_loopback(self);

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

	if (cfg->is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U; self->aq_vecs > i; ++i) {
			aq_vec = self->aq_vec[i];
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		err = aq_ptp_irq_alloc(self);
		if (err < 0)
			goto err_exit;

		if (cfg->link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    cfg->link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED | IRQF_ONESHOT,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << cfg->link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev,
					   self->aq_vecs * cfg->tcs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev,
					   self->aq_vecs * cfg->tcs);
	if (err < 0)
		goto err_exit;

	for (i = 0; i < cfg->tcs; i++) {
		u16 offset = self->aq_vecs * i;

		netdev_set_tc_queue(self->ndev, i, self->aq_vecs, offset);
	}
	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}

unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
			    struct aq_ring_s *ring)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
	struct device *dev = aq_nic_get_dev(self);
	struct aq_ring_buff_s *first = NULL;
	u8 ipver = ip_hdr(skb)->version;
	struct aq_ring_buff_s *dx_buff;
	bool need_context_tag = false;
	unsigned int frag_count = 0U;
	unsigned int ret = 0U;
	unsigned int dx;
	u8 l4proto = 0;

	if (ipver == 4)
		l4proto = ip_hdr(skb)->protocol;
	else if (ipver == 6)
		l4proto = ipv6_hdr(skb)->nexthdr;

	dx = ring->sw_tail;
	dx_buff = &ring->buff_ring[dx];
	dx_buff->flags = 0U;

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		if (l4proto == IPPROTO_TCP) {
			dx_buff->is_gso_tcp = 1U;
			dx_buff->len_l4 = tcp_hdrlen(skb);
		} else if (l4proto == IPPROTO_UDP) {
			dx_buff->is_gso_udp = 1U;
			dx_buff->len_l4 = sizeof(struct udphdr);
			/* UDP GSO Hardware does not replace packet length. */
			udp_hdr(skb)->len = htons(dx_buff->mss +
						  dx_buff->len_l4);
		} else {
			WARN_ONCE(true, "Bad GSO mode");
			goto exit;
		}
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = skb_network_header_len(skb);
		dx_buff->eop_index = 0xffffU;
		dx_buff->is_ipv6 = (ipver == 6);
		need_context_tag = true;
	}

	if (cfg->is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
		dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
		dx_buff->len_pkt = skb->len;
		dx_buff->is_vlan = 1U;
		need_context_tag = true;
	}

	if (need_context_tag) {
		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		dx_buff->flags = 0U;
		++ret;
	}

	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(dev,
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, dx_buff->pa))) {
		ret = 0;
		goto exit;
	}

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
		dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
		dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(dev,
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(dev,
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
		    !dx_buff->is_vlan && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(dev,
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev,
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}
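
/* Descriptor chain produced above (illustrative): for a TSO skb with one
 * large fragment, the ring receives a context entry (MSS and header
 * lengths), an SOP buffer for the linear part mapped with
 * dma_map_single(), then the fragment chopped into AQ_CFG_TX_FRAME_MAX
 * sized buffers; the final buffer carries is_eop and owns the skb, and
 * first->eop_index lets the SOP entry locate it on completion.
 */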

int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
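	/* skb->queue_mapping linearizes the (tc, vec) pair: queues are
	 * grouped per TC, cfg->vecs queues each, so the modulo/division
	 * below recovers the vector index and the traffic class.
	 */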
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
	unsigned int vec = skb->queue_mapping % cfg->vecs;
	unsigned int tc = skb->queue_mapping / cfg->vecs;
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	int err = NETDEV_TX_OK;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(cfg, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	if (cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	/* The status update above may have stopped the queue, so re-check it. */
	if (__netif_subqueue_stopped(self->ndev,
				     AQ_NIC_RING2QMAP(self, ring->idx))) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	unsigned int packet_filter = ndev->flags;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);
		}
	}

	cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
	if (cfg->is_mc_list_enabled) {
		if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
			packet_filter |= IFF_ALLMULTI;
		} else {
			netdev_for_each_mc_addr(ha, ndev) {
				ether_addr_copy(self->mc_list.ar[i++],
						ha->addr);
			}
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		self->mc_list.count = i;
		err = hw_ops->hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
		if (err < 0)
			return err;
	}

	return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	if (unlikely(!self->aq_hw_ops->hw_get_regs))
		return -EOPNOTSUPP;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	if (unlikely(!self->aq_hw_ops->hw_get_regs))
		return 0;

	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	struct aq_stats_s *stats;
	unsigned int count = 0U;
	unsigned int i = 0U;
	unsigned int tc;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	if (stats->brc)
		data[++i] = stats->brc;
	else
		data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	if (stats->btc)
		data[++i] = stats->btc;
	else
		data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
		for (i = 0U; self->aq_vecs > i; ++i) {
			if (!self->aq_vec[i])
				break;
			data += count;
			count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
		}
	}

	data += count;

err_exit:
	return data;
}
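
/* Layout note for the block above: the fixed firmware counters come
 * first, followed by the per-TC, per-vector software counters; the
 * advanced pointer is returned so a caller (e.g. the ethtool stats code)
 * can append its own counters right after this block.
 */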

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
	struct net_device *ndev = self->ndev;

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}

void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	u32 lp_link_speed_msk;

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	if (self->link_status.mbps)
		cmd->base.duplex = self->link_status.full_duplex ?
				   DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Half);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Half);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Half);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Asym_Pause);
	}

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Half);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Half);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Half);

	if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
	lp_link_speed_msk = self->aq_hw->aq_link_status.lp_link_speed_msk;

	if (lp_link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     10000baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     5000baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_2G5)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     2500baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     1000baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_1G_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     1000baseT_Half);

	if (lp_link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     100baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_100M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     100baseT_Half);

	if (lp_link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     10baseT_Full);

	if (lp_link_speed_msk & AQ_NIC_RATE_10M_HALF)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     10baseT_Half);

	if (self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     Pause);
	if (!!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_TX) ^
	    !!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
						     Asym_Pause);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	int fduplex = (cmd->base.duplex == DUPLEX_FULL);
	u32 speed = cmd->base.speed;
	u32 rate = 0U;
	int err = 0;

	if (!fduplex && speed > SPEED_1000) {
		err = -EINVAL;
		goto err_exit;
	}

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		switch (speed) {
		case SPEED_10:
			rate = fduplex ? AQ_NIC_RATE_10M : AQ_NIC_RATE_10M_HALF;
			break;

		case SPEED_100:
			rate = fduplex ? AQ_NIC_RATE_100M
				       : AQ_NIC_RATE_100M_HALF;
			break;

		case SPEED_1000:
			rate = fduplex ? AQ_NIC_RATE_1G : AQ_NIC_RATE_1G_HALF;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2G5;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -1;
			goto err_exit;
		}
		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -1;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
}

int aq_nic_set_loopback(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (!self->aq_hw_ops->hw_set_loopback ||
	    !self->aq_fw_ops->set_phyloopback)
		return -EOPNOTSUPP;

	mutex_lock(&self->fwreq_mutex);
	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PKT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PKT_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_NET,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_NET)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYINT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYEXT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
	mutex_unlock(&self->fwreq_mutex);

	return 0;
}
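
/* Usage sketch (hypothetical interface name): each loopback mode above is
 * exposed as an ethtool private flag, so a single mode can be toggled
 * from userspace roughly like
 *
 *   ethtool --set-priv-flags eth0 DMASystemLoopback on
 *
 * with the exact flag names coming from aq_ethtool.c.
 */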

int aq_nic_stop(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	aq_ptp_irq_free(self);

	for (i = 0U; self->aq_vecs > i; ++i)
		aq_vec_stop(self->aq_vec[i]);

	aq_ptp_ring_stop(self);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}

void aq_nic_set_power(struct aq_nic_s *self)
{
	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}
}

void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U; i < self->aq_vecs; i++) {
		aq_vec = self->aq_vec[i];
		aq_vec_deinit(aq_vec);
		aq_vec_ring_free(aq_vec);
	}

	aq_ptp_unregister(self);
	aq_ptp_ring_deinit(self);
	aq_ptp_ring_free(self);
	aq_ptp_free(self);

	if (likely(self->aq_fw_ops->deinit) && link_down) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

int aq_nic_realloc_vectors(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);

	aq_nic_free_vectors(self);

	for (self->aq_vecs = 0; self->aq_vecs < cfg->vecs; self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] = aq_vec_alloc(self, self->aq_vecs,
							   cfg);
		if (unlikely(!self->aq_vec[self->aq_vecs]))
			return -ENOMEM;
	}

	return 0;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
	aq_nic_set_power(self);

err_exit:
	rtnl_unlock();
}

u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
{
	u8 location = 0xFF;
	u32 fltr_cnt;
	u32 n_bit;

	switch (type) {
	case aq_rx_filter_ethertype:
		location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
			   self->aq_hw_rx_fltrs.fet_reserved_count;
		self->aq_hw_rx_fltrs.fet_reserved_count++;
		break;
	case aq_rx_filter_l3l4:
		fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
		n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;

		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
		self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
		location = n_bit;
		break;
	default:
		break;
	}

	return location;
}
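
/* Example of the bookkeeping above: ethertype filter slots are handed out
 * from the top of their range downwards - the first reservation returns
 * AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT, the next one that
 * value minus one - and aq_nic_release_filter() below simply drops the
 * reservation count again.
 */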

void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
			   u32 location)
{
	switch (type) {
	case aq_rx_filter_ethertype:
		self->aq_hw_rx_fltrs.fet_reserved_count--;
		break;
	case aq_rx_filter_l3l4:
		self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
		break;
	default:
		break;
	}
}

int aq_nic_set_downshift(struct aq_nic_s *self, int val)
{
	int err = 0;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (!self->aq_fw_ops->set_downshift)
		return -EOPNOTSUPP;

	if (val > 15) {
		netdev_err(self->ndev, "downshift counter should be <= 15\n");
		return -EINVAL;
	}
	cfg->downshift_counter = val;

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_downshift(self->aq_hw, cfg->downshift_counter);
	mutex_unlock(&self->fwreq_mutex);

	return err;
}

int aq_nic_set_media_detect(struct aq_nic_s *self, int val)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	int err = 0;

	if (!self->aq_fw_ops->set_media_detect)
		return -EOPNOTSUPP;

	if (val > 0 && val != AQ_HW_MEDIA_DETECT_CNT) {
		netdev_err(self->ndev, "EDPD on this device supports only the fixed value of %d\n",
			   AQ_HW_MEDIA_DETECT_CNT);
		return -EINVAL;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_media_detect(self->aq_hw, !!val);
	mutex_unlock(&self->fwreq_mutex);

	/* The msecs value plays no role - the configuration is fixed in the PHY */
	if (!err)
		cfg->is_media_detect = !!val;

	return err;
}

int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	const unsigned int prev_vecs = cfg->vecs;
	bool ndev_running;
	int err = 0;
	int i;

	/* Bail out if this is already the current configuration, or if this
	 * is a disable request (tcs == 0) and QoS is already disabled.
	 */
	if (tcs == cfg->tcs || (tcs == 0 && !cfg->is_qos))
		return 0;

	ndev_running = netif_running(self->ndev);
	if (ndev_running)
		dev_close(self->ndev);

	cfg->tcs = tcs;
	if (cfg->tcs == 0)
		cfg->tcs = 1;
	if (prio_tc_map)
		memcpy(cfg->prio_tc_map, prio_tc_map, sizeof(cfg->prio_tc_map));
	else
		for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
			cfg->prio_tc_map[i] = cfg->tcs * i / 8;

	cfg->is_qos = (tcs != 0 ? true : false);
	cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
	if (!cfg->is_ptp)
		netdev_warn(self->ndev, "%s\n",
			    "PTP is auto disabled due to requested TC count.");

	netdev_set_num_tc(self->ndev, cfg->tcs);

	/* Changing the number of TCs might change the number of vectors */
	aq_nic_cfg_update_num_vecs(self);
	if (prev_vecs != cfg->vecs) {
		err = aq_nic_realloc_vectors(self);
		if (err)
			goto err_exit;
	}

	if (ndev_running)
		err = dev_open(self->ndev, NULL);

err_exit:
	return err;
}
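
/* Usage sketch (hypothetical interface name): this path is reached from
 * the driver's ndo_setup_tc handler when userspace requests an offloaded
 * mqprio configuration, e.g.
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 4 \
 *      map 0 0 1 1 2 2 3 3 0 0 1 1 2 2 3 3 hw 1
 */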

int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
			     const u32 max_rate)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (tc >= AQ_CFG_TCS_MAX)
		return -EINVAL;

	if (max_rate && max_rate < 10) {
		netdev_warn(self->ndev,
			"Setting %s to the minimum usable value of %dMbps.\n",
			"max rate", 10);
		cfg->tc_max_rate[tc] = 10;
	} else {
		cfg->tc_max_rate[tc] = max_rate;
	}

	return 0;
}

int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
			     const u32 min_rate)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (tc >= AQ_CFG_TCS_MAX)
		return -EINVAL;

	if (min_rate)
		set_bit(tc, &cfg->tc_min_rate_msk);
	else
		clear_bit(tc, &cfg->tc_min_rate_msk);

	if (min_rate && min_rate < 20) {
		netdev_warn(self->ndev,
			"Setting %s to the minimum usable value of %dMbps.\n",
			"min rate", 20);
		cfg->tc_min_rate[tc] = 20;
	} else {
		cfg->tc_min_rate[tc] = min_rate;
	}

	return 0;
}