// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_macsec.h"
#include "aq_main.h"
#include "aq_phy.h"
#include "aq_ptp.h"
#include "aq_filters.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

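/* Seed the RSS hash with a fixed key and fill the indirection table
 * round-robin across the RSS queues. The "i & (num_rss_queues - 1)" mask
 * assumes num_rss_queues is a power of two; aq_nic_cfg_start() below
 * rounds the vector count down accordingly.
 */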
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params;
	int i = 0;

	rss_params = &cfg->aq_rss;

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->fc.req = AQ_CFG_FC_MODE;
	cfg->wol = AQ_CFG_WOL_MODES;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	/* rss rings */
	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);

	aq_nic_rss_init(self, cfg->num_rss_queues);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for
	 * link status IRQ. If not, we'll learn the link state from the
	 * slower service task.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
	cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
	cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
	cfg->is_vlan_force_promisc = true;
}

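/* Query FW for the current link state and, on a change, resync everything
 * that depends on it: interrupt moderation, PTP clocking, RX flow control,
 * carrier state and the TX queues.
 */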
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->aq_fw_ops->get_flow_control)
		self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
	self->aq_nic_cfg.fc.cur = fc;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		netdev_info(self->ndev, "%s: link change old %d new %d\n",
			    AQ_CFG_DRV_NAME, self->link_status.mbps,
			    self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		if (self->aq_ptp) {
			aq_ptp_clock_init(self);
			aq_ptp_tm_offset_set(self,
					     self->aq_hw->aq_link_status.mbps);
			aq_ptp_link_change(self);
		}

		/* Driver has to update flow control settings on RX block
		 * on any link event.
		 * We should query FW whether it negotiated FC.
		 */
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
#if IS_ENABLED(CONFIG_MACSEC)
		aq_macsec_enable(self);
#endif
		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}

	return 0;
}

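/* Threaded handler for the dedicated link-status vector. The vector is
 * requested with IRQF_ONESHOT (see aq_nic_start()), so re-enable it in HW
 * once the link state has been serviced.
 */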
static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_NONE;

	aq_nic_update_link_status(self);

	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));

	return IRQ_HANDLED;
}

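/* Periodic housekeeping: PTP servicing, link-state refresh, MACsec work
 * and a FW statistics update. Rescheduled from the service timer below.
 */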
static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	aq_ptp_service_task(self);

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_work(self);
#endif

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

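/* Soft-reset the HW, then let hw_prepare() detect the firmware flavour in
 * use and bind the matching aq_fw_ops table.
 */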
static int aq_nic_hw_prepare(struct aq_nic_s *self)
{
	int err = 0;

	err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
	if (err)
		goto exit;

	err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);

exit:
	return err;
}

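/* One-time bring-up at probe time: prepare the HW, read the permanent MAC
 * address from FW, allocate the per-vector structures and register the
 * net_device.
 */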
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = aq_nic_hw_prepare(self);
	if (err)
		goto err_exit;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_init(self);
#endif

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
			    self->ndev->dev_addr);
	mutex_unlock(&self->fwreq_mutex);
	if (err)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
#if IS_ENABLED(CONFIG_MACSEC)
	if (err)
		aq_macsec_free(self);
#endif
	return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG |
				     NETIF_F_LRO | NETIF_F_TSO;
	self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;

}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
		self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
		err = aq_phy_init(self->aq_hw);
	}

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);

	err = aq_ptp_init(self, self->irqvecs - 1);
	if (err < 0)
		goto err_exit;

	err = aq_ptp_ring_alloc(self);
	if (err < 0)
		goto err_exit;

	err = aq_ptp_ring_init(self);
	if (err < 0)
		goto err_exit;

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}

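/* Bring the NIC to the running state: program the address filters, start
 * the rings and the HW, then attach interrupts (or the polling timer) and
 * wake the TX queues.
 */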
int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	int err = 0;

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = aq_ptp_ring_start(self);
	if (err < 0)
		goto err_exit;

	aq_nic_set_loopback(self);

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

	if (self->aq_nic_cfg.is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		err = aq_ptp_irq_alloc(self);
		if (err < 0)
			goto err_exit;

		if (self->aq_nic_cfg.link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						   self->aq_nic_cfg.link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED | IRQF_ONESHOT,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}

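/* Map an skb into TX descriptors: an optional context descriptor for
 * TSO/VLAN offload, then the linear part, then every paged fragment split
 * at AQ_CFG_TX_FRAME_MAX bytes. Returns the number of descriptors
 * consumed, or 0 with all DMA mappings undone when mapping fails.
 */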
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
			    struct aq_ring_s *ring)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	struct aq_ring_buff_s *first = NULL;
	u8 ipver = ip_hdr(skb)->version;
	struct aq_ring_buff_s *dx_buff;
	bool need_context_tag = false;
	unsigned int frag_count = 0U;
	unsigned int ret = 0U;
	unsigned int dx;
	u8 l4proto = 0;

	if (ipver == 4)
		l4proto = ip_hdr(skb)->protocol;
	else if (ipver == 6)
		l4proto = ipv6_hdr(skb)->nexthdr;

	dx = ring->sw_tail;
	dx_buff = &ring->buff_ring[dx];
	dx_buff->flags = 0U;

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		if (l4proto == IPPROTO_TCP) {
			dx_buff->is_gso_tcp = 1U;
			dx_buff->len_l4 = tcp_hdrlen(skb);
		} else if (l4proto == IPPROTO_UDP) {
			dx_buff->is_gso_udp = 1U;
			dx_buff->len_l4 = sizeof(struct udphdr);
			/* UDP GSO: the hardware does not replace the
			 * packet length, so patch it in the header here.
			 */
			udp_hdr(skb)->len = htons(dx_buff->mss +
						  dx_buff->len_l4);
		} else {
			WARN_ONCE(true, "Bad GSO mode");
			goto exit;
		}
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = skb_network_header_len(skb);
		dx_buff->eop_index = 0xffffU;
		dx_buff->is_ipv6 = (ipver == 6);
		need_context_tag = true;
	}

	if (self->aq_nic_cfg.is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
		dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
		dx_buff->len_pkt = skb->len;
		dx_buff->is_vlan = 1U;
		need_context_tag = true;
	}

	if (need_context_tag) {
		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		dx_buff->flags = 0U;
		++ret;
	}

	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
		ret = 0;
		goto exit;
	}

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
		dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
		dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
		    !dx_buff->is_vlan && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(aq_nic_get_dev(self),
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(aq_nic_get_dev(self),
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	int err = NETDEV_TX_OK;
	unsigned int tc = 0U;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	if (self->aq_nic_cfg.priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
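/* Mirror the UC/MC address lists into the single HW filter table. If
 * either list overflows AQ_HW_MULTICAST_ADDRESS_MAX, fall back to
 * IFF_PROMISC / IFF_ALLMULTI respectively.
 */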
{
{
	const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	unsigned int packet_filter = ndev->flags;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);
		}
	}

	cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
	if (cfg->is_mc_list_enabled) {
		if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
			packet_filter |= IFF_ALLMULTI;
		} else {
			netdev_for_each_mc_addr(ha, ndev) {
				ether_addr_copy(self->mc_list.ar[i++],
						ha->addr);
			}
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		self->mc_list.count = i;
		err = hw_ops->hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
		if (err < 0)
			return err;
	}

	return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

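/* Fill the ethtool statistics array: HW MAC counters first, then the SW
 * counters of every vector. Returns a pointer just past the last element
 * written.
 */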
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	struct aq_vec_s *aq_vec = NULL;
	struct aq_stats_s *stats;
	unsigned int count = 0U;
	unsigned int i = 0U;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (i = 0U, aq_vec = self->aq_vec[0];
		aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

	data += count;

err_exit:;
	return data;
}

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
	struct net_device *ndev = self->ndev;

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}

void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Asym_Pause);
	}

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk  & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk  & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk  & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk  & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk  & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk  & AQ_NIC_RATE_10M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Full);

	if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_10:
			rate = AQ_NIC_RATE_10M;
			break;

		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;

		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2GS;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -1;
			goto err_exit;
		}
		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -1;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
}

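/* Apply every loopback mode selected in priv_flags: DMA/PKT loops are set
 * through the MAC (hw ops), PHY loops through the FW.
 */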
int aq_nic_set_loopback(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (!self->aq_hw_ops->hw_set_loopback ||
	    !self->aq_fw_ops->set_phyloopback)
		return -ENOTSUPP;

	mutex_lock(&self->fwreq_mutex);
	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PKT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PKT_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_NET,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_NET)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYINT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYEXT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
	mutex_unlock(&self->fwreq_mutex);

	return 0;
}

int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	aq_ptp_irq_free(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	aq_ptp_ring_stop(self);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}

void aq_nic_set_power(struct aq_nic_s *self)
{
	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}
}

void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_deinit(aq_vec);

	aq_ptp_unregister(self);
	aq_ptp_ring_deinit(self);
	aq_ptp_ring_free(self);
	aq_ptp_free(self);

	if (likely(self->aq_fw_ops->deinit) && link_down) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

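/* Orderly shutdown: detach the net_device, stop traffic if the interface
 * was running, then deinit and power down the HW. FW deinit (link drop) is
 * skipped when wake-on-LAN is armed.
 */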
void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
	aq_nic_set_power(self);

err_exit:
	rtnl_unlock();
}

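/* Simple bump allocator for RX filter locations: each filter type owns a
 * fixed window of slots, and reserved_count hands out positions from the
 * top of that window.
 */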
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
{
	u8 location = 0xFF;
	u32 fltr_cnt;
	u32 n_bit;

	switch (type) {
	case aq_rx_filter_ethertype:
		location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
			   self->aq_hw_rx_fltrs.fet_reserved_count;
		self->aq_hw_rx_fltrs.fet_reserved_count++;
		break;
	case aq_rx_filter_l3l4:
		fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
		n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;

		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
		self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
		location = n_bit;
		break;
	default:
		break;
	}

	return location;
}

void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
			   u32 location)
{
	switch (type) {
	case aq_rx_filter_ethertype:
		self->aq_hw_rx_fltrs.fet_reserved_count--;
		break;
	case aq_rx_filter_l3l4:
		self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
		break;
	default:
		break;
	}
}