/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
#include "hw_atl_llh_internal.h"

#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
	.is_64_dma = true,		  \
	.msix_irqs = 4U,		  \
	.irq_mask = ~0U,		  \
	.vecs = HW_ATL_B0_RSS_MAX,	  \
	.tcs = HW_ATL_B0_TC_MAX,	  \
	.rxd_alignment = 1U,		  \
	.rxd_size = HW_ATL_B0_RXD_SIZE,   \
	.rxds_max = HW_ATL_B0_MAX_RXD,    \
	.rxds_min = HW_ATL_B0_MIN_RXD,    \
	.txd_alignment = 1U,		  \
	.txd_size = HW_ATL_B0_TXD_SIZE,   \
	.txds_max = HW_ATL_B0_MAX_TXD,    \
	.txds_min = HW_ATL_B0_MIN_TXD,    \
	.txhwb_alignment = 4096U,	  \
	.tx_rings = HW_ATL_B0_TX_RINGS,   \
	.rx_rings = HW_ATL_B0_RX_RINGS,   \
	.hw_features = NETIF_F_HW_CSUM |  \
			NETIF_F_RXCSUM |  \
			NETIF_F_RXHASH |  \
			NETIF_F_SG |      \
			NETIF_F_TSO |     \
			NETIF_F_LRO,      \
	.hw_priv_flags = IFF_UNICAST_FLT, \
	.flow_control = true,		  \
	.mtu = HW_ATL_B0_MTU_JUMBO,	  \
	.mac_regs_count = 88,		  \
	.hw_alive_check_addr = 0x10U

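/* Per-board capability tables: each AQC variant shares the basic
 * capabilities above and differs only in media type and in the set of
 * link speeds it advertises.
 */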
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
	.link_speed_msk = HW_ATL_B0_RATE_10G |
			  HW_ATL_B0_RATE_5G  |
			  HW_ATL_B0_RATE_2G5 |
			  HW_ATL_B0_RATE_1G  |
			  HW_ATL_B0_RATE_100M,
};
const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = HW_ATL_B0_RATE_10G |
			  HW_ATL_B0_RATE_5G  |
			  HW_ATL_B0_RATE_2G5 |
			  HW_ATL_B0_RATE_1G  |
			  HW_ATL_B0_RATE_100M,
};
const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = HW_ATL_B0_RATE_5G  |
			  HW_ATL_B0_RATE_2G5 |
			  HW_ATL_B0_RATE_1G  |
			  HW_ATL_B0_RATE_100M,
};
const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.media_type = AQ_HW_MEDIA_TYPE_TP,
	.link_speed_msk = HW_ATL_B0_RATE_2G5 |
			  HW_ATL_B0_RATE_1G  |
			  HW_ATL_B0_RATE_100M,
};

static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
	int err = 0;

	err = hw_atl_utils_soft_reset(self);
	if (err)
		return err;
	self->aq_fw_ops->set_state(self, MPI_RESET);

	err = aq_hw_err_from_flags(self);

	return err;
}

static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
	u32 tc = 0U;
	u32 buff_size = 0U;
	unsigned int i_priority = 0U;
	bool is_rx_flow_control = false;

	/* TPS Descriptor rate init */
	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	/* TPS VM init */
	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	/* TPS TC credits init */
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
	hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);

	/* Tx buf size */
	buff_size = HW_ATL_B0_TXBUF_MAX;

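	/* Buffer thresholds are programmed in 32-byte units: buff_size is
	 * in KB, so buff_size * (1024 / 32) converts it to units, with the
	 * high and low watermarks at 66% and 50% of the buffer (presumably
	 * the flow-control XOFF/XON points).
	 */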
	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
						   (buff_size *
						   (1024 / 32U) * 66U) /
						   100U, tc);
	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
						   (buff_size *
						   (1024 / 32U) * 50U) /
						   100U, tc);

	/* QoS Rx buf size per TC */
	tc = 0;
	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
	buff_size = HW_ATL_B0_RXBUF_MAX;

	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 66U) /
						   100U, tc);
	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
						   (buff_size *
						   (1024U / 32U) * 50U) /
						   100U, tc);
	hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);

	/* QoS 802.1p priority -> TC mapping */
	for (i_priority = 8U; i_priority--;)
		hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
				     struct aq_rss_parameters *rss_params)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int err = 0;
	unsigned int i = 0U;
	unsigned int addr = 0U;

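	/* The 40-byte hash key is written through an indirect register
	 * interface: 10 dwords, each byte-swapped, polling the write-enable
	 * bit for completion (AQ_HW_WAIT_FOR sets err on timeout).
	 */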
	for (i = 10, addr = 0U; i--; ++addr) {
		u32 key_data = cfg->is_rss ?
			__swab32(rss_params->hash_secret_key[i]) : 0U;
		hw_atl_rpf_rss_key_wr_data_set(self, key_data);
		hw_atl_rpf_rss_key_addr_set(self, addr);
		hw_atl_rpf_rss_key_wr_en_set(self, 1U);
		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
			       1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
				struct aq_rss_parameters *rss_params)
{
	u8 *indirection_table =	rss_params->indirection_table;
	u32 i = 0U;
	u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
	int err = 0;
	u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX *
					HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];

	memset(bitary, 0, sizeof(bitary));

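	/* Pack the indirection table into 3-bit entries
	 * (HW_ATL_B0_RSS_REDIRECTION_BITS), laid end to end across the
	 * bitary[] words, before pushing it through the indirect
	 * redirection-table write interface below.
	 */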
	for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
		(*(u32 *)(bitary + ((i * 3U) / 16U))) |=
			((indirection_table[i] % num_rss_queues) <<
			((i * 3U) & 0xFU));
	}

	for (i = ARRAY_SIZE(bitary); i--;) {
		hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
		hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
		hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
			       1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
				    struct aq_nic_cfg_s *aq_nic_cfg)
{
	unsigned int i;

	/* TX checksum offloads */
	hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

	/* RX checksum offloads */
	hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);

	/* LSO offloads */
	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

	/* LRO offloads */
	{
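		/* Per-session descriptor limit, presumably encoded as a
		 * power of two: 0x3 allows up to 8 descriptors, 0x2 up to 4,
		 * 0x1 up to 2, clamped by HW_ATL_B0_LRO_RXD_MAX.
		 */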
		unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
			((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
			((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

		for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
			hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
		hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
		hw_atl_rpo_lro_inactive_interval_set(self, 0);
		hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);

		hw_atl_rpo_lro_qsessions_lim_set(self, 1U);

		hw_atl_rpo_lro_total_desc_lim_set(self, 2U);

		hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);

		hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);

		hw_atl_rpo_lro_pkt_lim_set(self, 1U);

		hw_atl_rpo_lro_en_set(self,
				      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
	}
	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

	/* Tx interrupts */
	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
			0x00010000U : 0x00000000U);
	hw_atl_tdm_tx_dca_en_set(self, 0U);
	hw_atl_tdm_tx_dca_mode_set(self, 0U);
	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int i;

	/* Rx TC/RSS number config */
	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

	/* Rx flow control */
	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

	/* RSS Ring selection */
	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
					0xB3333333U : 0x00000000U);

	/* Multicast filters */
	for (i = HW_ATL_B0_MAC_MAX; i--;) {
		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
	}

	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

	/* Vlan filters */
	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);

	if (cfg->vlan_id) {
		hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
		hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
		hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
		hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
		hw_atl_rpf_vlan_untagged_act_set(self, 1U);

		hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
		hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
		hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
	} else {
		hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
	}

	/* Rx Interrupts */
	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	aq_hw_write_reg(self, 0x00005040U,
			IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);

	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
	hw_atl_rdm_rx_dca_en_set(self, 0U);
	hw_atl_rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
	int err = 0;
	unsigned int h = 0U;
	unsigned int l = 0U;

	if (!mac_addr) {
		err = -EINVAL;
		goto err_exit;
	}
	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
	hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
	hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
	hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
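	/* Global interrupt control register values, indexed by IRQ type
	 * and by whether more than one vector is in use.
	 */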
	static u32 aq_hw_atl_igcr_table_[4][2] = {
		{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
		{ 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
		{ 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
		{ 0x20000022U, 0x20000026U }  /* AQ_IRQ_MSIX */
	};

	int err = 0;
	u32 val;

	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;

	hw_atl_b0_hw_init_tx_path(self);
	hw_atl_b0_hw_init_rx_path(self);

	hw_atl_b0_hw_mac_addr_set(self, mac_addr);

	self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
	self->aq_fw_ops->set_state(self, MPI_INIT);

	hw_atl_b0_hw_qos_set(self);
	hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	/* Force limit MRRS on RDM/TDM to 2K */
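	/* Both 3-bit request-size fields are cleared and set to 4, which in
	 * the standard PCIe encoding means 2^4 * 128 = 2K.
	 */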
	val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
	aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
			(val & ~0x707) | 0x404);

	/* TX DMA total request limit. B0 hardware cannot handle more than
	 * (8K - MRRS) bytes of incoming DMA data.
	 * The value 24 is in 256-byte units: 6K = 8K - 2K (MRRS).
	 */
	aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

	/* Reset link status and read out initial hardware counters */
	self->aq_link_status.mbps = 0;
	self->aq_fw_ops->update_stats(self);

	err = aq_hw_err_from_flags(self);
	if (err < 0)
		goto err_exit;

	/* Interrupts */
	hw_atl_reg_irq_glb_ctl_set(self,
				   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
						 [(aq_nic_cfg->vecs > 1U) ?
						 1 : 0]);

	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

	/* Map the error interrupt cause */
	hw_atl_reg_gen_irq_map_set(self,
				   ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
				   ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);

	hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
	return err;
}

static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
{
	hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
{
	hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
	hw_atl_tpb_tx_buff_en_set(self, 1);
	hw_atl_rpb_rx_buff_en_set(self, 1);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
	return 0;
}

static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int frags)
{
	struct aq_ring_buff_s *buff = NULL;
	struct hw_atl_txd_s *txd = NULL;
	unsigned int buff_pa_len = 0U;
	unsigned int pkt_len = 0U;
	unsigned int frag_count = 0U;
	bool is_gso = false;

	buff = &ring->buff_ring[ring->sw_tail];
	pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

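	/* Each ring entry is either a context descriptor (TXC) carrying
	 * LSO/checksum metadata for the packet that follows, or a data
	 * descriptor (TXD) pointing at a DMA-mapped fragment.
	 */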
	for (frag_count = 0; frag_count < frags; frag_count++) {
		txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
						HW_ATL_B0_TXD_SIZE];
		txd->ctl = 0;
		txd->ctl2 = 0;
		txd->buf_addr = 0;

		buff = &ring->buff_ring[ring->sw_tail];

		if (buff->is_txc) {
			txd->ctl |= (buff->len_l3 << 31) |
				(buff->len_l2 << 24) |
				HW_ATL_B0_TXD_CTL_CMD_TCP |
				HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
			txd->ctl2 |= (buff->mss << 16) |
				(buff->len_l4 << 8) |
				(buff->len_l3 >> 1);

			pkt_len -= (buff->len_l4 +
				    buff->len_l3 +
				    buff->len_l2);
			is_gso = true;

			if (buff->is_ipv6)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
		} else {
			buff_pa_len = buff->len;

			txd->buf_addr = buff->pa;
			txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
						((u32)buff_pa_len << 4));
			txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
			/* PAY_LEN */
			txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);

			if (is_gso) {
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
				txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
			}

			/* Tx checksum offloads */
			if (buff->is_ip_cso)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;

			if (buff->is_udp_cso || buff->is_tcp_cso)
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;

			if (unlikely(buff->is_eop)) {
				txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
				is_gso = false;
			}
		}

		ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
	}

	hw_atl_b0_hw_tx_ring_tail_update(self, ring);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

	hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
						  aq_ring->idx);

	hw_atl_reg_rx_dma_desc_base_addressmswset(self,
						  dma_desc_addr_msw, aq_ring->idx);

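	/* The ring length register is apparently programmed in units of
	 * 8 descriptors, hence size / 8.
	 */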
	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_rdm_rx_desc_data_buff_size_set(self,
					      AQ_CFG_RX_FRAME_MAX / 1024U,
					      aq_ring->idx);

	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
	hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

	/* Rx ring set mode */

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
	hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

	hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
						  aq_ring->idx);
	hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
						  aq_ring->idx);
	hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);

	/* Set Tx threshold */
	hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int sw_tail_old)
{
	for (; sw_tail_old != ring->sw_tail;
		sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
		struct hw_atl_rxd_s *rxd =
			(struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
							HW_ATL_B0_RXD_SIZE];

		struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

		rxd->buf_addr = buff->pa;
		rxd->hdr_addr = 0U;
	}

	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	int err = 0;
	unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);

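	/* The UNPLUG flag is set when register reads come back as all-ones,
	 * i.e. the device looks surprise-removed, so the head pointer read
	 * above cannot be trusted.
	 */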
	if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
		err = -ENXIO;
		goto err_exit;
	}
	ring->hw_head = hw_head_;
	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
					struct aq_ring_s *ring)
{
	struct device *ndev = aq_nic_get_dev(ring->aq_nic);

	for (; ring->hw_head != ring->sw_tail;
		ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
		struct aq_ring_buff_s *buff = NULL;
		struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
			&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];

		unsigned int is_err = 1U;
		unsigned int is_rx_check_sum_enabled = 0U;
		unsigned int pkt_type = 0U;

		if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
			break;
		}

		buff = &ring->buff_ring[ring->hw_head];

		is_err = (0x0000003CU & rxd_wb->status);

		is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
		is_err &= ~0x20U; /* exclude validity bit */

		pkt_type = 0xFFU & (rxd_wb->type >> 4);

		if (is_rx_check_sum_enabled) {
			if (0x0U == (pkt_type & 0x3U))
				buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;

			if (0x4U == (pkt_type & 0x1CU))
				buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
			else if (0x0U == (pkt_type & 0x1CU))
				buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;

			/* Checksum offload workaround for small packets */
			if (rxd_wb->pkt_len <= 60) {
				buff->is_ip_cso = 0U;
				buff->is_cso_err = 0U;
			}
		}

		is_err &= ~0x18U;

		dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);

		if (is_err || rxd_wb->type & 0x1000U) {
			/* status error or DMA error */
			buff->is_error = 1U;
		} else {
			if (self->aq_nic_cfg->is_rss) {
				/* RSS type: low 4 bits of the type field */
				u16 rss_type = rxd_wb->type & 0xFU;

				if (rss_type && rss_type < 0x8U) {
					buff->is_hash_l4 = (rss_type == 0x4 ||
					rss_type == 0x5);
					buff->rss_hash = rxd_wb->rss_hash;
				}
			}

			if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
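				/* pkt_len on the EOP descriptor is the total
				 * packet length; every earlier buffer holds
				 * AQ_CFG_RX_FRAME_MAX bytes, so the remainder
				 * is this buffer's fill, with 0 meaning an
				 * exactly full buffer.
				 */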
				buff->len = rxd_wb->pkt_len %
					AQ_CFG_RX_FRAME_MAX;
				buff->len = buff->len ?
					buff->len : AQ_CFG_RX_FRAME_MAX;
				buff->next = 0U;
				buff->is_eop = 1U;
			} else {
				if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
					rxd_wb->status) {
					/* LRO */
					buff->next = rxd_wb->next_desc_ptr;
					++ring->stats.rx.lro_packets;
				} else {
					/* jumbo */
					buff->next =
						aq_ring_next_dx(ring,
								ring->hw_head);
					++ring->stats.rx.jumbo_packets;
				}
			}
		}
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
	atomic_inc(&self->dpc);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
	*mask = hw_atl_itr_irq_statuslsw_get(self);
	return aq_hw_err_from_flags(self);
}

#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
					  unsigned int packet_filter)
{
	unsigned int i = 0U;

	hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
	hw_atl_rpfl2multicast_flr_en_set(self,
					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);
	hw_atl_rpfl2_accept_all_mc_packets_set(self,
					       IS_FILTER_ENABLED(IFF_ALLMULTI));
	hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

	self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);

	for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
		hw_atl_rpfl2_uc_flr_en_set(self,
					   (self->aq_nic_cfg->is_mc_list_enabled &&
					    (i <= self->aq_nic_cfg->mc_list_count)) ?
					   1U : 0U, i);

	return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED

static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
					   u8 ar_mac
					   [AQ_CFG_MULTICAST_ADDRESS_MAX]
					   [ETH_ALEN],
					   u32 count)
{
	int err = 0;

	if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
		err = -EBADRQC;
		goto err_exit;
	}
	for (self->aq_nic_cfg->mc_list_count = 0U;
			self->aq_nic_cfg->mc_list_count < count;
			++self->aq_nic_cfg->mc_list_count) {
		u32 i = self->aq_nic_cfg->mc_list_count;
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
					(ar_mac[i][4] << 8) | ar_mac[i][5];

		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addresslsw_set(self,
							l, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(self,
							h, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(self,
					   (self->aq_nic_cfg->is_mc_list_enabled),
					   HW_ATL_B0_MAC_MIN + i);
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}

static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
	unsigned int i = 0U;
	u32 itr_tx = 2U;
	u32 itr_rx = 2U;

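	/* Per-ring moderation control layout: minimum timer in bits 15:8,
	 * maximum timer in bits 23:16, both in 2us hardware units; the low
	 * bits (2U here) appear to act as an enable flag.
	 */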
	switch (self->aq_nic_cfg->itr) {
	case AQ_CFG_INTERRUPT_MODERATION_ON:
	case AQ_CFG_INTERRUPT_MODERATION_AUTO:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
		if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
			/* HW timers are in 2us units */
			int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
			int tx_min_timer = tx_max_timer / 2;
			int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
			int rx_min_timer = rx_max_timer / 2;
			tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
			tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
			rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
			rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
			itr_tx |= tx_min_timer << 0x8U;
			itr_tx |= tx_max_timer << 0x10U;
			itr_rx |= rx_min_timer << 0x8U;
			itr_rx |= rx_max_timer << 0x10U;
		} else {
			static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
				{0xfU, 0xffU}, /* 10Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit 5GS */
				{0xfU, 0x1ffU}, /* 2.5Gbit */
				{0xfU, 0x1ffU}, /* 1Gbit */
				{0xfU, 0x1ffU}, /* 100Mbit */
			};

			static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
				{0x6U, 0x38U},/* 10Gbit */
				{0xCU, 0x70U},/* 5Gbit */
				{0xCU, 0x70U},/* 5Gbit 5GS */
				{0x18U, 0xE0U},/* 2.5Gbit */
				{0x30U, 0x80U},/* 1Gbit */
				{0x4U, 0x50U},/* 100Mbit */
			};

			unsigned int speed_index =
					hw_atl_utils_mbps_2_speed_index(
						self->aq_link_status.mbps);

			/* Update user visible ITR settings */
			self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
							[speed_index][1] * 2;
			self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
							[speed_index][1] * 2;

			itr_tx |= hw_atl_b0_timers_table_tx_
						[speed_index][0] << 0x8U;
			itr_tx |= hw_atl_b0_timers_table_tx_
						[speed_index][1] << 0x10U;

			itr_rx |= hw_atl_b0_timers_table_rx_
						[speed_index][0] << 0x8U;
			itr_rx |= hw_atl_b0_timers_table_rx_
						[speed_index][1] << 0x10U;
		}
		break;
	case AQ_CFG_INTERRUPT_MODERATION_OFF:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
		itr_tx = 0U;
		itr_rx = 0U;
		break;
	}

	for (i = HW_ATL_B0_RINGS_MAX; i--;) {
		hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
		hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
	hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
	return aq_hw_err_from_flags(self);
}

const struct aq_hw_ops hw_atl_ops_b0 = {
	.hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
	.hw_init              = hw_atl_b0_hw_init,
	.hw_deinit            = hw_atl_utils_hw_deinit,
	.hw_set_power         = hw_atl_utils_hw_set_power,
	.hw_reset             = hw_atl_b0_hw_reset,
	.hw_start             = hw_atl_b0_hw_start,
	.hw_ring_tx_start     = hw_atl_b0_hw_ring_tx_start,
	.hw_ring_tx_stop      = hw_atl_b0_hw_ring_tx_stop,
	.hw_ring_rx_start     = hw_atl_b0_hw_ring_rx_start,
	.hw_ring_rx_stop      = hw_atl_b0_hw_ring_rx_stop,
	.hw_stop              = hw_atl_b0_hw_stop,

	.hw_ring_tx_xmit         = hw_atl_b0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update  = hw_atl_b0_hw_ring_tx_head_update,

	.hw_ring_rx_receive      = hw_atl_b0_hw_ring_rx_receive,
	.hw_ring_rx_fill         = hw_atl_b0_hw_ring_rx_fill,

	.hw_irq_enable           = hw_atl_b0_hw_irq_enable,
	.hw_irq_disable          = hw_atl_b0_hw_irq_disable,
	.hw_irq_read             = hw_atl_b0_hw_irq_read,

	.hw_ring_rx_init             = hw_atl_b0_hw_ring_rx_init,
	.hw_ring_tx_init             = hw_atl_b0_hw_ring_tx_init,
	.hw_packet_filter_set        = hw_atl_b0_hw_packet_filter_set,
	.hw_multicast_list_set       = hw_atl_b0_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
	.hw_rss_set                  = hw_atl_b0_hw_rss_set,
	.hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
	.hw_get_regs                 = hw_atl_utils_hw_get_regs,
	.hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
	.hw_get_fw_version           = hw_atl_utils_get_fw_version,
};