/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_H
#define _QED_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_debug.h"
#include "qed_hsi.h"

/* Common ops table exported by the core module to protocol drivers */
extern const struct qed_common_ops qed_common_ops_pass;
/* Driver version, encoded as one u32: major.minor.rev.eng (one byte each) */
#define QED_MAJOR_VERSION		8
#define QED_MINOR_VERSION		37
#define QED_REVISION_VERSION		0
#define QED_ENGINEERING_VERSION		20

#define QED_VERSION						 \
	((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
	 (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)

/* Storm firmware version, same byte encoding as QED_VERSION */
#define STORM_FW_VERSION				       \
	((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
	 (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
#define MAX_HWFNS_PER_DEVICE    (4)
#define NAME_SIZE 16
#define VER_SIZE 16

#define QED_WFQ_UNIT	100

#define QED_WID_SIZE            (1024)
#define QED_MIN_WIDS		(4)
#define QED_PF_DEMS_SIZE        (4)

/* cau states */
enum qed_coalescing_mode {
	QED_COAL_MODE_DISABLE,
	QED_COAL_MODE_ENABLE
};

58 59 60 61 62 63 64
enum qed_nvm_cmd {
	QED_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
	QED_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
	QED_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
	QED_GET_MCP_NVM_RESP = 0xFFFFFF00
};

/* Forward declarations for types defined in other qed headers */
struct qed_eth_cb_ops;
struct qed_dev_info;
union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;
enum qed_mfw_tlv_type;
union qed_mfw_tlv_data;

/* helpers: extract/insert a field given its FOO_MASK / FOO_SHIFT defines */
#define QED_MFW_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

#define QED_MFW_SET_FIELD(name, field, value)				       \
	do {								       \
		(name) &= ~(field ## _MASK);				       \
		(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
	} while (0)

82
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
R
Ram Amrani 已提交
83 84 85 86 87 88 89 90
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      (cid * QED_PF_DEMS_SIZE);

	return db_addr;
}

/* Compute the VF doorbell offset for connection @cid (legacy address layout) */
static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);

	return db_addr;
}

/* sizeof(type_name) rounded up to the device's cache-line size */
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)				     \
	((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
	 ~((1 << (p_hwfn->cdev->cache_shift)) - 1))

/* Iterate i over all HW functions of the device */
#define for_each_hwfn(cdev, i)  for (i = 0; i < cdev->num_hwfns; i++)

/* Ternary select: true1 if val == cond1, true2 if val == cond2, else def */
#define D_TRINE(val, cond1, cond2, true1, true2, def) \
	(val == (cond1) ? true1 :		      \
	 (val == (cond2) ? true2 : def))

/* forward */
struct qed_ptt_pool;
struct qed_spq;
struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_ll2_info;
struct qed_mcp_info;
struct qed_llh_info;

struct qed_rt_data {
Y
Yuval Mintz 已提交
120 121
	u32	*init_val;
	bool	*b_valid;
122 123
};

/* Supported tunnel encapsulation modes */
enum qed_tunn_mode {
	QED_MODE_L2GENEVE_TUNN,
	QED_MODE_IPGENEVE_TUNN,
	QED_MODE_L2GRE_TUNN,
	QED_MODE_IPGRE_TUNN,
	QED_MODE_VXLAN_TUNN,
};

/* Tunnel traffic classification schemes */
enum qed_tunn_clss {
	QED_TUNN_CLSS_MAC_VLAN,
	QED_TUNN_CLSS_MAC_VNI,
	QED_TUNN_CLSS_INNER_MAC_VLAN,
	QED_TUNN_CLSS_INNER_MAC_VNI,
	QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
	MAX_QED_TUNN_CLSS,
};

141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165
struct qed_tunn_update_type {
	bool b_update_mode;
	bool b_mode_enabled;
	enum qed_tunn_clss tun_cls;
};

struct qed_tunn_update_udp_port {
	bool b_update_port;
	u16 port;
};

struct qed_tunnel_info {
	struct qed_tunn_update_type vxlan;
	struct qed_tunn_update_type l2_geneve;
	struct qed_tunn_update_type ip_geneve;
	struct qed_tunn_update_type l2_gre;
	struct qed_tunn_update_type ip_gre;

	struct qed_tunn_update_udp_port vxlan_port;
	struct qed_tunn_update_udp_port geneve_port;

	bool b_update_rx_cls;
	bool b_update_tx_cls;
};

/* Tunnel configuration passed at start time */
struct qed_tunn_start_params {
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

/* Tunnel configuration update request at run time */
struct qed_tunn_update_params {
	unsigned long	tunn_mode_update_mask;
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_rx_pf_clss;
	u8		update_tx_pf_clss;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

/* The PCI personality is not quite synonymous to protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may support also the RoCE/iWARP protocol
 */
enum qed_pci_personality {
	QED_PCI_ETH,
	QED_PCI_FCOE,
	QED_PCI_ISCSI,
	QED_PCI_ETH_ROCE,
	QED_PCI_ETH_IWARP,
	QED_PCI_ETH_RDMA,
	QED_PCI_DEFAULT, /* default in shmem */
};

/* All VFs are symmetric, all counters are PF + all VFs.
 * Aggregate ID counts consumed by QM (queue manager) initialization.
 */
struct qed_qm_iids {
	u32 cids;
	u32 vf_cids;
	u32 tids;
};

/* HW / FW resources, output of features supported below, most information
 * is received from MFW.
 */
enum qed_resources {
	QED_SB,
	QED_L2_QUEUE,
	QED_VPORT,
	QED_RSS_ENG,
	QED_PQ,
	QED_RL,
	QED_MAC,
	QED_VLAN,
	QED_RDMA_CNQ_RAM,
	QED_ILT,
	QED_LL2_RAM_QUEUE,
	QED_LL2_CTX_QUEUE,
	QED_CMDQS_CQS,
	QED_RDMA_STATS_QUEUE,
	QED_BDQ,
	QED_MAX_RESC,
};

/* Per-PF feature queue counts, indexes into hw_info.feat_num[] */
enum QED_FEATURE {
	QED_PF_L2_QUE,
	QED_VF,
	QED_RDMA_CNQ,
	QED_ISCSI_CQ,
	QED_FCOE_CQ,
	QED_VF_L2_QUE,
	QED_MAX_FEATURES,
};

/* Device capability bits, recorded in hw_info.device_capabilities */
enum qed_dev_cap {
	QED_DEV_CAP_ETH,
	QED_DEV_CAP_FCOE,
	QED_DEV_CAP_ISCSI,
	QED_DEV_CAP_ROCE,
	QED_DEV_CAP_IWARP,
};

/* Wake-on-LAN support level reported for the function */
enum qed_wol_support {
	QED_WOL_SUPPORT_NONE,
	QED_WOL_SUPPORT_PME,
};

/* Doorbell recovery execution modes */
enum qed_db_rec_exec {
	DB_REC_DRY_RUN,
	DB_REC_REAL_DEAL,
	DB_REC_ONCE,
};

267 268
struct qed_hw_info {
	/* PCI personality */
269 270 271 272
	enum qed_pci_personality	personality;
#define QED_IS_RDMA_PERSONALITY(dev)					\
	((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||		\
	 (dev)->hw_info.personality == QED_PCI_ETH_IWARP ||		\
273
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
274 275
#define QED_IS_ROCE_PERSONALITY(dev)					\
	((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||		\
276
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
277 278
#define QED_IS_IWARP_PERSONALITY(dev)					\
	((dev)->hw_info.personality == QED_PCI_ETH_IWARP ||		\
279
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
280 281
#define QED_IS_L2_PERSONALITY(dev)					\
	((dev)->hw_info.personality == QED_PCI_ETH ||			\
282
	 QED_IS_RDMA_PERSONALITY(dev))
283
#define QED_IS_FCOE_PERSONALITY(dev)					\
284
	((dev)->hw_info.personality == QED_PCI_FCOE)
285
#define QED_IS_ISCSI_PERSONALITY(dev)					\
286
	((dev)->hw_info.personality == QED_PCI_ISCSI)
287 288 289 290

	/* Resource Allocation scheme results */
	u32				resc_start[QED_MAX_RESC];
	u32				resc_num[QED_MAX_RESC];
291 292 293 294
#define RESC_START(_p_hwfn, resc)	((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc)		((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc)		(RESC_START(_p_hwfn, resc) +	\
					 RESC_NUM(_p_hwfn, resc))
295

296 297
	u32				feat_num[QED_MAX_FEATURES];
#define FEAT_NUM(_p_hwfn, resc)		((_p_hwfn)->hw_info.feat_num[resc])
298

A
Ariel Elior 已提交
299
	/* Amount of traffic classes HW supports */
300
	u8				num_hw_tc;
A
Ariel Elior 已提交
301 302 303 304

	/* Amount of TCs which should be active according to DCBx or upper
	 * layer driver configuration.
	 */
305 306
	u8				num_active_tc;

307
	u8				offload_tc;
308
	bool				offload_tc_set;
309

D
Denis Bolotin 已提交
310
	bool				multi_tc_roce_en;
311
#define IS_QED_MULTI_TC_ROCE(p_hwfn)	((p_hwfn)->hw_info.multi_tc_roce_en)
D
Denis Bolotin 已提交
312

313 314 315 316 317 318
	u32				concrete_fid;
	u16				opaque_fid;
	u16				ovlan;
	u32				part_num[4];

	unsigned char			hw_mac_addr[ETH_ALEN];
319 320 321 322
	u64				node_wwn;
	u64				port_wwn;

	u16				num_fcoe_conns;
323 324 325 326

	struct qed_igu_info		*p_igu_info;

	u32				hw_mode;
327
	unsigned long			device_capabilities;
328
	u16				mtu;
M
Mintz, Yuval 已提交
329

330
	enum qed_wol_support		b_wol_support;
331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360
};

/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE        0x2000

/* Per-hwfn DMAE engine context */
struct qed_dmae_info {
	/* Mutex for synchronizing access to functions */
	struct mutex	mutex;

	u8		channel;

	dma_addr_t	completion_word_phys_addr;

	/* The memory location where the DMAE writes the completion
	 * value when an operation is finished on this context.
	 */
	u32		*p_completion_word;

	dma_addr_t	intermediate_buffer_phys_addr;

	/* An intermediate buffer for DMAE operations that use virtual
	 * addresses - data is DMA'd to/from this buffer and then
	 * memcpy'd to/from the virtual address
	 */
	u32		*p_intermediate_buffer;

	dma_addr_t	dmae_cmd_phys_addr;
	struct dmae_cmd *p_dmae_cmd;
};

361 362 363 364 365 366
struct qed_wfq_data {
	/* when feature is configured for at least 1 vport */
	u32	min_speed;
	bool	configured;
};

367 368 369 370 371 372
struct qed_qm_info {
	struct init_qm_pq_params	*qm_pq_params;
	struct init_qm_vport_params	*qm_vport_params;
	struct init_qm_port_params	*qm_port_params;
	u16				start_pq;
	u8				start_vport;
A
Ariel Elior 已提交
373
	u16				 pure_lb_pq;
D
Denis Bolotin 已提交
374 375
	u16				first_ofld_pq;
	u16				first_llt_pq;
A
Ariel Elior 已提交
376 377 378 379 380
	u16				pure_ack_pq;
	u16				ooo_pq;
	u16				first_vf_pq;
	u16				first_mcos_pq;
	u16				first_rl_pq;
381 382 383 384
	u16				num_pqs;
	u16				num_vf_pqs;
	u8				num_vports;
	u8				max_phys_tcs_per_port;
A
Ariel Elior 已提交
385
	u8				ooo_tc;
386 387 388 389 390 391
	bool				pf_rl_en;
	bool				pf_wfq_en;
	bool				vport_rl_en;
	bool				vport_wfq_en;
	u8				pf_wfq;
	u32				pf_rl;
392
	struct qed_wfq_data		*wfq_data;
393
	u8 num_pf_rls;
394 395
};

396 397
#define QED_OVERFLOW_BIT	1

398 399 400 401 402
struct qed_db_recovery_info {
	struct list_head list;

	/* Lock to protect the doorbell recovery mechanism list */
	spinlock_t lock;
D
Denis Bolotin 已提交
403
	bool dorq_attn;
404
	u32 db_recovery_counter;
405
	unsigned long overflow;
406 407
};

M
Manish Chopra 已提交
408 409 410 411 412 413 414 415 416 417 418 419
struct storm_stats {
	u32     address;
	u32     len;
};

struct qed_storm_stats {
	struct storm_stats mstats;
	struct storm_stats pstats;
	struct storm_stats tstats;
	struct storm_stats ustats;
};

420
struct qed_fw_data {
M
Manish Chopra 已提交
421
	struct fw_ver_info	*fw_ver_info;
422 423 424
	const u8		*modes_tree_buf;
	union init_op		*init_ops;
	const u32		*arr_data;
425 426
	const u32		*fw_overlays;
	u32			fw_overlays_len;
427 428 429
	u32			init_ops_size;
};

/* Multi-function mode capability bits, recorded in cdev->mf_bits */
enum qed_mf_mode_bit {
	/* Supports PF-classification based on tag */
	QED_MF_OVLAN_CLSS,

	/* Supports PF-classification based on MAC */
	QED_MF_LLH_MAC_CLSS,

	/* Supports PF-classification based on protocol type */
	QED_MF_LLH_PROTO_CLSS,

	/* Requires a default PF to be set */
	QED_MF_NEED_DEF_PF,

	/* Allow LL2 to multicast/broadcast */
	QED_MF_LL2_NON_UNICAST,

	/* Allow Cross-PF [& child VFs] Tx-switching */
	QED_MF_INTER_PF_SWITCH,

	/* Unified Fabtic Port support enabled */
	QED_MF_UFP_SPECIFIC,

	/* Disable Accelerated Receive Flow Steering (aRFS) */
	QED_MF_DISABLE_ARFS,

	/* Use vlan for steering */
	QED_MF_8021Q_TAGGING,

	/* Use stag for steering */
	QED_MF_8021AD_TAGGING,

	/* Allow DSCP to TC mapping */
	QED_MF_DSCP_TO_TC_MAP,

	/* Do not insert a vlan tag with id 0 */
	QED_MF_DONT_ADD_VLAN0_TAG,
};

/* Unified Fabric Port (UFP) scheduling mode */
enum qed_ufp_mode {
	QED_UFP_MODE_ETS,
	QED_UFP_MODE_VNIC_BW,
	QED_UFP_MODE_UNKNOWN
};

/* Unified Fabric Port priority owner */
enum qed_ufp_pri_type {
	QED_UFP_PRI_OS,
	QED_UFP_PRI_VNIC,
	QED_UFP_PRI_UNKNOWN
};

/* Unified Fabric Port configuration for the function */
struct qed_ufp_info {
	enum qed_ufp_pri_type pri_type;
	enum qed_ufp_mode mode;
	u8 tc;
};

enum BAR_ID {
	BAR_ID_0,		/* used for GRC */
	BAR_ID_1		/* Used for doorbells */
};

491 492 493
struct qed_nvm_image_info {
	u32 num_images;
	struct bist_nvm_image_att *image_att;
494
	bool valid;
495 496
};

/* Chip-dependent HSI constants, queried via qed_get_hsi_def_val() */
enum qed_hsi_def_type {
	QED_HSI_DEF_MAX_NUM_VFS,
	QED_HSI_DEF_MAX_NUM_L2_QUEUES,
	QED_HSI_DEF_MAX_NUM_PORTS,
	QED_HSI_DEF_MAX_SB_PER_PATH,
	QED_HSI_DEF_MAX_NUM_PFS,
	QED_HSI_DEF_MAX_NUM_VPORTS,
	QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
	QED_HSI_DEF_MAX_QM_TX_QUEUES,
	QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
	QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
	QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
	QED_HSI_DEF_MAX_PBF_CMD_LINES,
	QED_HSI_DEF_MAX_BTB_BLOCKS,
	QED_NUM_HSI_DEFS
};

/* Driver version string, "major.minor.rev.eng" */
#define DRV_MODULE_VERSION		      \
	__stringify(QED_MAJOR_VERSION) "."    \
	__stringify(QED_MINOR_VERSION) "."    \
	__stringify(QED_REVISION_VERSION) "." \
	__stringify(QED_ENGINEERING_VERSION)

/* Fast-path handler registered per SIMD vector: callback plus its cookie */
struct qed_simd_fp_handler {
	void	*token;
	void	(*func)(void *);
};

/* Pending-work flags for the slowpath workqueue task */
enum qed_slowpath_wq_flag {
	QED_SLOWPATH_MFW_TLV_REQ,
	QED_SLOWPATH_PERIODIC_DB_REC,
};

530 531 532 533 534 535
struct qed_hwfn {
	struct qed_dev			*cdev;
	u8				my_id;          /* ID inside the PF */
#define IS_LEAD_HWFN(edev)              (!((edev)->my_id))
	u8				rel_pf_id;      /* Relative to engine*/
	u8				abs_pf_id;
536 537
#define QED_PATH_ID(_p_hwfn) \
	(QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
538 539 540 541 542 543 544 545 546
	u8				port_id;
	bool				b_active;

	u32				dp_module;
	u8				dp_level;
	char				name[NAME_SIZE];

	bool				hw_init_done;

Y
Yuval Mintz 已提交
547
	u8				num_funcs_on_engine;
548
	u8 enabled_func_idx;
Y
Yuval Mintz 已提交
549

550 551 552 553 554 555 556 557 558 559 560 561 562
	/* BAR access */
	void __iomem			*regview;
	void __iomem			*doorbells;
	u64				db_phys_addr;
	unsigned long			db_size;

	/* PTT pool */
	struct qed_ptt_pool		*p_ptt_pool;

	/* HW info */
	struct qed_hw_info		hw_info;

	/* rt_array (for init-tool) */
Y
Yuval Mintz 已提交
563
	struct qed_rt_data		rt_data;
564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580

	/* SPQ */
	struct qed_spq			*p_spq;

	/* EQ */
	struct qed_eq			*p_eq;

	/* Consolidate Q*/
	struct qed_consq		*p_consq;

	/* Slow-Path definitions */
	struct tasklet_struct		*sp_dpc;
	bool				b_sp_dpc_enabled;

	struct qed_ptt			*p_main_ptt;
	struct qed_ptt			*p_dpc_ptt;

581 582 583 584 585
	/* PTP will be used only by the leading function.
	 * Usage of all PTP-apis should be synchronized as result.
	 */
	struct qed_ptt *p_ptp_ptt;

586 587 588 589
	struct qed_sb_sp_info		*p_sp_sb;
	struct qed_sb_attn_info		*p_sb_attn;

	/* Protocol related */
Y
Yuval Mintz 已提交
590 591
	bool				using_ll2;
	struct qed_ll2_info		*p_ll2_info;
592
	struct qed_ooo_info		*p_ooo_info;
R
Ram Amrani 已提交
593
	struct qed_rdma_info		*p_rdma_info;
594
	struct qed_iscsi_info		*p_iscsi_info;
595
	struct qed_fcoe_info		*p_fcoe_info;
596 597
	struct qed_pf_params		pf_params;

598 599 600
	bool b_rdma_enabled_in_prs;
	u32 rdma_prs_search_reg;

601 602 603 604
	struct qed_cxt_mngr		*p_cxt_mngr;

	/* Flag indicating whether interrupts are enabled or not*/
	bool				b_int_enabled;
605
	bool				b_int_requested;
606

607 608 609
	/* True if the driver requests for the link */
	bool				b_drv_link_init;

Y
Yuval Mintz 已提交
610
	struct qed_vf_iov		*vf_iov_info;
Y
Yuval Mintz 已提交
611
	struct qed_pf_iov		*pf_iov_info;
612 613
	struct qed_mcp_info		*mcp_info;

614 615
	struct qed_dcbx_info		*p_dcbx_info;

616 617
	struct qed_ufp_info		ufp_info;

618 619 620 621
	struct qed_dmae_info		dmae_info;

	/* QM init */
	struct qed_qm_info		qm_info;
M
Manish Chopra 已提交
622
	struct qed_storm_stats		storm_stats;
623 624 625 626

	/* Buffer for unzipping firmware data */
	void				*unzip_buf;

627
	struct dbg_tools_data		dbg_info;
D
Denis Bolotin 已提交
628
	void				*dbg_user_info;
629
	struct virt_mem_desc		dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
630

R
Ram Amrani 已提交
631
	/* PWM region specific data */
632
	u16				wid_count;
R
Ram Amrani 已提交
633 634 635 636 637 638 639 640 641 642
	u32				dpi_size;
	u32				dpi_count;

	/* This is used to calculate the doorbell address */
	u32 dpi_start_offset;

	/* If one of the following is set then EDPM shouldn't be used */
	u8 dcbx_no_edpm;
	u8 db_bar_no_edpm;

M
Mintz, Yuval 已提交
643 644 645
	/* L2-related */
	struct qed_l2_info *p_l2_info;

646 647 648
	/* Mechanism for recovering from doorbell drop */
	struct qed_db_recovery_info db_recovery_info;

649 650 651
	/* Nvm images number and attributes */
	struct qed_nvm_image_info nvm_info;

652
	struct phys_mem_desc *fw_overlay_mem;
653 654
	struct qed_ptt *p_arfs_ptt;

655 656
	struct qed_simd_fp_handler	simd_proto_handler[64];

657 658 659 660 661
#ifdef CONFIG_QED_SRIOV
	struct workqueue_struct *iov_wq;
	struct delayed_work iov_task;
	unsigned long iov_task_flags;
#endif
662 663
	struct z_stream_s *stream;
	bool slowpath_wq_active;
664 665 666
	struct workqueue_struct *slowpath_wq;
	struct delayed_work slowpath_task;
	unsigned long slowpath_task_flags;
667
	u32 periodic_db_rec_count;
668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691
};

/* PCI resource summary captured at probe */
struct pci_params {
	int		pm_cap;

	unsigned long	mem_start;
	unsigned long	mem_end;
	unsigned int	irq;
	u8		pf_num;
};

/* Interrupt mode request/result triplet */
struct qed_int_param {
	u32	int_mode;
	u8	num_vectors;
	u8	min_msix_cnt; /* for minimal functionality */
};

/* Requested vs. granted interrupt configuration */
struct qed_int_params {
	struct qed_int_param	in;
	struct qed_int_param	out;
	struct msix_entry	*msix_table;
	bool			fp_initialized;
	u8			fp_msix_base;
	u8			fp_msix_cnt;
	u8			rdma_msix_base;
	u8			rdma_msix_cnt;
};

696 697 698 699 700 701 702
struct qed_dbg_feature {
	struct dentry *dentry;
	u8 *dump_buf;
	u32 buf_size;
	u32 dumped_dwords;
};

703
struct qed_dev {
704 705 706 707 708 709 710 711 712 713 714
	u32				dp_module;
	u8				dp_level;
	char				name[NAME_SIZE];

	enum qed_dev_type		type;
	/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev)			((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_B0(dev)		(QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev)			((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev)			QED_IS_AH(dev)
#define QED_IS_E4(dev)			(QED_IS_BB(dev) || QED_IS_AH(dev))
715
#define QED_IS_E5(dev)			((dev)->type == QED_DEV_TYPE_E5)
716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731

	u16				vendor_id;

	u16				device_id;
#define QED_DEV_ID_MASK			0xff00
#define QED_DEV_ID_MASK_BB		0x1600
#define QED_DEV_ID_MASK_AH		0x8000

	u16				chip_num;
#define CHIP_NUM_MASK			0xffff
#define CHIP_NUM_SHIFT			16

	u16				chip_rev;
#define CHIP_REV_MASK			0xf
#define CHIP_REV_SHIFT			12
#define CHIP_REV_IS_B0(_cdev)		((_cdev)->chip_rev == 1)
732 733

	u16				chip_metal;
734 735
#define CHIP_METAL_MASK			0xff
#define CHIP_METAL_SHIFT		4
736 737

	u16				chip_bond_id;
738 739
#define CHIP_BOND_ID_MASK		0xf
#define CHIP_BOND_ID_SHIFT		0
740 741

	u8				num_engines;
742
	u8				num_ports;
743
	u8				num_ports_in_engine;
744 745 746
	u8				num_funcs_in_port;

	u8				path_id;
747 748

	unsigned long			mf_bits;
749 750 751 752 753 754 755 756

	int				pcie_width;
	int				pcie_speed;

	/* Add MF related configuration */
	u8				mcp_rev;
	u8				boot_mode;

M
Mintz, Yuval 已提交
757 758 759
	/* WoL related configurations */
	u8 wol_config;
	u8 wol_mac[ETH_ALEN];
760 761 762

	u32				int_mode;
	enum qed_coalescing_mode	int_coalescing_mode;
763 764
	u16				rx_coalesce_usecs;
	u16				tx_coalesce_usecs;
765 766 767 768 769 770 771 772 773 774 775

	/* Start Bar offset of first hwfn */
	void __iomem			*regview;
	void __iomem			*doorbells;
	u64				db_phys_addr;
	unsigned long			db_size;

	/* PCI */
	u8				cache_shift;

	/* Init */
776 777
	const u32 *iro_arr;
#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)
778 779 780 781 782

	/* HW functions */
	u8				num_hwfns;
	struct qed_hwfn			hwfns[MAX_HWFNS_PER_DEVICE];

783 784 785 786 787
	/* Engine affinity */
	u8				l2_affin_hint;
	u8				fir_affin;
	u8				iwarp_affin;

Y
Yuval Mintz 已提交
788 789 790
	/* SRIOV */
	struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev)              (!!(cdev)->p_iov_info)
791
	struct qed_tunnel_info		tunnel;
Y
Yuval Mintz 已提交
792
	bool				b_is_vf;
793 794 795 796 797 798
	u32				drv_type;
	struct qed_eth_stats		*reset_stats;
	struct qed_fw_data		*fw_data;

	u32				mcp_nvm_resp;

799 800 801
	/* Recovery */
	bool recov_in_prog;

802 803 804
	/* Indicates whether should prevent attentions from being reasserted */
	bool attn_clr_en;

805 806 807 808
	/* LLH info */
	u8 ppfid_bitmap;
	struct qed_llh_info *p_llh_info;

809
	/* Linux specific here */
810
	struct qed_dev_info		common_dev_info;
811 812
	struct  qede_dev		*edev;
	struct  pci_dev			*pdev;
813 814
	u32 flags;
#define QED_FLAG_STORAGE_STARTED	(BIT(0))
815 816 817 818 819 820 821 822
	int				msg_enable;

	struct pci_params		pci_params;

	struct qed_int_params		int_params;

	u8				protocol;
#define IS_QED_ETH_IF(cdev)     ((cdev)->protocol == QED_PROTOCOL_ETH)
823
#define IS_QED_FCOE_IF(cdev)    ((cdev)->protocol == QED_PROTOCOL_FCOE)
824

Y
Yuval Mintz 已提交
825 826 827 828
	/* Callbacks to protocol driver */
	union {
		struct qed_common_cb_ops	*common;
		struct qed_eth_cb_ops		*eth;
829
		struct qed_fcoe_cb_ops		*fcoe;
830
		struct qed_iscsi_cb_ops		*iscsi;
Y
Yuval Mintz 已提交
831 832 833
	} protocol_ops;
	void				*ops_cookie;

Y
Yuval Mintz 已提交
834 835 836 837
#ifdef CONFIG_QED_LL2
	struct qed_cb_ll2_info		*ll2;
	u8				ll2_mac_address[ETH_ALEN];
#endif
838
	struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
839
	u8 engine_for_debug;
840
	bool disable_ilt_dump;
841 842
	bool				dbg_bin_dump;

843
	DECLARE_HASHTABLE(connections, 10);
844
	const struct firmware		*firmware;
R
Ram Amrani 已提交
845

846 847
	bool print_dbg_data;

R
Ram Amrani 已提交
848 849 850
	u32 rdma_max_sge;
	u32 rdma_max_inline;
	u32 rdma_max_srq_sge;
851
	u16 tunn_feature_mask;
852 853

	bool				iwarp_cmt;
854 855
};

856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884
u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);

#define NUM_OF_VFS(dev)	\
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
#define NUM_OF_L2_QUEUES(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
#define NUM_OF_PORTS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
#define NUM_OF_SBS(dev)	\
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
#define NUM_OF_ENG_PFS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
#define NUM_OF_VPORTS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
#define NUM_OF_RSS_ENGINES(dev)	\
	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
#define NUM_OF_QM_TX_QUEUES(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
#define NUM_OF_PXP_ILT_RECORDS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
#define NUM_OF_QM_GLOBAL_RLS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
#define NUM_OF_PBF_CMD_LINES(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
#define NUM_OF_BTB_BLOCKS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)

885 886 887 888 889 890 891 892 893 894 895 896

/**
 * @brief qed_concrete_to_sw_fid - get the sw function id from
 *        the concrete value.
 *
 * @param concrete_fid
 *
 * @return inline u8
 */
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
					u32 concrete_fid)
{
897
	u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
898
	u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
899 900 901
	u8 vf_valid = GET_FIELD(concrete_fid,
				PXP_CONCRETE_FID_VFVALID);
	u8 sw_fid;
902

903 904 905 906 907 908
	if (vf_valid)
		sw_fid = vfid + MAX_NUM_PFS;
	else
		sw_fid = pfid;

	return sw_fid;
909 910
}

911
#define PKT_LB_TC	9
T
Tomer Tayar 已提交
912
#define MAX_NUM_VOQS_E4	20
913

Y
Yuval Mintz 已提交
914
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
915 916 917
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
					 struct qed_ptt *p_ptt,
					 u32 min_pf_rate);
918

Y
Yuval Mintz 已提交
919
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
920
int qed_device_num_engines(struct qed_dev *cdev);
921 922
void qed_set_fw_mac_addr(__le16 *fw_msb,
			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
923

A
Ariel Elior 已提交
924
#define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
925 926 927 928 929 930 931 932
#define QED_IS_CMT(dev)		((dev)->num_hwfns > 1)
/* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */
#define QED_FIR_AFFIN_HWFN(dev)		(&(dev)->hwfns[dev->fir_affin])
#define QED_IWARP_AFFIN_HWFN(dev)       (&(dev)->hwfns[dev->iwarp_affin])
#define QED_AFFIN_HWFN(dev)				   \
	(QED_IS_IWARP_PERSONALITY(QED_LEADING_HWFN(dev)) ? \
	 QED_IWARP_AFFIN_HWFN(dev) : QED_FIR_AFFIN_HWFN(dev))
#define QED_AFFIN_HWFN_IDX(dev) (IS_LEAD_HWFN(QED_AFFIN_HWFN(dev)) ? 0 : 1)
A
Ariel Elior 已提交
933 934 935 936 937 938 939 940 941 942

/* Flags for indication of required queues */
#define PQ_FLAGS_RLS    (BIT(0))
#define PQ_FLAGS_MCOS   (BIT(1))
#define PQ_FLAGS_LB     (BIT(2))
#define PQ_FLAGS_OOO    (BIT(3))
#define PQ_FLAGS_ACK    (BIT(4))
#define PQ_FLAGS_OFLD   (BIT(5))
#define PQ_FLAGS_VFS    (BIT(6))
#define PQ_FLAGS_LLT    (BIT(7))
D
Denis Bolotin 已提交
943
#define PQ_FLAGS_MTC    (BIT(8))
A
Ariel Elior 已提交
944 945 946 947 948

/* physical queue index for cm context intialization */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
D
Denis Bolotin 已提交
949 950
u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
A
Ariel Elior 已提交
951

952 953
/* doorbell recovery mechanism */
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
954
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
955 956
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);

/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)

#define REG_ADDR(cdev, offset)          (void __iomem *)((u8 __iomem *)\
						(cdev->regview) + \
							 (offset))

#define REG_RD(cdev, offset)            readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val)       writel((u32)val, REG_ADDR(cdev, offset))
#define REG_WR16(cdev, offset, val)     writew((u16)val, REG_ADDR(cdev, offset))

#define DOORBELL(cdev, db_addr, val)			 \
	writel((u32)val, (void __iomem *)((u8 __iomem *)\
					  (cdev->doorbells) + (db_addr)))

#define MFW_PORT(_p_hwfn)       ((_p_hwfn)->abs_pf_id %			  \
				  qed_device_num_ports((_p_hwfn)->cdev))
int qed_device_num_ports(struct qed_dev *cdev);

976 977 978
/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info);
979
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
980
void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
981 982 983
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
		   u32 input_len, u8 *input_buf,
		   u32 max_size, u8 *unzip_buf);
984
int qed_recovery_process(struct qed_dev *cdev);
985
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
986 987
void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
			   enum qed_hw_err_type err_type);
988 989 990
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats);
991
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
T
Tomer Tayar 已提交
992
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
993
int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
994

995 996 997
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
			  enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_data);
998 999

void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);
1000 1001

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn);
1002
#endif /* _QED_H */