/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_H
#define _QED_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_hsi.h"

extern const struct qed_common_ops qed_common_ops_pass;
#define DRV_MODULE_VERSION "8.7.1.20"

#define MAX_HWFNS_PER_DEVICE    (4)
#define NAME_SIZE 16
#define VER_SIZE 16

#define QED_WFQ_UNIT	100

/* cau states */
enum qed_coalescing_mode {
	QED_COAL_MODE_DISABLE,
	QED_COAL_MODE_ENABLE
};

struct qed_eth_cb_ops;
struct qed_dev_info;

/* helpers */
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);

	return db_addr;
}
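/* Illustrative usage, not part of the original header: a caller holding a
 * connection ID can derive the legacy doorbell offset and ring it via the
 * DOORBELL() macro defined further below. 'cid', 'dems' and 'db_value' are
 * placeholders for real values owned by the queue code.
 *
 *	u32 db_offset = qed_db_addr(cid, dems);
 *
 *	DOORBELL(cdev, db_offset, db_value);
 */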

#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)				     \
	((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
	 ~((1 << (p_hwfn->cdev->cache_shift)) - 1))

#define for_each_hwfn(cdev, i)  for (i = 0; i < cdev->num_hwfns; i++)

#define D_TRINE(val, cond1, cond2, true1, true2, def) \
	(val == (cond1) ? true1 :		      \
	 (val == (cond2) ? true2 : def))
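/* Illustrative usage, not part of the original header: for_each_hwfn() walks
 * every HW-function of a device,
 *
 *	int i;
 *
 *	for_each_hwfn(cdev, i) {
 *		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 *		...
 *	}
 *
 * while D_TRINE() collapses a two-way match with a default, e.g.
 * D_TRINE(val, COND_A, COND_B, ret_a, ret_b, ret_default), where the COND_*
 * and ret_* names are placeholders.
 */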

/* forward */
struct qed_ptt_pool;
struct qed_spq;
struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_mcp_info;

struct qed_rt_data {
	u32	*init_val;
	bool	*b_valid;
};

enum qed_tunn_mode {
	QED_MODE_L2GENEVE_TUNN,
	QED_MODE_IPGENEVE_TUNN,
	QED_MODE_L2GRE_TUNN,
	QED_MODE_IPGRE_TUNN,
	QED_MODE_VXLAN_TUNN,
};

enum qed_tunn_clss {
	QED_TUNN_CLSS_MAC_VLAN,
	QED_TUNN_CLSS_MAC_VNI,
	QED_TUNN_CLSS_INNER_MAC_VLAN,
	QED_TUNN_CLSS_INNER_MAC_VNI,
	MAX_QED_TUNN_CLSS,
};

struct qed_tunn_start_params {
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

struct qed_tunn_update_params {
	unsigned long	tunn_mode_update_mask;
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_rx_pf_clss;
	u8		update_tx_pf_clss;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

/* The PCI personality is not quite synonymous with the protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may also support the RoCE protocol
 */
enum qed_pci_personality {
	QED_PCI_ETH,
	QED_PCI_ISCSI,
	QED_PCI_ETH_ROCE,
	QED_PCI_DEFAULT /* default in shmem */
};

/* All VFs are symmetric, all counters are PF + all VFs */
struct qed_qm_iids {
	u32 cids;
	u32 vf_cids;
	u32 tids;
};

enum QED_RESOURCES {
	QED_SB,
	QED_L2_QUEUE,
	QED_VPORT,
	QED_RSS_ENG,
	QED_PQ,
	QED_RL,
	QED_MAC,
	QED_VLAN,
	QED_ILT,
	QED_MAX_RESC,
};

enum QED_FEATURE {
	QED_PF_L2_QUE,
	QED_VF,
	QED_MAX_FEATURES,
};

enum QED_PORT_MODE {
	QED_PORT_MODE_DE_2X40G,
	QED_PORT_MODE_DE_2X50G,
	QED_PORT_MODE_DE_1X100G,
	QED_PORT_MODE_DE_4X10G_F,
	QED_PORT_MODE_DE_4X10G_E,
	QED_PORT_MODE_DE_4X20G,
	QED_PORT_MODE_DE_1X40G,
	QED_PORT_MODE_DE_2X25G,
	QED_PORT_MODE_DE_1X25G
};

enum qed_dev_cap {
	QED_DEV_CAP_ETH,
	QED_DEV_CAP_ISCSI,
	QED_DEV_CAP_ROCE,
};

struct qed_hw_info {
	/* PCI personality */
	enum qed_pci_personality	personality;

	/* Resource Allocation scheme results */
	u32				resc_start[QED_MAX_RESC];
	u32				resc_num[QED_MAX_RESC];
	u32				feat_num[QED_MAX_FEATURES];

#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])

	u8				num_tc;
	u8				offload_tc;
	u8				non_offload_tc;

	u32				concrete_fid;
	u16				opaque_fid;
	u16				ovlan;
	u32				part_num[4];

	unsigned char			hw_mac_addr[ETH_ALEN];

	struct qed_igu_info		*p_igu_info;

	u32				port_mode;
	u32				hw_mode;
	unsigned long		device_capabilities;
};
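/* Illustrative usage, not part of the original header: the RESC_START()/
 * RESC_NUM()/FEAT_NUM() accessors above index the per-hwfn allocation arrays
 * by the QED_RESOURCES / QED_FEATURE enums, e.g.
 *
 *	u32 n_l2_queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
 *	u32 first_vport = RESC_START(p_hwfn, QED_VPORT);
 *	u32 n_pf_l2_que = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
 */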

struct qed_hw_cid_data {
	u32	cid;
	bool	b_cid_allocated;

	/* Additional identifiers */
	u16	opaque_fid;
	u8	vport_id;
};

/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE        0x2000

struct qed_dmae_info {
	/* Mutex for synchronizing access to functions */
	struct mutex	mutex;

	u8		channel;

	dma_addr_t	completion_word_phys_addr;

	/* The memory location where the DMAE writes the completion
	 * value when an operation is finished on this context.
	 */
	u32		*p_completion_word;

	dma_addr_t	intermediate_buffer_phys_addr;

	/* An intermediate buffer for DMAE operations that use virtual
	 * addresses - data is DMA'd to/from this buffer and then
	 * memcpy'd to/from the virtual address
	 */
	u32		*p_intermediate_buffer;

	dma_addr_t	dmae_cmd_phys_addr;
	struct dmae_cmd *p_dmae_cmd;
};

struct qed_wfq_data {
	/* when feature is configured for at least 1 vport */
	u32	min_speed;
	bool	configured;
};

struct qed_qm_info {
	struct init_qm_pq_params	*qm_pq_params;
	struct init_qm_vport_params	*qm_vport_params;
	struct init_qm_port_params	*qm_port_params;
	u16				start_pq;
	u8				start_vport;
	u8				pure_lb_pq;
	u8				offload_pq;
	u8				pure_ack_pq;
	u8				vf_queues_offset;
	u16				num_pqs;
	u16				num_vf_pqs;
	u8				num_vports;
	u8				max_phys_tcs_per_port;
	bool				pf_rl_en;
	bool				pf_wfq_en;
	bool				vport_rl_en;
	bool				vport_wfq_en;
	u8				pf_wfq;
	u32				pf_rl;
	struct qed_wfq_data		*wfq_data;
};

struct storm_stats {
	u32     address;
	u32     len;
};

struct qed_storm_stats {
	struct storm_stats mstats;
	struct storm_stats pstats;
	struct storm_stats tstats;
	struct storm_stats ustats;
};

struct qed_fw_data {
	struct fw_ver_info	*fw_ver_info;
	const u8		*modes_tree_buf;
	union init_op		*init_ops;
	const u32		*arr_data;
	u32			init_ops_size;
};

struct qed_simd_fp_handler {
	void	*token;
	void	(*func)(void *);
};

struct qed_hwfn {
	struct qed_dev			*cdev;
	u8				my_id;          /* ID inside the PF */
#define IS_LEAD_HWFN(edev)              (!((edev)->my_id))
	u8				rel_pf_id;      /* Relative to engine*/
	u8				abs_pf_id;
#define QED_PATH_ID(_p_hwfn)		((_p_hwfn)->abs_pf_id & 1)
	u8				port_id;
	bool				b_active;

	u32				dp_module;
	u8				dp_level;
	char				name[NAME_SIZE];

	bool				first_on_engine;
	bool				hw_init_done;

	u8				num_funcs_on_engine;

	/* BAR access */
	void __iomem			*regview;
	void __iomem			*doorbells;
	u64				db_phys_addr;
	unsigned long			db_size;

	/* PTT pool */
	struct qed_ptt_pool		*p_ptt_pool;

	/* HW info */
	struct qed_hw_info		hw_info;

	/* rt_array (for init-tool) */
	struct qed_rt_data		rt_data;

	/* SPQ */
	struct qed_spq			*p_spq;

	/* EQ */
	struct qed_eq			*p_eq;

	/* Consolidation queue */
	struct qed_consq		*p_consq;

	/* Slow-Path definitions */
	struct tasklet_struct		*sp_dpc;
	bool				b_sp_dpc_enabled;

	struct qed_ptt			*p_main_ptt;
	struct qed_ptt			*p_dpc_ptt;

	struct qed_sb_sp_info		*p_sp_sb;
	struct qed_sb_attn_info		*p_sb_attn;

	/* Protocol related */
	struct qed_pf_params		pf_params;

	/* Array of sb_info of all status blocks */
	struct qed_sb_info		*sbs_info[MAX_SB_PER_PF_MIMD];
	u16				num_sbs;

	struct qed_cxt_mngr		*p_cxt_mngr;

	/* Flag indicating whether interrupts are enabled or not */
	bool				b_int_enabled;
	bool				b_int_requested;

	/* True if the driver has requested the link */
	bool				b_drv_link_init;

	struct qed_vf_iov		*vf_iov_info;
	struct qed_pf_iov		*pf_iov_info;
	struct qed_mcp_info		*mcp_info;

	struct qed_dcbx_info		*p_dcbx_info;

	struct qed_hw_cid_data		*p_tx_cids;
	struct qed_hw_cid_data		*p_rx_cids;

	struct qed_dmae_info		dmae_info;

	/* QM init */
	struct qed_qm_info		qm_info;
	struct qed_storm_stats		storm_stats;

	/* Buffer for unzipping firmware data */
	void				*unzip_buf;

	struct qed_simd_fp_handler	simd_proto_handler[64];

#ifdef CONFIG_QED_SRIOV
	struct workqueue_struct *iov_wq;
	struct delayed_work iov_task;
	unsigned long iov_task_flags;
#endif

	struct z_stream_s		*stream;
};

struct pci_params {
	int		pm_cap;

	unsigned long	mem_start;
	unsigned long	mem_end;
	unsigned int	irq;
	u8		pf_num;
};

struct qed_int_param {
	u32	int_mode;
	u8	num_vectors;
	u8	min_msix_cnt; /* for minimal functionality */
};

struct qed_int_params {
	struct qed_int_param	in;
	struct qed_int_param	out;
	struct msix_entry	*msix_table;
	bool			fp_initialized;
	u8			fp_msix_base;
	u8			fp_msix_cnt;
};

struct qed_dev {
	u32	dp_module;
	u8	dp_level;
	char	name[NAME_SIZE];

	u8	type;
#define QED_DEV_TYPE_BB (0 << 0)
#define QED_DEV_TYPE_AH BIT(0)
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev)  ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_A0(dev)       (QED_IS_BB(dev) && \
				 CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev)       (QED_IS_BB(dev) && \
				 CHIP_REV_IS_B0(dev))

#define QED_GET_TYPE(dev)       (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
				 QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)

	u16	vendor_id;
	u16	device_id;

	u16	chip_num;
#define CHIP_NUM_MASK                   0xffff
#define CHIP_NUM_SHIFT                  16

	u16	chip_rev;
#define CHIP_REV_MASK                   0xf
#define CHIP_REV_SHIFT                  12
#define CHIP_REV_IS_A0(_cdev)   (!(_cdev)->chip_rev)
#define CHIP_REV_IS_B0(_cdev)   ((_cdev)->chip_rev == 1)

	u16				chip_metal;
#define CHIP_METAL_MASK                 0xff
#define CHIP_METAL_SHIFT                4

	u16				chip_bond_id;
#define CHIP_BOND_ID_MASK               0xf
#define CHIP_BOND_ID_SHIFT              0

	u8				num_engines;
	u8				num_ports_in_engines;
	u8				num_funcs_in_port;

	u8				path_id;
	enum qed_mf_mode		mf_mode;
#define IS_MF_DEFAULT(_p_hwfn)  (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
#define IS_MF_SD(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)

	int				pcie_width;
	int				pcie_speed;
	u8				ver_str[VER_SIZE];

	/* Add MF related configuration */
	u8				mcp_rev;
	u8				boot_mode;

	u8				wol;

	u32				int_mode;
	enum qed_coalescing_mode	int_coalescing_mode;
	u8				rx_coalesce_usecs;
	u8				tx_coalesce_usecs;

	/* Start Bar offset of first hwfn */
	void __iomem			*regview;
	void __iomem			*doorbells;
	u64				db_phys_addr;
	unsigned long			db_size;

	/* PCI */
	u8				cache_shift;

	/* Init */
	const struct iro		*iro_arr;
#define IRO (p_hwfn->cdev->iro_arr)

	/* HW functions */
	u8				num_hwfns;
	struct qed_hwfn			hwfns[MAX_HWFNS_PER_DEVICE];

	/* SRIOV */
	struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev)              (!!(cdev)->p_iov_info)

	unsigned long			tunn_mode;

	bool				b_is_vf;
	u32				drv_type;

	struct qed_eth_stats		*reset_stats;
	struct qed_fw_data		*fw_data;

	u32				mcp_nvm_resp;

	/* Linux specific here */
	struct  qede_dev		*edev;
	struct  pci_dev			*pdev;
	int				msg_enable;

	struct pci_params		pci_params;

	struct qed_int_params		int_params;

	u8				protocol;
#define IS_QED_ETH_IF(cdev)     ((cdev)->protocol == QED_PROTOCOL_ETH)

	/* Callbacks to protocol driver */
	union {
		struct qed_common_cb_ops	*common;
		struct qed_eth_cb_ops		*eth;
	} protocol_ops;
	void				*ops_cookie;

	const struct firmware		*firmware;
};

#define NUM_OF_VFS(dev)         MAX_NUM_VFS_BB
#define NUM_OF_L2_QUEUES(dev)	MAX_NUM_L2_QUEUES_BB
#define NUM_OF_SBS(dev)         MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev)     MAX_NUM_PFS_BB

/**
 * @brief qed_concrete_to_sw_fid - get the sw function id from
 *        the concrete value.
 *
 * @param concrete_fid
 *
 * @return inline u8
 */
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
					u32 concrete_fid)
{
	u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);

	return pfid;
}
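/* Illustrative usage, not part of the original header: a hwfn's own concrete
 * FID can be mapped back to its software PF index, e.g.
 *
 *	u8 sw_fid = qed_concrete_to_sw_fid(cdev, p_hwfn->hw_info.concrete_fid);
 */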

#define PURE_LB_TC 8

int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])

/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)

#define REG_ADDR(cdev, offset)          (void __iomem *)((u8 __iomem *)\
						(cdev->regview) + \
							 (offset))

#define REG_RD(cdev, offset)            readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val)       writel((u32)val, REG_ADDR(cdev, offset))
#define REG_WR16(cdev, offset, val)     writew((u16)val, REG_ADDR(cdev, offset))

#define DOORBELL(cdev, db_addr, val)			 \
	writel((u32)val, (void __iomem *)((u8 __iomem *)\
					  (cdev->doorbells) + (db_addr)))
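/* Illustrative usage, not part of the original header: REG_RD()/REG_WR()
 * access a device register through the mapped BAR, e.g.
 *
 *	u32 val = REG_RD(cdev, reg_offset);
 *
 *	REG_WR(cdev, reg_offset, val | some_flag);
 *
 * where 'reg_offset' and 'some_flag' are placeholders for a real register
 * offset and bit mask.
 */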

/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
		   u32 input_len, u8 *input_buf,
		   u32 max_size, u8 *unzip_buf);

int qed_slowpath_irq_req(struct qed_hwfn *hwfn);

#endif /* _QED_H */