/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#ifndef RVU_H
#define RVU_H

#include <linux/pci.h>
#include <net/devlink.h>

#include "rvu_struct.h"
#include "rvu_devlink.h"
#include "common.h"
#include "mbox.h"
#include "npc.h"
#include "rvu_reg.h"

/* PCI device IDs */
#define	PCI_DEVID_OCTEONTX2_RVU_AF		0xA065
#define	PCI_DEVID_OCTEONTX2_LBK			0xA061

/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX                  0xB200
#define PCI_SUBSYS_DEVID_CN10K_A	       0xB900
#define PCI_SUBSYS_DEVID_CNF10K_B              0xBC00
#define PCI_SUBSYS_DEVID_CN10K_B               0xBD00

/* PCI BAR nos */
#define	PCI_AF_REG_BAR_NUM			0
#define	PCI_PF_REG_BAR_NUM			2
#define	PCI_MBOX_BAR_NUM			4

/* Length of RVU block name strings (struct rvu_block::name) */
#define NAME_SIZE				32

/* Max number of NIX and CPT blocks a silicon may implement */
#define MAX_NIX_BLKS				2
#define MAX_CPT_BLKS				2

/* PF_FUNC: bit layout of an RVU PF/VF function number (pcifunc) */
#define RVU_PFVF_PF_SHIFT	10
#define RVU_PFVF_PF_MASK	0x3F
#define RVU_PFVF_FUNC_SHIFT	0
#define RVU_PFVF_FUNC_MASK	0x3FF

#ifdef CONFIG_DEBUG_FS
/* State of an in-progress debugfs queue-context dump */
struct dump_ctx {
	int	lf;	/* LF whose context is being dumped */
	int	id;	/* queue/aura/pool index within the LF */
	bool	all;	/* dump contexts of all queues of the LF */
};

/* Per-CPT-block cookie passed to CPT debugfs files */
struct cpt_ctx {
	int blkaddr;		/* BLKADDR_CPT0/1 */
	struct rvu *rvu;
};

/* Debugfs dentries and dump state of the RVU AF driver */
struct rvu_debugfs {
	struct dentry *root;
	struct dentry *cgx_root;
	struct dentry *cgx;
	struct dentry *lmac;
	struct dentry *npa;
	struct dentry *nix;
	struct dentry *npc;
	struct dentry *cpt;
	struct dump_ctx npa_aura_ctx;
	struct dump_ctx npa_pool_ctx;
	struct dump_ctx nix_cq_ctx;
	struct dump_ctx nix_rq_ctx;
	struct dump_ctx nix_sq_ctx;
	struct cpt_ctx cpt_ctx[MAX_CPT_BLKS];
	int npa_qsize_id;
	int nix_qsize_id;
};
#endif

/* Deferred mailbox processing: one work item per PF/VF mailbox */
struct rvu_work {
	struct	work_struct work;
	struct	rvu *rvu;
	int num_msgs;	/* messages pending on the down mbox */
	int up_num_msgs;	/* messages pending on the up mbox */
};

/* Generic allocator over a range of HW resource ids */
struct rsrc_bmap {
	unsigned long *bmap;	/* Pointer to resource bitmap */
	u16  max;		/* Max resource id or count */
};

/* Per HW block (NPA/NIX/SSO/SSOW/TIM/CPT) state and register offsets */
struct rvu_block {
	struct rsrc_bmap	lf;	/* LF allocator for this block */
	struct admin_queue	*aq; /* NIX/NPA AQ */
	u16  *fn_map; /* LF to pcifunc mapping */
	bool multislot;
	bool implemented;	/* block present on this silicon */
	u8   addr;  /* RVU_BLOCK_ADDR_E */
	u8   type;  /* RVU_BLOCK_TYPE_E */
	u8   lfshift;
	u64  lookup_reg;
	u64  pf_lfcnt_reg;
	u64  vf_lfcnt_reg;
	u64  lfcfg_reg;
	u64  msixcfg_reg;
	u64  lfreset_reg;
	unsigned char name[NAME_SIZE];
	struct rvu *rvu;
};

/* NIX multicast/replication HW contexts */
struct nix_mcast {
	struct qmem	*mce_ctx;
	struct qmem	*mcast_buf;
	int		replay_pkind;
	int		next_free_mce;
	struct mutex	mce_lock; /* Serialize MCE updates */
};

/* Software list of MCE entries belonging to one replication group */
struct nix_mce_list {
	struct hlist_head	head;
	int			count;
	int			max;
};

/* layer metadata to uniquely identify a packet header field */
struct npc_layer_mdata {
	u8 lid;
	u8 ltype;
	u8 hdr;
	u8 key;
	u8 len;
};

/* Structure to represent a field present in the
 * generated key. A key field may present anywhere and can
 * be of any size in the generated key. Once this structure
 * is populated for fields of interest then field's presence
 * and location (if present) can be known.
 */
struct npc_key_field {
	/* Masks where all set bits indicate position
	 * of a field in the key
	 */
	u64 kw_mask[NPC_MAX_KWS_IN_KEY];
	/* Number of words in the key a field spans. If a field is
	 * of 16 bytes and key offset is 4 then the field will use
	 * 4 bytes in KW0, 8 bytes in KW1 and 4 bytes in KW2 and
	 * nr_kws will be 3(KW0, KW1 and KW2).
	 */
	int nr_kws;
	/* used by packet header fields */
	struct npc_layer_mdata layer_mdata;
};

/* NPC MCAM (match-action TCAM) allocator and key-layout state */
struct npc_mcam {
	struct rsrc_bmap counters;
	struct mutex	lock;	/* MCAM entries and counters update lock */
	unsigned long	*bmap;		/* bitmap, 0 => bmap_entries */
	unsigned long	*bmap_reverse;	/* Reverse bitmap, bmap_entries => 0 */
	u16	bmap_entries;	/* Number of unreserved MCAM entries */
	u16	bmap_fcnt;	/* MCAM entries free count */
	u16	*entry2pfvf_map;
	u16	*entry2cntr_map;
	u16	*cntr2pfvf_map;
	u16	*cntr_refcnt;
	u16	*entry2target_pffunc;
	u8	keysize;	/* MCAM keysize 112/224/448 bits */
	u8	banks;		/* Number of MCAM banks */
	u8	banks_per_entry;/* Number of keywords in key */
	u16	banksize;	/* Number of MCAM entries in each bank */
	u16	total_entries;	/* Total number of MCAM entries */
	u16	nixlf_offset;	/* Offset of nixlf rsvd uncast entries */
	u16	pf_offset;	/* Offset of PF's rsvd bcast, promisc entries */
	u16	lprio_count;
	u16	lprio_start;
	u16	hprio_count;
	u16	hprio_end;
	u16     rx_miss_act_cntr; /* Counter for RX MISS action */
	/* fields present in the generated key */
	struct npc_key_field	tx_key_fields[NPC_KEY_FIELDS_MAX];
	struct npc_key_field	rx_key_fields[NPC_KEY_FIELDS_MAX];
	u64	tx_features;
	u64	rx_features;
	struct list_head mcam_rules;
};

/* Structure for per RVU func info ie PF/VF */
struct rvu_pfvf {
	bool		npalf; /* Only one NPALF per RVU_FUNC */
	bool		nixlf; /* Only one NIXLF per RVU_FUNC */
	u16		sso;
	u16		ssow;
	u16		cptlfs;
	u16		timlfs;
	u16		cpt1_lfs;
	u8		cgx_lmac;

	/* Block LF's MSIX vector info */
	struct rsrc_bmap msix;      /* Bitmap for MSIX vector alloc */
#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
	u16		 *msix_lfmap; /* Vector to block LF mapping */

	/* NPA contexts */
	struct qmem	*aura_ctx;
	struct qmem	*pool_ctx;
	struct qmem	*npa_qints_ctx;
	unsigned long	*aura_bmap;
	unsigned long	*pool_bmap;

	/* NIX contexts */
	struct qmem	*rq_ctx;
	struct qmem	*sq_ctx;
	struct qmem	*cq_ctx;
	struct qmem	*rss_ctx;
	struct qmem	*cq_ints_ctx;
	struct qmem	*nix_qints_ctx;
	unsigned long	*sq_bmap;
	unsigned long	*rq_bmap;
	unsigned long	*cq_bmap;

	u16		rx_chan_base;
	u16		tx_chan_base;
	u8              rx_chan_cnt; /* total number of RX channels */
	u8              tx_chan_cnt; /* total number of TX channels */
	u16		maxlen;
	u16		minlen;

	bool		hw_rx_tstamp_en; /* Is rx_tstamp enabled */
	u8		mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
	u8		default_mac[ETH_ALEN]; /* MAC address from FWdata */

	/* Broadcast/Multicast/Promisc pkt replication info */
	u16			bcast_mce_idx;
	u16			mcast_mce_idx;
	u16			promisc_mce_idx;
	struct nix_mce_list	bcast_mce_list;
	struct nix_mce_list	mcast_mce_list;
	struct nix_mce_list	promisc_mce_list;
	bool			use_mce_list;

	struct rvu_npc_mcam_rule *def_ucast_rule;

	bool	cgx_in_use; /* this PF/VF using CGX? */
	int	cgx_users;  /* number of cgx users - used only by PFs */

	int     intf_mode;
	u8	nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
	u8	nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
	u8	nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
	u8	lbkid;	     /* NIX0/1 lbk link ID */
	u64     lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
	u64     lmt_map_ent_w1; /* Preserving the word1 of lmtst map table entry */
	unsigned long flags;	/* enum rvu_pfvf_flags bits */
	struct  sdp_node_info *sdp_info;
};

/* Bit numbers for struct rvu_pfvf::flags */
enum rvu_pfvf_flags {
	NIXLF_INITIALIZED = 0,
	PF_SET_VF_MAC,
	PF_SET_VF_CFG,
	PF_SET_VF_TRUSTED,
};

/* Mask clearing all PF-set VF permission flag bits */
#define RVU_CLEAR_VF_PERM  ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)

/* NIX transmit scheduler queues of one scheduling level */
struct nix_txsch {
	struct rsrc_bmap schq;	/* scheduler queue allocator */
	u8   lvl;		/* scheduling level */
#define NIX_TXSCHQ_FREE		      BIT_ULL(1)
#define NIX_TXSCHQ_CFG_DONE	      BIT_ULL(0)
/* pfvf_map packs owner pcifunc in bits 15:0 and flags in bits 31:16 */
#define TXSCH_MAP_FUNC(__pfvf_map)    ((__pfvf_map) & 0xFFFF)
#define TXSCH_MAP_FLAGS(__pfvf_map)   ((__pfvf_map) >> 16)
#define TXSCH_MAP(__func, __flags)    (((__func) & 0xFFFF) | ((__flags) << 16))
#define TXSCH_SET_FLAG(__pfvf_map, flag)    ((__pfvf_map) | ((flag) << 16))
	u32  *pfvf_map;
};

/* NIX packet marking format table allocator */
struct nix_mark_format {
	u8 total;	/* total formats supported by HW */
	u8 in_use;	/* formats allocated so far */
	u32 *cfg;
};

/* NPC port-kind allocator and its port/channel backmap */
struct npc_pkind {
	struct rsrc_bmap rsrc;
	u32	*pfchan_map;
};

/* RSS flowkey algorithm table; each slot holds a configured flowkey cfg */
struct nix_flowkey {
#define NIX_FLOW_KEY_ALG_MAX 32
	u32 flowkey[NIX_FLOW_KEY_ALG_MAX];
	int in_use;	/* number of algorithms configured */
};

/* NIX LSO format allocator */
struct nix_lso {
	u8 total;	/* total LSO formats supported by HW */
	u8 in_use;	/* formats allocated so far */
};

/* NIX TX VTAG definition allocator */
struct nix_txvlan {
#define NIX_TX_VTAG_DEF_MAX 0x400
	struct rsrc_bmap rsrc;
	u16 *entry2pfvf_map;	/* VTAG def entry -> owning pcifunc */
	struct mutex rsrc_lock; /* Serialize resource alloc/free */
};

/* NIX ingress policer (bandwidth profile) state of one layer */
struct nix_ipolicer {
	struct rsrc_bmap band_prof;
	u16 *pfvf_map;	/* profile -> owning pcifunc */
	u16 *match_id;
	u16 *ref_count;
};

/* Per NIX block (NIX0/NIX1) state */
struct nix_hw {
	int blkaddr;	/* BLKADDR_NIX0/1 */
	struct rvu *rvu;
	struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
	struct nix_mcast mcast;
	struct nix_flowkey flowkey;
	struct nix_mark_format mark_format;
	struct nix_lso lso;
	struct nix_txvlan txvlan;
	struct nix_ipolicer *ipolicer;
	u64    *tx_credits;
};

/* RVU block's capabilities or functionality,
 * which vary by silicon version/skew.
 */
struct hw_cap {
	/* Transmit side supported functionality */
	u8	nix_tx_aggr_lvl; /* Tx link's traffic aggregation level */
	u16	nix_txsch_per_cgx_lmac; /* Max Q's transmitting to CGX LMAC */
	u16	nix_txsch_per_lbk_lmac; /* Max Q's transmitting to LBK LMAC */
	u16	nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
	bool	nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
	bool	nix_shaping;		 /* Is shaping and coloring supported */
	bool    nix_shaper_toggle_wait; /* Shaping toggle needs poll/wait */
	bool	nix_tx_link_bp;		 /* Can link backpressure TL queues ? */
	bool	nix_rx_multicast;	 /* Rx packet replication support */
	bool	nix_common_dwrr_mtu;	 /* Common DWRR MTU for quantum config */
	bool	per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
	bool	programmable_chans; /* Channels programmable ? */
	bool	ipolicer;
	bool	npc_hash_extract; /* Hash extract enabled ? */
	bool	npc_exact_match_enabled; /* Exact match supported ? */
};

/* HW topology and block state discovered at probe time */
struct rvu_hwinfo {
	u8	total_pfs;   /* MAX RVU PFs HW supports */
	u16	total_vfs;   /* Max RVU VFs HW supports */
	u16	max_vfs_per_pf; /* Max VFs that can be attached to a PF */
	u8	cgx;
	u8	lmac_per_cgx;
	u16	cgx_chan_base;	/* CGX base channel number */
	u16	lbk_chan_base;	/* LBK base channel number */
	u16	sdp_chan_base;	/* SDP base channel number */
	u16	cpt_chan_base;	/* CPT base channel number */
	u8	cgx_links;
	u8	lbk_links;
	u8	sdp_links;
	u8	cpt_links;	/* Number of CPT links */
	u8	npc_kpus;          /* No of parser units */
	u8	npc_pkinds;        /* No of port kinds */
	u8	npc_intfs;         /* No of interfaces */
	u8	npc_kpu_entries;   /* No of KPU entries */
	u16	npc_counters;	   /* No of match stats counters */
	u32	lbk_bufsize;	   /* FIFO size supported by LBK */
	bool	npc_ext_set;	   /* Extended register set */
	u64     npc_stat_ena;      /* Match stats enable bit */

	struct hw_cap    cap;
	struct rvu_block block[BLK_COUNT]; /* Block info */
	struct nix_hw    *nix;
	struct rvu	 *rvu;
	struct npc_pkind pkind;
	struct npc_mcam  mcam;
	struct npc_exact_table *table;
};

/* One mailbox direction (AF<->PF or AF<->VF): regions, work items, queue */
struct mbox_wq_info {
	struct otx2_mbox mbox;
	struct rvu_work *mbox_wrk;

	struct otx2_mbox mbox_up;
	struct rvu_work *mbox_wrk_up;

	struct workqueue_struct *mbox_wq;
};

/* Layout of the firmware-provided data region; must match firmware exactly,
 * hence the fixed reserved area and the "do not add fields" rule below.
 */
struct rvu_fwdata {
#define RVU_FWDATA_HEADER_MAGIC	0xCFDA	/* Custom Firmware Data*/
#define RVU_FWDATA_VERSION	0x0001
	u32 header_magic;
	u32 version;		/* version id */

	/* MAC address */
#define PF_MACNUM_MAX	32
#define VF_MACNUM_MAX	256
	u64 pf_macs[PF_MACNUM_MAX];
	u64 vf_macs[VF_MACNUM_MAX];
	u64 sclk;
	u64 rclk;
	u64 mcam_addr;
	u64 mcam_sz;
	u64 msixtr_base;
	u32 ptp_ext_clk_rate;
	u32 ptp_ext_tstamp;
#define FWDATA_RESERVED_MEM 1022
	u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX         5
#define CGX_LMACS_MAX   4
	struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
	/* Do not add new fields below this line */
};

struct ptp;

/* KPU profile adapter structure gathering all KPU configuration data and abstracting out the
 * source where it came from.
 */
struct npc_kpu_profile_adapter {
	const char			*name;
	u64				version;
	const struct npc_lt_def_cfg	*lt_def;
	const struct npc_kpu_profile_action	*ikpu; /* array[pkinds] */
	const struct npc_kpu_profile	*kpu; /* array[kpus] */
	struct npc_mcam_kex		*mkex;
	struct npc_mcam_kex_hash	*mkex_hash;
	bool				custom;	/* loaded from firmware, not built-in */
	size_t				pkinds;
	size_t				kpus;
};

#define RVU_SWITCH_LBK_CHAN	63

/* State of the NPC-based internal switch (DMAC rules between PFs/VFs) */
struct rvu_switch {
	struct mutex switch_lock; /* Serialize flow installation */
	u32 used_entries;
	u16 *entry2pcifunc;	/* MCAM entry -> owning pcifunc */
	u16 mode;
	u16 start_entry;
};

/* Driver-private state of the RVU Admin Function PCI device */
struct rvu {
	void __iomem		*afreg_base;
	void __iomem		*pfreg_base;
	struct pci_dev		*pdev;
	struct device		*dev;
	struct rvu_hwinfo       *hw;
	struct rvu_pfvf		*pf;
	struct rvu_pfvf		*hwvf;
	struct mutex		rsrc_lock; /* Serialize resource alloc/free */
	int			vfs; /* Number of VFs attached to RVU */
	int			nix_blkaddr[MAX_NIX_BLKS];

	/* Mbox */
	struct mbox_wq_info	afpf_wq_info;
	struct mbox_wq_info	afvf_wq_info;

	/* PF FLR */
	struct rvu_work		*flr_wrk;
	struct workqueue_struct *flr_wq;
	struct mutex		flr_lock; /* Serialize FLRs */

	/* MSI-X */
	u16			num_vec;
	char			*irq_name;
	bool			*irq_allocated;
	dma_addr_t		msix_base_iova;
	u64			msixtr_base_phy; /* Register reset value */

	/* CGX */
#define PF_CGXMAP_BASE		1 /* PF 0 is reserved for RVU PF */
	u16			cgx_mapped_vfs; /* maximum CGX mapped VFs */
	u8			cgx_mapped_pfs;
	u8			cgx_cnt_max;	 /* CGX port count max */
	u8			*pf2cgxlmac_map; /* pf to cgx_lmac map */
	u16			*cgxlmac2pf_map; /* bitmap of mapped pfs for
						  * every cgx lmac port
						  */
	unsigned long		pf_notify_bmap; /* Flags for PF notification */
	void			**cgx_idmap; /* cgx id to cgx data map table */
	struct			work_struct cgx_evh_work;
	struct			workqueue_struct *cgx_evh_wq;
	spinlock_t		cgx_evq_lock; /* cgx event queue lock */
	struct list_head	cgx_evq_head; /* cgx event queue head */
	struct mutex		cgx_cfg_lock; /* serialize cgx configuration */

	char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
	char kpu_pfl_name[KPU_NAME_LEN]; /* Configured KPU profile name */

	/* Firmware data */
	struct rvu_fwdata	*fwdata;
	void			*kpu_fwdata;
	size_t			kpu_fwdata_sz;
	void __iomem		*kpu_prfl_addr;

	/* NPC KPU data */
	struct npc_kpu_profile_adapter kpu;

	struct ptp		*ptp;

	int			mcs_blk_cnt;

#ifdef CONFIG_DEBUG_FS
	struct rvu_debugfs	rvu_dbg;
#endif
	struct rvu_devlink	*rvu_dl;

	/* RVU switch implementation over NPC with DMAC rules */
	struct rvu_switch	rswitch;
};

/* AF register accessors: address = afreg_base | (block << 28) | offset */
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
{
	writeq(val, rvu->afreg_base + ((block << 28) | offset));
}

static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
{
	return readq(rvu->afreg_base + ((block << 28) | offset));
}

/* PF BAR register accessors (plain offset from pfreg_base) */
static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
{
	writeq(val, rvu->pfreg_base + offset);
}

static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
{
	return readq(rvu->pfreg_base + offset);
}

/* Silicon revisions */
static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;
	/* 96XX A0/B0, 95XX A0/A1/B0 chips */
	return ((pdev->revision == 0x00) || (pdev->revision == 0x01) ||
		(pdev->revision == 0x10) || (pdev->revision == 0x11) ||
		(pdev->revision == 0x14));
}

/* True only for 96XX pass A0 silicon */
static inline bool is_rvu_96xx_A0(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;

	return (pdev->revision == 0x00);
}

/* True for 96XX pass A0 or B0 silicon */
static inline bool is_rvu_96xx_B0(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;

	return (pdev->revision == 0x00) || (pdev->revision == 0x01);
}

/* True for 95XX pass A0 or A1 silicon */
static inline bool is_rvu_95xx_A0(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;

	return (pdev->revision == 0x10) || (pdev->revision == 0x11);
}

/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bit 3..2: major pass
 * bits 7..4: midr id
 */
#define PCI_REVISION_ID_96XX		0x00
#define PCI_REVISION_ID_95XX		0x10
#define PCI_REVISION_ID_95XXN		0x20
#define PCI_REVISION_ID_98XX		0x30
#define PCI_REVISION_ID_95XXMM		0x40
#define PCI_REVISION_ID_95XXO		0xE0
574 575 576 577 578 579 580 581

static inline bool is_rvu_otx2(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;

	u8 midr = pdev->revision & 0xF0;

	return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
582 583
		midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
584 585
}

586 587 588 589 590 591 592 593 594 595 596
static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
{
	u64 npc_const3;

	npc_const3 = rvu_read64(rvu, BLKADDR_NPC, NPC_AF_CONST3);
	if (!(npc_const3 & BIT_ULL(62)))
		return false;

	return true;
}

/* Compute the NIX RX channel number of a CGX LMAC sub-channel.
 * Fixed mapping on OTX2; on silicons with programmable channels the
 * base and per-LMAC channel count come from HW (NIX_AF_CONST).
 */
static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid,
				   u8 lmacid, u8 chan)
{
	u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
	u16 cgx_chans = nix_const & 0xFFULL;
	struct rvu_hwinfo *hw = rvu->hw;

	if (!hw->cap.programmable_chans)
		return NIX_CHAN_CGX_LMAC_CHX(cgxid, lmacid, chan);

	return rvu->hw->cgx_chan_base +
		(cgxid * hw->lmac_per_cgx + lmacid) * cgx_chans + chan;
}

/* Compute the NIX channel number of an LBK link sub-channel */
static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid,
				   u8 chan)
{
	u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
	u16 lbk_chans = (nix_const >> 16) & 0xFFULL;
	struct rvu_hwinfo *hw = rvu->hw;

	if (!hw->cap.programmable_chans)
		return NIX_CHAN_LBK_CHX(lbkid, chan);

	return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan;
}

/* Compute the NIX channel number of an SDP sub-channel */
static inline u16 rvu_nix_chan_sdp(struct rvu *rvu, u8 chan)
{
	struct rvu_hwinfo *hw = rvu->hw;

	if (!hw->cap.programmable_chans)
		return NIX_CHAN_SDP_CHX(chan);

	return hw->sdp_chan_base + chan;
}

/* Compute the NIX channel number of a CPT sub-channel */
static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
{
	return rvu->hw->cpt_chan_base + chan;
}

S
Sunil Goutham 已提交
639 640 641
/* Function Prototypes
 * RVU
 */
642
static inline bool is_afvf(u16 pcifunc)
643 644 645 646
{
	return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}

647 648 649 650 651
static inline bool is_vf(u16 pcifunc)
{
	return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}

652 653 654 655 656 657
/* check if PF_FUNC is AF */
static inline bool is_pffunc_af(u16 pcifunc)
{
	return !pcifunc;
}

/* Validate the firmware data region's magic and version.
 * NOTE(review): dereferences rvu->fwdata unconditionally — callers must
 * ensure fwdata was mapped; confirm against call sites.
 */
static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
{
	return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) &&
		(rvu->fwdata->version == RVU_FWDATA_VERSION);
}

/* Resource bitmap alloc/free helpers */
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
void rvu_free_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);

/* PF/VF and block lookup helpers */
int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype);
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
int rvu_get_num_lbk_chans(void);
int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
			      u16 global_slot, u16 *slot_in_block);

/* RVU HW reg validation */
enum regmap_block {
	TXSCHQ_HWREGMAP = 0,
	MAX_HWREGMAP,
};

bool rvu_check_valid_reg(int regmap, int regblk, u64 reg);

/* NPA/NIX AQ APIs */
int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size);
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);

/* SDP APIs */
int rvu_sdp_init(struct rvu *rvu);
bool is_sdp_pfvf(u16 pcifunc);
bool is_sdp_pf(u16 pcifunc);
bool is_sdp_vf(u16 pcifunc);
/* CGX APIs */
/* A PF is CGX-mapped when it lies in [PF_CGXMAP_BASE, cgx_mapped_pfs]
 * (PF0 is the AF) and is not an SDP PF.
 */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
	return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) &&
		!is_sdp_pf(pf << RVU_PFVF_PF_SHIFT);
}

/* Decode a pf2cgxlmac_map byte: high nibble is CGX id, low nibble LMAC id */
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
{
	*lmac_id = map & 0x0F;
	*cgx_id = (map >> 4) & 0x0F;
}

/* True when pcifunc is a VF under a CGX-mapped PF */
static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc)
{
	return ((pcifunc & RVU_PFVF_FUNC_MASK) &&
		is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)));
}

/* Generate a handler prototype for every mailbox message in MBOX_MESSAGES */
#define M(_name, _id, fn_name, req, rsp)				\
int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
MBOX_MESSAGES
#undef M

int rvu_cgx_init(struct rvu *rvu);
int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
			   int rxtxflag, u64 *stat);
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
			struct npa_aq_enq_rsp *rsp);

/* NIX APIs */
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
int rvu_nix_init(struct rvu *rvu);
int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
				int blkaddr, u32 cfg);
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
			struct nix_mce_list *mce_list,
			int mce_idx, int mcam_index, bool add);
void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
		      struct nix_mce_list **mce_list, int *mce_idx);
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr);
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr);
int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
				 u16 rq_idx, u16 match_id);
int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
			struct nix_cn10k_aq_enq_req *aq_req,
			struct nix_cn10k_aq_enq_rsp *aq_rsp,
			u16 pcifunc, u8 ctype, u32 qidx);
int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
u32 convert_bytes_to_dwrr_mtu(u32 bytes);

/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
				 int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
				   int nixlf, u64 chan, u8 chan_cnt);
void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				  bool enable);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
				       int nixlf, u64 chan);
void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				bool enable);
void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				    u64 chan);
void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
				   bool enable);

void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
				  int nixlf, int type, bool enable);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable);
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
				    int group, int alg_idx, int mcam_index);

void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
				       int blkaddr, int *alloc_cnt,
				       int *enable_cnt);
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
					 int blkaddr, int *alloc_cnt,
					 int *enable_cnt);
bool is_npc_intf_tx(u8 intf);
bool is_npc_intf_rx(u8 intf);
bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
const char *npc_get_field_name(u8 hdr);
int npc_get_bank(struct npc_mcam *mcam, int index);
void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
			   int blkaddr, int index, bool enable);
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
			 int blkaddr, u16 src, struct mcam_entry *entry,
			 u8 *intf, u8 *ena);
bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32  rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
			       u16 pfc_en);
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
			     int type);
bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
			   int index);
int rvu_npc_init(struct rvu *rvu);
int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
			       u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
			       u64 bcast_mcast_val, u64 bcast_mcast_mask);
void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);

/* CPT APIs */
int rvu_cpt_register_interrupts(struct rvu *rvu);
void rvu_cpt_unregister_interrupts(struct rvu *rvu);
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
			int slot);
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);

/* CN10K RVU */
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);

/* CN10K RVU - LMT*/
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);

#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
void rvu_dbg_exit(struct rvu *rvu);
#else
static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif

/* RVU Switch */
void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);

int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
			   u64 pkind, u8 var_len_off, u8 var_len_off_mask,
			   u8 shift_dir);

/* CN10K MCS */
int rvu_mcs_init(struct rvu *rvu);
int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);

#endif /* RVU_H */