/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef RVU_H
#define RVU_H

#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"

/* PCI device IDs */
#define	PCI_DEVID_OCTEONTX2_RVU_AF		0xA065

/* PCI BAR nos */
#define	PCI_AF_REG_BAR_NUM			0
#define	PCI_PF_REG_BAR_NUM			2
#define	PCI_MBOX_BAR_NUM			4

#define NAME_SIZE				32

/* PF_FUNC */
#define RVU_PFVF_PF_SHIFT	10
#define RVU_PFVF_PF_MASK	0x3F
#define RVU_PFVF_FUNC_SHIFT	0
#define RVU_PFVF_FUNC_MASK	0x3FF
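
/* Illustrative sketch (hypothetical helper, not part of the driver API):
 * a PF_FUNC identifier packs the PF number and the function number into
 * a single 16-bit value using the shifts/masks above.
 */
static inline u16 rvu_example_make_pcifunc(u16 pf, u16 func)
{
	return ((pf & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) |
	       ((func & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT);
}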

struct rvu_work {
	struct	work_struct work;
	struct	rvu *rvu;
};

struct rsrc_bmap {
	unsigned long *bmap;	/* Pointer to resource bitmap */
	u16  max;		/* Max resource id or count */
};

struct rvu_block {
	struct rsrc_bmap	lf;
	struct admin_queue	*aq; /* NIX/NPA AQ */
	u16  *fn_map; /* LF to pcifunc mapping */
	bool multislot;
	bool implemented;
	u8   addr;  /* RVU_BLOCK_ADDR_E */
	u8   type;  /* RVU_BLOCK_TYPE_E */
	u8   lfshift;
	u64  lookup_reg;
	u64  pf_lfcnt_reg;
	u64  vf_lfcnt_reg;
	u64  lfcfg_reg;
	u64  msixcfg_reg;
	u64  lfreset_reg;
	unsigned char name[NAME_SIZE];
};

struct nix_mcast {
	struct qmem	*mce_ctx;
	struct qmem	*mcast_buf;
	int		replay_pkind;
	int		next_free_mce;
	struct mutex	mce_lock; /* Serialize MCE updates */
};

struct nix_mce_list {
	struct hlist_head	head;
	int			count;
	int			max;
};

struct npc_mcam {
	struct rsrc_bmap counters;
	struct mutex	lock;	/* MCAM entries and counters update lock */
	unsigned long	*bmap;		/* bitmap, 0 => bmap_entries */
	unsigned long	*bmap_reverse;	/* Reverse bitmap, bmap_entries => 0 */
	u16	bmap_entries;	/* Number of unreserved MCAM entries */
	u16	bmap_fcnt;	/* MCAM entries free count */
	u16	*entry2pfvf_map;
	u16	*entry2cntr_map;
	u16	*cntr2pfvf_map;
	u16	*cntr_refcnt;
	u8	keysize;	/* MCAM keysize 112/224/448 bits */
	u8	banks;		/* Number of MCAM banks */
	u8	banks_per_entry;/* Number of keywords in key */
	u16	banksize;	/* Number of MCAM entries in each bank */
	u16	total_entries;	/* Total number of MCAM entries */
	u16	nixlf_offset;	/* Offset of nixlf rsvd uncast entries */
	u16	pf_offset;	/* Offset of PF's rsvd bcast, promisc entries */
	u16	lprio_count;
	u16	lprio_start;
	u16	hprio_count;
	u16	hprio_end;
};

/* Structure for per RVU func info ie PF/VF */
struct rvu_pfvf {
	bool		npalf; /* Only one NPALF per RVU_FUNC */
	bool		nixlf; /* Only one NIXLF per RVU_FUNC */
	u16		sso;
	u16		ssow;
	u16		cptlfs;
	u16		timlfs;
	u8		cgx_lmac;

	/* Block LF's MSIX vector info */
	struct rsrc_bmap msix;      /* Bitmap for MSIX vector alloc */
#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
	u16		 *msix_lfmap; /* Vector to block LF mapping */

	/* NPA contexts */
	struct qmem	*aura_ctx;
	struct qmem	*pool_ctx;
	struct qmem	*npa_qints_ctx;
	unsigned long	*aura_bmap;
	unsigned long	*pool_bmap;

	/* NIX contexts */
	struct qmem	*rq_ctx;
	struct qmem	*sq_ctx;
	struct qmem	*cq_ctx;
	struct qmem	*rss_ctx;
	struct qmem	*cq_ints_ctx;
	struct qmem	*nix_qints_ctx;
	unsigned long	*sq_bmap;
	unsigned long	*rq_bmap;
	unsigned long	*cq_bmap;

	u16		rx_chan_base;
	u16		tx_chan_base;
	u8              rx_chan_cnt; /* total number of RX channels */
	u8              tx_chan_cnt; /* total number of TX channels */
	u16		maxlen;
	u16		minlen;

	u8		mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */

	/* Broadcast pkt replication info */
	u16			bcast_mce_idx;
	struct nix_mce_list	bcast_mce_list;
};
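
/* Illustrative sketch (hypothetical helpers, not part of the driver API):
 * each rvu_pfvf->msix_lfmap entry holds a value built with MSIX_BLKLF()
 * above, i.e. the block address in the upper byte and the LF number in the
 * lower byte, so an entry can be split back like this.
 */
static inline u8 rvu_example_msix_blkaddr(u16 lfmap_entry)
{
	return lfmap_entry >> 8;
}

static inline u8 rvu_example_msix_lf(u16 lfmap_entry)
{
	return lfmap_entry & 0xFF;
}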

struct nix_txsch {
	struct rsrc_bmap schq;
	u8   lvl;
	u16  *pfvf_map;
};

struct npc_pkind {
	struct rsrc_bmap rsrc;
	u32	*pfchan_map;
};

struct nix_hw {
	struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
	struct nix_mcast mcast;
};

struct rvu_hwinfo {
	u8	total_pfs;   /* MAX RVU PFs HW supports */
	u16	total_vfs;   /* Max RVU VFs HW supports */
	u16	max_vfs_per_pf; /* Max VFs that can be attached to a PF */
	u8	cgx;
	u8	lmac_per_cgx;
	u8	cgx_links;
	u8	lbk_links;
	u8	sdp_links;
	u8	npc_kpus;          /* No of parser units */

	struct rvu_block block[BLK_COUNT]; /* Block info */
	struct nix_hw    *nix0;
	struct npc_pkind pkind;
	struct npc_mcam  mcam;
};

struct rvu {
	void __iomem		*afreg_base;
	void __iomem		*pfreg_base;
	struct pci_dev		*pdev;
	struct device		*dev;
	struct rvu_hwinfo       *hw;
	struct rvu_pfvf		*pf;
	struct rvu_pfvf		*hwvf;
	struct mutex            rsrc_lock; /* Serialize resource alloc/free */

	/* Mbox */
	struct otx2_mbox	mbox;
	struct rvu_work		*mbox_wrk;
	struct otx2_mbox        mbox_up;
	struct rvu_work		*mbox_wrk_up;
	struct workqueue_struct *mbox_wq;

	/* MSI-X */
	u16			num_vec;
	char			*irq_name;
	bool			*irq_allocated;
	dma_addr_t		msix_base_iova;

	/* CGX */
#define PF_CGXMAP_BASE		1 /* PF 0 is reserved for RVU PF */
	u8			cgx_mapped_pfs;
	u8			cgx_cnt; /* available cgx ports */
	u8			*pf2cgxlmac_map; /* pf to cgx_lmac map */
	u16			*cgxlmac2pf_map; /* bitmap of mapped pfs for
						  * every cgx lmac port
						  */
	unsigned long		pf_notify_bmap; /* Flags for PF notification */
	void			**cgx_idmap; /* cgx id to cgx data map table */
	struct			work_struct cgx_evh_work;
	struct			workqueue_struct *cgx_evh_wq;
	spinlock_t		cgx_evq_lock; /* cgx event queue lock */
	struct list_head	cgx_evq_head; /* cgx event queue head */
};

static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
{
	writeq(val, rvu->afreg_base + ((block << 28) | offset));
}

static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
{
	return readq(rvu->afreg_base + ((block << 28) | offset));
}

static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
{
	writeq(val, rvu->pfreg_base + offset);
}

static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
{
	return readq(rvu->pfreg_base + offset);
}
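
/* Usage sketch (illustrative only): rvu_read64()/rvu_write64() address AF
 * registers relative to afreg_base (BAR0), with the RVU block address
 * shifted into bits 28 and above of the offset, e.g.:
 *
 *	u64 cfg = rvu_read64(rvu, block->addr, offset);
 *	rvu_write64(rvu, block->addr, offset, cfg);
 */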

/* Function Prototypes
 * RVU
 */
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);

/* RVU HW reg validation */
enum regmap_block {
	TXSCHQ_HWREGMAP = 0,
	MAX_HWREGMAP,
};

bool rvu_check_valid_reg(int regmap, int regblk, u64 reg);

/* NPA/NIX AQ APIs */
int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size);
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);

/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
	return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
}

static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
{
	*cgx_id = (map >> 4) & 0xF;
	*lmac_id = (map & 0xF);
}
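
/* Illustrative sketch (hypothetical helper, not part of the driver API):
 * the inverse of rvu_get_cgx_lmac_id() above: one byte of
 * rvu->pf2cgxlmac_map carries the CGX id in the upper nibble and the
 * LMAC id in the lower nibble.
 */
static inline u8 rvu_example_cgx_lmac_map(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}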

int rvu_cgx_probe(struct rvu *rvu);
void rvu_cgx_wq_destroy(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp);
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp);
int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp);

/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp);
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp);
int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
				  struct npa_lf_alloc_req *req,
				  struct npa_lf_alloc_rsp *rsp);
int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp);

/* NIX APIs */
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
int rvu_nix_init(struct rvu *rvu);
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp);
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp);
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp);
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp);
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp);
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
				    struct nix_txsch_free_req *req,
				    struct msg_rsp *rsp);
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct msg_rsp *rsp);
int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp);
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
				  struct nix_vtag_config *req,
				  struct msg_rsp *rsp);
int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
					 struct nix_rss_flowkey_cfg *req,
					 struct msg_rsp *rsp);
int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
				      struct nix_set_mac_addr *req,
				      struct msg_rsp *rsp);
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
				     struct msg_rsp *rsp);
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
				    struct msg_rsp *rsp);
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
				     struct msg_rsp *rsp);
int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp);

/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
				 int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
				   int nixlf, u64 chan, bool allmulti);
void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
				       int nixlf, u64 chan);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
				    int group, int alg_idx, int mcam_index);
int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
					  struct npc_mcam_alloc_entry_req *req,
					  struct npc_mcam_alloc_entry_rsp *rsp);
int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
					 struct npc_mcam_free_entry_req *req,
					 struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
					  struct npc_mcam_write_entry_req *req,
					  struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
					struct npc_mcam_ena_dis_entry_req *req,
					struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
					struct npc_mcam_ena_dis_entry_req *req,
					struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
					  struct npc_mcam_shift_entry_req *req,
					  struct npc_mcam_shift_entry_rsp *rsp);
int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
				struct npc_mcam_alloc_counter_req *req,
				struct npc_mcam_alloc_counter_rsp *rsp);
int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
		   struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
		struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
			struct npc_mcam_oper_counter_req *req,
			struct npc_mcam_oper_counter_rsp *rsp);
int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
			  struct npc_mcam_alloc_and_write_entry_req *req,
			  struct npc_mcam_alloc_and_write_entry_rsp *rsp);
int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
				     struct npc_get_kex_cfg_rsp *rsp);

#endif /* RVU_H */