/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef RVU_H
#define RVU_H

#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"

/* PCI device IDs */
#define	PCI_DEVID_OCTEONTX2_RVU_AF		0xA065

/* PCI BAR nos */
#define	PCI_AF_REG_BAR_NUM			0
#define	PCI_PF_REG_BAR_NUM			2
#define	PCI_MBOX_BAR_NUM			4

#define NAME_SIZE				32

/* PF_FUNC */
#define RVU_PFVF_PF_SHIFT	10
#define RVU_PFVF_PF_MASK	0x3F
#define RVU_PFVF_FUNC_SHIFT	0
#define RVU_PFVF_FUNC_MASK	0x3FF
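
/* Usage sketch (assumption, not part of the driver API): a PF_FUNC
 * (pcifunc) value is taken here to pack the PF number and the function
 * number using the shifts/masks above; rvu_get_pf() declared further
 * below presumably returns the PF half of this decode. The helper name
 * is hypothetical and only illustrates the encoding.
 */
static inline u16 rvu_example_get_func(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
}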

struct rvu_work {
	struct	work_struct work;
	struct	rvu *rvu;
};

struct rsrc_bmap {
	unsigned long *bmap;	/* Pointer to resource bitmap */
	u16  max;		/* Max resource id or count */
};

struct rvu_block {
	struct rsrc_bmap	lf;
	struct admin_queue	*aq; /* NIX/NPA AQ */
	u16  *fn_map; /* LF to pcifunc mapping */
	bool multislot;
	bool implemented;
	u8   addr;  /* RVU_BLOCK_ADDR_E */
	u8   type;  /* RVU_BLOCK_TYPE_E */
	u8   lfshift;
	u64  lookup_reg;
	u64  pf_lfcnt_reg;
	u64  vf_lfcnt_reg;
	u64  lfcfg_reg;
	u64  msixcfg_reg;
	u64  lfreset_reg;
	unsigned char name[NAME_SIZE];
};

/* Structure for per RVU func info, i.e. PF/VF */
struct rvu_pfvf {
	bool		npalf; /* Only one NPALF per RVU_FUNC */
	bool		nixlf; /* Only one NIXLF per RVU_FUNC */
	u16		sso;
	u16		ssow;
	u16		cptlfs;
	u16		timlfs;

	/* Block LF's MSIX vector info */
	struct rsrc_bmap msix;      /* Bitmap for MSIX vector alloc */
#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
	u16		 *msix_lfmap; /* Vector to block LF mapping */
};
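
/* Sketch only (assumption): with the MSIX_BLKLF() encoding above, the
 * msix_lfmap[] entry for an MSIX vector can be matched against a given
 * block address and LF as below. The helper name is illustrative, not
 * driver API.
 */
static inline bool rvu_example_msix_is_blklf(struct rvu_pfvf *pfvf,
					     int vec, int blkaddr, int lf)
{
	return pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf);
}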

struct rvu_hwinfo {
	u8	total_pfs;   /* MAX RVU PFs HW supports */
	u16	total_vfs;   /* Max RVU VFs HW supports */
	u16	max_vfs_per_pf; /* Max VFs that can be attached to a PF */

	struct rvu_block block[BLK_COUNT]; /* Block info */
};

struct rvu {
	void __iomem		*afreg_base;
	void __iomem		*pfreg_base;
	struct pci_dev		*pdev;
	struct device		*dev;
	struct rvu_hwinfo       *hw;
	struct rvu_pfvf		*pf;
	struct rvu_pfvf		*hwvf;
	spinlock_t		rsrc_lock; /* Serialize resource alloc/free */

	/* Mbox */
	struct otx2_mbox	mbox;
	struct rvu_work		*mbox_wrk;
	struct otx2_mbox        mbox_up;
	struct rvu_work		*mbox_wrk_up;
	struct workqueue_struct *mbox_wq;

	/* MSI-X */
	u16			num_vec;
	char			*irq_name;
	bool			*irq_allocated;
	dma_addr_t		msix_base_iova;

	/* CGX */
#define PF_CGXMAP_BASE		1 /* PF 0 is reserved for RVU PF */
	u8			cgx_mapped_pfs;
	u8			cgx_cnt; /* available cgx ports */
	u8			*pf2cgxlmac_map; /* pf to cgx_lmac map */
	u16			*cgxlmac2pf_map; /* bitmap of mapped pfs for
						  * every cgx lmac port
						  */
	unsigned long		pf_notify_bmap; /* Flags for PF notification */
	void			**cgx_idmap; /* cgx id to cgx data map table */
	struct			work_struct cgx_evh_work;
	struct			workqueue_struct *cgx_evh_wq;
	spinlock_t		cgx_evq_lock; /* cgx event queue lock */
	struct list_head	cgx_evq_head; /* cgx event queue head */
};

static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
{
	writeq(val, rvu->afreg_base + ((block << 28) | offset));
}

static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
{
	return readq(rvu->afreg_base + ((block << 28) | offset));
}

static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
{
	writeq(val, rvu->pfreg_base + offset);
}

static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
{
	return readq(rvu->pfreg_base + offset);
}
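
/* Usage sketch (assumption): the helpers above fold the block address
 * into bits 28 and up of the AF register offset. A hypothetical
 * read-modify-write built on them would look like this; the helper name
 * is not part of the driver.
 */
static inline void rvu_example_set_bits64(struct rvu *rvu, u64 block,
					  u64 offset, u64 bits)
{
	u64 val = rvu_read64(rvu, block, offset);

	rvu_write64(rvu, block, offset, val | bits);
}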

/* Function Prototypes
 * RVU
 */

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
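
/* Usage sketch (assumption): a minimal carve-out of one LF from a block's
 * resource bitmap using the rsrc_bmap prototypes above; locking via
 * rsrc_lock and real error codes are omitted, and rvu_alloc_rsrc() is
 * assumed to return the allocated id or a negative value on failure.
 * The helper name is hypothetical.
 */
static inline int rvu_example_try_one_lf(struct rvu_block *block)
{
	int lf;

	if (!rvu_rsrc_free_count(&block->lf))
		return -1;	/* no free LFs left in this block */

	lf = rvu_alloc_rsrc(&block->lf);
	if (lf >= 0)
		rvu_free_rsrc(&block->lf, lf); /* returned right away in this sketch */

	return lf;
}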

/* NPA/NIX AQ APIs */
int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size);
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);

/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
	return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
}

static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
{
	*cgx_id = (map >> 4) & 0xF;
	*lmac_id = (map & 0xF);
}
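
/* Sketch of the inverse mapping (assumption, mirroring the decode above):
 * pf2cgxlmac_map[] entries appear to carry the CGX id in the upper nibble
 * and the LMAC id in the lower nibble. The helper name is hypothetical.
 */
static inline u8 rvu_example_make_cgxlmac_map(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}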

int rvu_cgx_probe(struct rvu *rvu);
void rvu_cgx_wq_destroy(struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp);
int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp);
int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp);
int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp);

/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
int rvu_npa_freemem(struct rvu *rvu);
#endif /* RVU_H */