Commit 12e4c9ab authored by Linu Cherian, committed by David S. Miller

octeontx2-af: Handle non-contiguous CGX LMAC interfaces

For this, the cgx_id field of struct cgx has been redefined to
reflect the cgx port id instead of the device instance id.
Now cgx_id can be used directly as the channel offset for NPC
configuration. The assumption of contiguous cgx port ids has been
removed from nix_calibrate_x2p as well.

As a side effect, the conversion tables that were previously sized
by the cgx count are now sized by the maximum cgx port id value.
Lookups for invalid cgx ports return NULL.
Signed-off-by: Linu Cherian <lcherian@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 44990aaa
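As a rough illustration of the indexing scheme described above (a minimal standalone sketch, not the driver code; struct port, alloc_port_table() and lookup_port() are hypothetical names), a table sized by the maximum port id lets the port id double as the table index, with ids that were never probed reading back as NULL:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical sketch: ports 0 and 2 exist, port 1 is absent
     * (non-contiguous ids). The lookup table is sized by the maximum
     * port id + 1 rather than by the number of probed ports, so the
     * port id can be used directly as the table index and holes in
     * the id space simply read back as NULL.
     */
    struct port {
            int id;
    };

    static struct port **alloc_port_table(int max_port_id)
    {
            /* one slot per possible id; ids never probed stay NULL */
            return calloc(max_port_id + 1, sizeof(struct port *));
    }

    static struct port *lookup_port(struct port **table, int max_port_id, int id)
    {
            if (id > max_port_id)
                    return NULL;
            return table[id];
    }

    int main(void)
    {
            struct port p0 = { 0 }, p2 = { 2 };
            int max_port_id = 2;
            struct port **tbl = alloc_port_table(max_port_id);

            if (!tbl)
                    return 1;
            tbl[p0.id] = &p0;
            tbl[p2.id] = &p2;       /* index 1 stays NULL: port not present */

            for (int id = 0; id <= max_port_id; id++)
                    printf("port %d: %s\n", id,
                           lookup_port(tbl, max_port_id, id) ? "present" : "absent");

            free(tbl);
            return 0;
    }

This mirrors why the patch sizes pf2cgxlmac_map, cgxlmac2pf_map and cgx_idmap based on cgx_get_cgxcnt_max(), and why callers such as rvu_map_cgx_lmac_pf() and nix_calibrate_x2p() skip ports for which rvu_cgx_pdata() returns NULL.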
@@ -92,17 +92,21 @@ static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
 	return cgx->lmac_idmap[lmac_id];
 }
 
-int cgx_get_cgx_cnt(void)
+int cgx_get_cgxcnt_max(void)
 {
 	struct cgx *cgx_dev;
-	int count = 0;
+	int idmax = -ENODEV;
 
 	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
-		count++;
+		if (cgx_dev->cgx_id > idmax)
+			idmax = cgx_dev->cgx_id;
 
-	return count;
+	if (idmax < 0)
+		return 0;
+
+	return idmax + 1;
 }
-EXPORT_SYMBOL(cgx_get_cgx_cnt);
+EXPORT_SYMBOL(cgx_get_cgxcnt_max);
 
 int cgx_get_lmac_cnt(void *cgxd)
 {
@@ -679,8 +683,10 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_release_regions;
 	}
 
+	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+		       & CGX_ID_MASK;
+
 	list_add(&cgx->cgx_list, &cgx_list);
-	cgx->cgx_id = cgx_get_cgx_cnt() - 1;
 
 	cgx_link_usertable_init();
@@ -20,7 +20,7 @@
 /* PCI BAR nos */
 #define PCI_CFG_REG_BAR_NUM		0
 
-#define MAX_CGX				3
+#define CGX_ID_MASK			0x7
 #define MAX_LMAC_PER_CGX		4
 #define CGX_FIFO_LEN			65536 /* 64K for both Rx & Tx */
 #define CGX_OFFSET(x)			((x) * MAX_LMAC_PER_CGX)
@@ -95,7 +95,7 @@ struct cgx_event_cb {
 extern struct pci_driver cgx_driver;
 
-int cgx_get_cgx_cnt(void);
+int cgx_get_cgxcnt_max(void);
 int cgx_get_lmac_cnt(void *cgxd);
 void *cgx_get_pdata(int cgx_id);
 int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
@@ -226,7 +226,7 @@ struct rvu {
 	/* CGX */
 #define PF_CGXMAP_BASE	1 /* PF 0 is reserved for RVU PF */
 	u8		cgx_mapped_pfs;
-	u8		cgx_cnt;	/* available cgx ports */
+	u8		cgx_cnt_max;	/* CGX port count max */
 	u8		*pf2cgxlmac_map; /* pf to cgx_lmac map */
 	u16		*cgxlmac2pf_map; /* bitmap of mapped pfs for
 					  * every cgx lmac port
@@ -52,7 +52,7 @@ static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
 
 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
 {
-	if (cgx_id >= rvu->cgx_cnt)
+	if (cgx_id >= rvu->cgx_cnt_max)
 		return NULL;
 
 	return rvu->cgx_idmap[cgx_id];
@@ -61,38 +61,40 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
 static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 {
 	struct npc_pkind *pkind = &rvu->hw->pkind;
-	int cgx_cnt = rvu->cgx_cnt;
+	int cgx_cnt_max = rvu->cgx_cnt_max;
 	int cgx, lmac_cnt, lmac;
 	int pf = PF_CGXMAP_BASE;
 	int size, free_pkind;
 
-	if (!cgx_cnt)
+	if (!cgx_cnt_max)
 		return 0;
 
-	if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
 		return -EINVAL;
 
 	/* Alloc map table
 	 * An additional entry is required since PF id starts from 1 and
 	 * hence entry at offset 0 is invalid.
 	 */
-	size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
-	rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
 	if (!rvu->pf2cgxlmac_map)
 		return -ENOMEM;
 
-	/* Initialize offset 0 with an invalid cgx and lmac id */
-	rvu->pf2cgxlmac_map[0] = 0xFF;
+	/* Initialize all entries with an invalid cgx and lmac id */
+	memset(rvu->pf2cgxlmac_map, 0xFF, size);
 
 	/* Reverse map table */
 	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
-				  cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
 				  GFP_KERNEL);
 	if (!rvu->cgxlmac2pf_map)
 		return -ENOMEM;
 
 	rvu->cgx_mapped_pfs = 0;
-	for (cgx = 0; cgx < cgx_cnt; cgx++) {
+	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+		if (!rvu_cgx_pdata(cgx, rvu))
+			continue;
 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
 		for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
 			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -234,7 +236,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
 	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
 	cb.data = rvu;
 
-	for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
 		cgxd = rvu_cgx_pdata(cgx, rvu);
 		for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
 			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
@@ -261,20 +263,22 @@ int rvu_cgx_init(struct rvu *rvu)
 {
 	int cgx, err;
 
-	/* find available cgx ports */
-	rvu->cgx_cnt = cgx_get_cgx_cnt();
-	if (!rvu->cgx_cnt) {
+	/* CGX port id starts from 0 and are not necessarily contiguous
+	 * Hence we allocate resources based on the maximum port id value.
+	 */
+	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+	if (!rvu->cgx_cnt_max) {
 		dev_info(rvu->dev, "No CGX devices found!\n");
 		return -ENODEV;
 	}
 
-	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
-				      GFP_KERNEL);
+	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+				      sizeof(void *), GFP_KERNEL);
 	if (!rvu->cgx_idmap)
 		return -ENOMEM;
 
 	/* Initialize the cgxdata table */
-	for (cgx = 0; cgx < rvu->cgx_cnt; cgx++)
+	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
 		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
 
 	/* Map CGX LMAC interfaces to RVU PFs */
@@ -2107,8 +2107,10 @@ static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
 
 	/* Check if CGX devices are ready */
-	for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
-		if (status & (BIT_ULL(16 + idx)))
+	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
+		/* Skip when cgx port is not available */
+		if (!rvu_cgx_pdata(idx, rvu) ||
+		    (status & (BIT_ULL(16 + idx))))
 			continue;
 		dev_err(rvu->dev,
 			"CGX%d didn't respond to NIX X2P calibration\n", idx);