Commit b279bbb3 authored by Sunil Goutham, committed by David S. Miller

octeontx2-af: NIX Tx scheduler queue config support

This patch adds support for a PF/VF driver to configure
NIX transmit scheduler queues via mbox. Since a PF/VF doesn't
know the absolute HW index of the NIXLF attached to it, AF
traps the register config and overwrites it with the correct
NIXLF index.

HW supports shaping, colouring and policing of packets with
these multilevel traffic scheduler queues. Instead of
introducing a different mbox message format for each
configuration, which would make both the AF and PF/VF driver
implementations cumbersome, access to the scheduler queues'
CSRs is provided via mbox. AF checks whether the sender PF/VF
actually has the corresponding queue allocated and, if so,
writes the config to HW. A single mbox msg can configure up
to 20 registers.
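For illustration, a PF/VF driver could batch scheduler CSR
writes like this. This is a minimal sketch, not code from this
patch: pfvf_mbox_alloc() and pfvf_mbox_send() are hypothetical
stand-ins for whatever mbox helpers the PF/VF driver defines,
and NIX_AF_TL2X_SCHEDULE(schq) is the AF CSR offset macro with
the queue index encoded at bit 16.

	/* Sketch: set the scheduling config of one TL2 queue with a
	 * single NIX_TXSCHQ_CFG message. AF verifies that 'schq' is
	 * owned by this PF/VF before touching the register.
	 */
	static int pfvf_cfg_tl2_schedule(void *mbox, u16 schq, u64 schedval)
	{
		struct nix_txschq_config *req;

		req = pfvf_mbox_alloc(mbox, NIX_TXSCHQ_CFG, sizeof(*req));
		if (!req)
			return -ENOMEM;

		req->lvl = NIX_TXSCH_LVL_TL2;
		req->num_regs = 1;
		req->reg[0] = NIX_AF_TL2X_SCHEDULE(schq);
		req->regval[0] = schedval;

		return pfvf_mbox_send(mbox);
	}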
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent a3e7121c
drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -154,7 +154,8 @@ M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \
M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \
M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp) \
M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free_req, msg_rsp)
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free_req, msg_rsp) \
M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_config, msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
@@ -448,4 +449,16 @@ struct nix_txsch_free_req {
u16 schq;
};
struct nix_txschq_config {
struct mbox_msghdr hdr;
u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
#define TXSCHQ_IDX_SHIFT 16
#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
u8 num_regs;
#define MAX_REGS_PER_MBOX_MSG 20
u64 reg[MAX_REGS_PER_MBOX_MSG];
u64 regval[MAX_REGS_PER_MBOX_MSG];
};
#endif /* MBOX_H */
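The TXSCHQ_IDX() macro above recovers the scheduler queue
index that the sender encodes into bits <25:16> of each
register offset. A worked example, pure arithmetic on the
macros defined above (0x0E00 is an arbitrary CSR base):

	u64 reg  = 0x0E00 | (5ULL << 16);	/* queue 5, CSR base 0x0E00 */
	u16 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
	/* (0x50E00 >> 16) & 0x3FF == 5, so schq == 5 */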
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -195,6 +195,14 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
/* RVU HW reg validation */
enum regmap_block {
TXSCHQ_HWREGMAP = 0,
MAX_HWREGMAP,
};
bool rvu_check_valid_reg(int regmap, int regblk, u64 reg);
/* NPA/NIX AQ APIs */
int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int qsize, int inst_size, int res_size);
@@ -277,4 +285,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
struct nix_txsch_free_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp);
#endif /* RVU_H */
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -738,10 +738,10 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
if (lvl == NIX_TXSCH_LVL_TL4)
rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
if (lvl != NIX_TXSCH_LVL_TL3)
if (lvl != NIX_TXSCH_LVL_TL2)
return;
/* Reset TL3's CGX or LBK link config */
/* Reset TL2's CGX or LBK link config */
for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
rvu_write64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
@@ -851,7 +851,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
/* Disable TL2/3 queue links before SMQ flush */
spin_lock(&rvu->rsrc_lock);
for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
if (lvl != NIX_TXSCH_LVL_TL3 && lvl != NIX_TXSCH_LVL_TL4)
if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
continue;
txsch = &nix_hw->txsch[lvl];
@@ -909,6 +909,104 @@ int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
return nix_txschq_free(rvu, req->hdr.pcifunc);
}
static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
int lvl, u64 reg, u64 regval)
{
u64 regbase = reg & 0xFFFF;
u16 schq, parent;
if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
return false;
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
/* Check if this schq belongs to this PF/VF or not */
if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
return false;
parent = (regval >> 16) & 0x1FF;
/* Validate MDQ's TL4 parent */
if (regbase == NIX_AF_MDQX_PARENT(0) &&
!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
return false;
/* Validate TL4's TL3 parent */
if (regbase == NIX_AF_TL4X_PARENT(0) &&
!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
return false;
/* Validate TL3's TL2 parent */
if (regbase == NIX_AF_TL3X_PARENT(0) &&
!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
return false;
/* Validate TL2's TL1 parent */
if (regbase == NIX_AF_TL2X_PARENT(0) &&
!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
return false;
return true;
}
int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
u64 reg, regval, schq_regbase;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
int blkaddr, idx, err;
int nixlf;
if (req->lvl >= NIX_TXSCH_LVL_CNT ||
req->num_regs > MAX_REGS_PER_MBOX_MSG)
return NIX_AF_INVAL_TXSCHQ_CFG;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
txsch = &nix_hw->txsch[req->lvl];
for (idx = 0; idx < req->num_regs; idx++) {
reg = req->reg[idx];
regval = req->regval[idx];
schq_regbase = reg & 0xFFFF;
if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
txsch->lvl, reg, regval))
return NIX_AF_INVAL_TXSCHQ_CFG;
/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
pcifunc, 0);
regval &= ~(0x7FULL << 24);
regval |= ((u64)nixlf << 24);
}
rvu_write64(rvu, blkaddr, reg, regval);
/* Check for SMQ flush, if so, poll for its completion */
if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
(regval & BIT_ULL(49))) {
err = rvu_poll_reg(rvu, blkaddr,
reg, BIT_ULL(49), true);
if (err)
return NIX_AF_SMQ_FLUSH_FAILED;
}
}
return 0;
}
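One detail of the handler above is worth calling out: a write
to NIX_AF_SMQX_CFG with bit 49 set triggers an SMQ flush, and
AF synchronously polls that bit until HW clears it, returning
NIX_AF_SMQ_FLUSH_FAILED on timeout. A PF/VF can therefore
flush an SMQ through this same mbox path; a minimal sketch,
reusing the hypothetical helpers from the earlier example:

	/* Sketch: request an SMQ flush. 'cur_cfg' is the caller's view
	 * of NIX_AF_SMQX_CFG; AF rewrites the NIXLF field (bits <30:24>)
	 * with the real HW NIXLF index in any case.
	 */
	static int pfvf_smq_flush(void *mbox, u16 smq, u64 cur_cfg)
	{
		struct nix_txschq_config *req;

		req = pfvf_mbox_alloc(mbox, NIX_TXSCHQ_CFG, sizeof(*req));
		if (!req)
			return -ENOMEM;

		req->lvl = NIX_TXSCH_LVL_SMQ;
		req->num_regs = 1;
		req->reg[0] = NIX_AF_SMQX_CFG(smq);
		req->regval[0] = cur_cfg | BIT_ULL(49);	/* flush trigger */

		return pfvf_mbox_send(mbox);
	}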
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
struct nix_txsch *txsch;
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c (new file)
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"
#include "rvu.h"
struct reg_range {
u64 start;
u64 end;
};
struct hw_reg_map {
u8 regblk;
u8 num_ranges;
u64 mask;
#define MAX_REG_RANGES 8
struct reg_range range[MAX_REG_RANGES];
};
static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
{NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } },
{NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
{0x1200, 0x12E0} } },
{NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
{0x1610, 0x1618} } },
{NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } },
{NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
};
bool rvu_check_valid_reg(int regmap, int regblk, u64 reg)
{
int idx;
struct hw_reg_map *map;
/* Only 64bit offsets */
if (reg & 0x07)
return false;
if (regmap == TXSCHQ_HWREGMAP) {
if (regblk >= NIX_TXSCH_LVL_CNT)
return false;
map = &txsch_reg_map[regblk];
} else {
return false;
}
/* Should never happen */
if (map->regblk != regblk)
return false;
reg &= map->mask;
for (idx = 0; idx < map->num_ranges; idx++) {
if (reg >= map->range[idx].start &&
reg < map->range[idx].end)
return true;
}
return false;
}
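To make the range check concrete, a few calls worked out
against txsch_reg_map[] above (the results follow directly
from the table; no HW access is involved):

	/* NIX_AF_SMQX_CFG of SMQ 5: 8-byte aligned, masked offset 0x0700
	 * falls in [0x0700, 0x0708) -> accepted.
	 */
	rvu_check_valid_reg(TXSCHQ_HWREGMAP, NIX_TXSCH_LVL_SMQ,
			    0x0700 | (5ULL << 16));		/* true */

	/* Not 8-byte aligned -> rejected before any range lookup */
	rvu_check_valid_reg(TXSCHQ_HWREGMAP, NIX_TXSCH_LVL_SMQ, 0x0704); /* false */

	/* A TL2 offset offered at TL1 level: 0x0E00 lies outside TL1's
	 * [0x0C00, 0x0D98) range -> rejected.
	 */
	rvu_check_valid_reg(TXSCHQ_HWREGMAP, NIX_TXSCH_LVL_TL1, 0x0E00); /* false */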