Commit a0291766 authored by Sunil Goutham, committed by David S. Miller

octeontx2-af: Add option to disable dynamic entry caching in NDC

A config option is added to disable caching of dynamic entries
like SQEs and stack pages. It also locks down all HW contexts in NDC,
preventing them from being evicted.

This option is useful when the queue count is large and NDC cache
misses are high. It is a trade-off between SQ context misses and the
caching of dynamically changing entries like SQEs and stack page pointers.
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent ee1e7591
@@ -16,3 +16,12 @@ config OCTEONTX2_AF
Unit's admin function manager which manages all RVU HW resources
and provides a medium to other PF/VFs to configure HW. Should be
enabled for other RVU device drivers to work.
config NDC_DIS_DYNAMIC_CACHING
bool "Disable caching of dynamic entries in NDC"
depends on OCTEONTX2_AF
default n
---help---
This config option disables caching of dynamic entries such as NIX SQEs,
NPA stack pages etc. in NDC. It also locks down NIX SQ/CQ/RQ/RSS and
NPA Aura/Pool contexts.
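
For reference, the option defaults to off; trying it out just means switching it on at configure time together with the AF driver, e.g. in the resulting .config (a minimal fragment, assuming an OcteonTX2 platform configuration):

    CONFIG_OCTEONTX2_AF=y
    CONFIG_NDC_DIS_DYNAMIC_CACHING=y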
@@ -661,6 +661,21 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
return 0;
}
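
/* Map a NIX AQ context type to a printable name for error messages. */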
static const char *nix_get_ctx_name(int ctype)
{
switch (ctype) {
case NIX_AQ_CTYPE_CQ:
return "CQ";
case NIX_AQ_CTYPE_SQ:
return "SQ";
case NIX_AQ_CTYPE_RQ:
return "RQ";
case NIX_AQ_CTYPE_RSS:
return "RSS";
}
return "";
}
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -705,21 +720,60 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
if (rc) {
err = rc;
 	dev_err(rvu->dev, "Failed to disable %s:%d context\n",
-		(req->ctype == NIX_AQ_CTYPE_CQ) ?
-		"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
-		"RQ" : "SQ"), qidx);
+		nix_get_ctx_name(req->ctype), qidx);
}
}
return err;
}
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
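/* With dynamic-entry caching disabled, pin every newly initialised context in
 * NDC by issuing an AQ LOCK instruction right after the INIT that created it.
 */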
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
struct nix_aq_enq_req lock_ctx_req;
int err;
if (req->op != NIX_AQ_INSTOP_INIT)
return 0;
if (req->ctype == NIX_AQ_CTYPE_MCE ||
req->ctype == NIX_AQ_CTYPE_DYNO)
return 0;
memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
lock_ctx_req.ctype = req->ctype;
lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
lock_ctx_req.qidx = req->qidx;
err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
if (err)
dev_err(rvu->dev,
"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
req->hdr.pcifunc,
nix_get_ctx_name(req->ctype), req->qidx);
return err;
}
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
int err;
err = rvu_nix_aq_enq_inst(rvu, req, rsp);
if (!err)
err = nix_lf_hwctx_lockdown(rvu, req);
return err;
}
#else
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -2871,6 +2925,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Do not bypass NDC cache */
cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
cfg &= ~0x3FFEULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
/* Disable caching of SQB aka SQEs */
cfg |= 0x04ULL;
#endif
rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
/* Result structure can be followed by RQ/SQ/CQ context at
......
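
In other words, nix_aq_init() ends up doing a plain read-modify-write of the block's NIX_AF_NDC_CFG register. A condensed sketch of that sequence follows; the mask names are invented here purely for readability (the patch itself uses only the raw values), and IS_ENABLED() is shown merely as an equivalent to the #ifdef above:

    /* Illustrative only: mirrors the nix_aq_init() change above. Bit names are
     * assumptions inferred from the comments, not taken from the HW headers.
     */
    #define NDC_CFG_BYPASS_BITS    0x3FFEULL  /* cache-bypass controls */
    #define NDC_CFG_SQB_CACHE_DIS  0x04ULL    /* stop caching SQBs (i.e. SQEs) */

    cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
    cfg &= ~NDC_CFG_BYPASS_BITS;              /* never bypass the NDC cache */
    if (IS_ENABLED(CONFIG_NDC_DIS_DYNAMIC_CACHING))
            cfg |= NDC_CFG_SQB_CACHE_DIS;     /* ...but never cache dynamic SQ entries */
    rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

The NPA hunk below applies the same idea to NPA_AF_NDC_CFG, setting bit 0x10 so that stack pages are likewise excluded from caching.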
@@ -241,12 +241,50 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
return err;
}
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
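/* NPA counterpart of the NIX lockdown above: lock Aura/Pool contexts in NDC
 * right after the INIT instruction that created them.
 */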
static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
{
struct npa_aq_enq_req lock_ctx_req;
int err;
if (req->op != NPA_AQ_INSTOP_INIT)
return 0;
memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
lock_ctx_req.ctype = req->ctype;
lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
lock_ctx_req.aura_id = req->aura_id;
err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
if (err)
dev_err(rvu->dev,
"PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
req->hdr.pcifunc,
(req->ctype == NPA_AQ_CTYPE_AURA) ?
"Aura" : "Pool", req->aura_id);
return err;
}
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
int err;
err = rvu_npa_aq_enq_inst(rvu, req, rsp);
if (!err)
err = npa_lf_hwctx_lockdown(rvu, req);
return err;
}
#else
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
#endif
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -427,6 +465,10 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Do not bypass NDC cache */
cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
cfg &= ~0x03DULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
/* Disable caching of stack pages */
cfg |= 0x10ULL;
#endif
rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
/* Result structure can be followed by Aura/Pool context at
......