提交 c856e2b6 编写于 作者: Rahul Lakkireddy 提交者: David S. Miller

cxgb4: fix Tx multi channel port rate limit

T6 can support 2 egress traffic management channels per port to
double the total number of traffic classes that can be configured.
In this configuration, if the class belongs to the other channel,
then all the queues must be bound again explicitly to the new class,
for the rate limit parameters on the other channel to take effect.

So, always explicitly bind all queues to the port rate limit traffic
class, regardless of the traffic management channel that it belongs
to. Also, only bind queues to port rate limit traffic class, if all
the queues don't already belong to an existing different traffic
class.

Fixes: 4ec4762d ("cxgb4: add TC-MATCHALL classifier egress offload")
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 09d4f10a
......@@ -3135,9 +3135,9 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
struct ch_sched_queue qe = { 0 };
struct ch_sched_params p = { 0 };
struct sched_class *e;
struct ch_sched_params p;
struct ch_sched_queue qe;
u32 req_rate;
int err = 0;
......@@ -3154,6 +3154,15 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return -EINVAL;
}
qe.queue = index;
e = cxgb4_sched_queue_lookup(dev, &qe);
if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
dev_err(adap->pdev_dev,
"Queue %u already bound to class %u of type: %u\n",
index, e->idx, e->info.u.params.level);
return -EBUSY;
}
/* Convert from Mbps to Kbps */
req_rate = rate * 1000;
......@@ -3183,7 +3192,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return 0;
/* Fetch any available unused or matching scheduling class */
memset(&p, 0, sizeof(p));
p.type = SCHED_CLASS_TYPE_PACKET;
p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
p.u.params.mode = SCHED_CLASS_MODE_CLASS;
......
......@@ -15,6 +15,8 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct flow_action *actions = &cls->rule->action;
struct port_info *pi = netdev2pinfo(dev);
struct flow_action_entry *entry;
struct ch_sched_queue qe;
struct sched_class *e;
u64 max_link_rate;
u32 i, speed;
int ret;
......@@ -60,9 +62,61 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
}
}
for (i = 0; i < pi->nqsets; i++) {
memset(&qe, 0, sizeof(qe));
qe.queue = i;
e = cxgb4_sched_queue_lookup(dev, &qe);
if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
NL_SET_ERR_MSG_MOD(extack,
"Some queues are already bound to different class");
return -EBUSY;
}
}
return 0;
}
/* Bind every Tx queue of the port to traffic class @tc.
 *
 * If any bind fails part-way, roll back by unbinding each queue that
 * was already bound, in reverse order, so the port is left unchanged.
 *
 * Return: 0 on success, or the negative error from the failed bind.
 */
static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	u32 bound = 0;
	int ret = 0;

	while (bound < pi->nqsets) {
		qe.queue = bound;
		qe.class = tc;
		ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
		if (ret)
			break;
		bound++;
	}

	if (!ret)
		return 0;

	/* Unwind: detach the queues bound before the failure. */
	while (bound--) {
		qe.queue = bound;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}

	return ret;
}
/* Detach every Tx queue of the port from whatever traffic class it is
 * currently bound to (SCHED_CLS_NONE selects the unbind path in the
 * scheduler core). Best-effort: unbind results are not checked.
 */
static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	u32 nqsets = pi->nqsets;
	struct ch_sched_queue qe;
	u32 qidx;

	for (qidx = 0; qidx < nqsets; qidx++) {
		qe.queue = qidx;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}
}
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
......@@ -83,6 +137,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *entry;
struct sched_class *e;
int ret;
u32 i;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
......@@ -101,10 +156,21 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
return -ENOMEM;
}
ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
if (ret) {
NL_SET_ERR_MSG_MOD(extack,
"Could not bind queues to traffic class");
goto out_free;
}
tc_port_matchall->egress.hwtc = e->idx;
tc_port_matchall->egress.cookie = cls->cookie;
tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
return 0;
out_free:
cxgb4_sched_class_free(dev, e->idx);
return ret;
}
static void cxgb4_matchall_free_tc(struct net_device *dev)
......@@ -114,6 +180,7 @@ static void cxgb4_matchall_free_tc(struct net_device *dev)
struct adapter *adap = netdev2adap(dev);
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
cxgb4_matchall_tc_unbind_queues(dev);
cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
......
......@@ -165,6 +165,22 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
return found;
}
/* Look up the scheduling class a Tx queue is currently bound to.
 *
 * @dev: the port's net device
 * @p:   only @p->queue is consulted; it is a queue index relative to
 *       the port's first queue set
 *
 * Return: the bound sched_class, or NULL when the index is out of
 * range or the queue has no binding.
 */
struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
					     struct ch_sched_queue *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	struct sched_queue_entry *entry;
	struct sge_eth_txq *txq;

	/* Guard the index before touching the Tx queue array. */
	if (p->queue < 0 || p->queue >= pi->nqsets)
		return NULL;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	entry = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
	if (!entry)
		return NULL;

	return &pi->sched_tbl->tab[entry->param.class];
}
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
struct sched_queue_entry *qe = NULL;
......
......@@ -103,6 +103,8 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id)
return true;
}
struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
struct ch_sched_queue *p);
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type);
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册