Commit d8936657 authored by Maxime Ripard, committed by David S. Miller

net: mvneta: Allow different queues

The mvneta driver allows changing the default RX queue through the rxq_def
kernel parameter.

However, the current code doesn't allow any value other than 0. The value is
actively checked for in the driver's probe function, because the driver makes
a number of assumptions and takes a number of shortcuts in order to use just
that RX queue.

Remove these limitations in order to be able to specify any available
queue.
Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 12bb03b4
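As a side note for readers trying the new behaviour: below is a minimal, hypothetical sketch (assumptions, not code from this patch) of how rxq_def could be exposed as a module parameter and sanity-checked against the eight hardware RX queues. The module_param() wiring and the mvneta_check_rxq_def() helper are illustrative only; the driver itself does not enforce such a check after this change.

    /* Hypothetical sketch -- not part of this patch. With this wiring, the
     * default RX queue could be selected at boot ("mvneta.rxq_def=2" on the
     * kernel command line) or at load time ("modprobe mvneta rxq_def=2").
     */
    #include <linux/device.h>
    #include <linux/moduleparam.h>

    static int rxq_def;
    module_param(rxq_def, int, 0444);

    /* Hypothetical helper: reject values outside the eight (8) hardware
     * RX queues, numbered 0..7.
     */
    static int mvneta_check_rxq_def(struct device *dev)
    {
            if (rxq_def < 0 || rxq_def > 7) {
                    dev_err(dev, "rxq_def %d out of range [0..7]\n", rxq_def);
                    return -EINVAL;
            }
            return 0;
    }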
drivers/net/ethernet/marvell/mvneta.c

@@ -478,7 +478,7 @@ struct mvneta_rx_queue {
 /* The hardware supports eight (8) rx queues, but we are only allowing
  * the first one to be used. Therefore, let's just allocate one queue.
  */
-static int rxq_number = 1;
+static int rxq_number = 8;
 static int txq_number = 8;
 static int rxq_def;

@@ -766,14 +766,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
 	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

 	/* Enable all initialized RXQs. */
-	q_map = 0;
-	for (queue = 0; queue < rxq_number; queue++) {
-		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
-		if (rxq->descs != NULL)
-			q_map |= (1 << queue);
-	}
-	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
+	mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
 }

 /* Stop the Ethernet port activity */

@@ -1436,17 +1429,6 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
 	return MVNETA_TX_L4_CSUM_NOT;
 }

-/* Returns rx queue pointer (find last set bit) according to causeRxTx
- * value
- */
-static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
-						u32 cause)
-{
-	int queue = fls(cause >> 8) - 1;
-
-	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
-}
-
 /* Drop packets received by the RXQ and free buffers */
 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 				 struct mvneta_rx_queue *rxq)

@@ -2146,33 +2128,8 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 	 * RX packets
 	 */
 	cause_rx_tx |= port->cause_rx_tx;
-	if (rxq_number > 1) {
-		while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
-			int count;
-			struct mvneta_rx_queue *rxq;
-			/* get rx queue number from cause_rx_tx */
-			rxq = mvneta_rx_policy(pp, cause_rx_tx);
-			if (!rxq)
-				break;
-
-			/* process the packet in that rx queue */
-			count = mvneta_rx(pp, budget, rxq);
-			rx_done += count;
-			budget -= count;
-			if (budget > 0) {
-				/* set off the rx bit of the
-				 * corresponding bit in the cause rx
-				 * tx register, so that next iteration
-				 * will find the next rx queue where
-				 * packets are received on
-				 */
-				cause_rx_tx &= ~((1 << rxq->id) << 8);
-			}
-		}
-	} else {
-		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
-		budget -= rx_done;
-	}
+	rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
+	budget -= rx_done;

 	if (budget > 0) {
 		cause_rx_tx = 0;

@@ -2384,26 +2341,19 @@ static void mvneta_cleanup_txqs(struct mvneta_port *pp)
 /* Cleanup all Rx queues */
 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
-	int queue;
-
-	for (queue = 0; queue < rxq_number; queue++)
-		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
+	mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
 }

 /* Init all Rx queues */
 static int mvneta_setup_rxqs(struct mvneta_port *pp)
 {
-	int queue;
-
-	for (queue = 0; queue < rxq_number; queue++) {
-		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
-		if (err) {
-			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
-				   __func__, queue);
-			mvneta_cleanup_rxqs(pp);
-			return err;
-		}
-	}
+	int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);
+
+	if (err) {
+		netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+			   __func__, rxq_def);
+		mvneta_cleanup_rxqs(pp);
+		return err;
+	}

 	return 0;

@@ -3051,14 +3001,6 @@ static int mvneta_probe(struct platform_device *pdev)
 	int err;
 	int cpu;

-	/* Our multiqueue support is not complete, so for now, only
-	 * allow the usage of the first RX queue
-	 */
-	if (rxq_def != 0) {
-		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
-		return -EINVAL;
-	}
-
 	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
 	if (!dev)
 		return -ENOMEM;