Commit 19433646 authored by David S. Miller

Merge branch 'gianfar-next'

Claudiu Manoil says:

====================
gianfar: Tx timeout issue

There's an older Tx timeout issue showing up on etsec2 devices
with 2 CPUs.  I pinned this issue down to processing overhead
incurred by supporting multiple Tx/Rx rings, as explained in
the 2nd patch below.  But before that, there's also a concurrency
issue leading to spurious Rx/Tx interrupts, addressed by the
'Tx NAPI' patch below.
The Tx timeout can be triggered with multiple Tx flows,
'iperf -c <server> -P 8' commands, on a 2-CPU etsec2-based (P1020) board.

Before the patches:
"""
root@p1020rdb-pc:~# iperf -c 172.16.1.3 -n 1000M -P 8 &
[...]
root@p1020rdb-pc:~# NETDEV WATCHDOG: eth1 (fsl-gianfar): transmit queue 1 timed out
WARNING: at net/sched/sch_generic.c:279
Modules linked in:
CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.13.0-rc3-03386-g89ea59c #23
task: ed84ef40 ti: ed868000 task.ti: ed868000
NIP: c04627a8 LR: c04627a8 CTR: c02fb270
REGS: ed869d00 TRAP: 0700   Not tainted  (3.13.0-rc3-03386-g89ea59c)
MSR: 00029000 <CE,EE,ME>  CR: 44000022  XER: 20000000
[...]

root@p1020rdb-pc:~# [ ID] Interval       Transfer     Bandwidth
[  5]  0.0-19.3 sec  1000 MBytes    434 Mbits/sec
[  8]  0.0-39.7 sec  1000 MBytes    211 Mbits/sec
[  9]  0.0-40.1 sec  1000 MBytes    209 Mbits/sec
[  3]  0.0-40.2 sec  1000 MBytes    209 Mbits/sec
[ 10]  0.0-59.0 sec  1000 MBytes    142 Mbits/sec
[  7]  0.0-74.6 sec  1000 MBytes    112 Mbits/sec
[  6]  0.0-74.7 sec  1000 MBytes    112 Mbits/sec
[  4]  0.0-74.7 sec  1000 MBytes    112 Mbits/sec
[SUM]  0.0-74.7 sec  7.81 GBytes    898 Mbits/sec

root@p1020rdb-pc:~# ifconfig eth1
eth1      Link encap:Ethernet  HWaddr 00:04:9f:00:13:01
          inet addr:172.16.1.1  Bcast:172.16.255.255  Mask:255.255.0.0
          inet6 addr: fe80::204:9fff:fe00:1301/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:708722 errors:0 dropped:0 overruns:0 frame:0
          TX packets:8717849 errors:6 dropped:0 overruns:1470 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:58118018 (55.4 MiB)  TX bytes:274069482 (261.3 MiB)
          Base address:0xa000

"""

After applying the patches:
"""
root@p1020rdb-pc:~# iperf -c 172.16.1.3 -n 1000M -P 8 &
[...]
root@p1020rdb-pc:~# [ ID] Interval       Transfer     Bandwidth
[  9]  0.0-70.5 sec  1000 MBytes    119 Mbits/sec
[  5]  0.0-70.5 sec  1000 MBytes    119 Mbits/sec
[  6]  0.0-70.7 sec  1000 MBytes    119 Mbits/sec
[  4]  0.0-71.0 sec  1000 MBytes    118 Mbits/sec
[  8]  0.0-71.1 sec  1000 MBytes    118 Mbits/sec
[  3]  0.0-71.2 sec  1000 MBytes    118 Mbits/sec
[ 10]  0.0-71.3 sec  1000 MBytes    118 Mbits/sec
[  7]  0.0-71.3 sec  1000 MBytes    118 Mbits/sec
[SUM]  0.0-71.3 sec  7.81 GBytes    942 Mbits/sec

root@p1020rdb-pc:~# ifconfig eth1
eth1      Link encap:Ethernet  HWaddr 00:04:9f:00:13:01
          inet addr:172.16.1.1  Bcast:172.16.255.255  Mask:255.255.0.0
          inet6 addr: fe80::204:9fff:fe00:1301/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:728446 errors:0 dropped:0 overruns:0 frame:0
          TX packets:8690057 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:59732650 (56.9 MiB)  TX bytes:271554306 (258.9 MiB)
          Base address:0xa000
"""
v2: PATCH 2:
    Replaced CPP check with run-time condition to
    limit the number of queues. Updated comments.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
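
The run-time condition mentioned in the v2 note above corresponds to the queue-count
selection in the gfar_of_init() hunk below. As a condensed sketch (not a literal copy
of the patch), the idea is roughly:

    if (priv->mode == SQ_SG_MODE) {
            num_tx_qs = 1;                  /* single queue, single group device */
            num_rx_qs = 1;
    } else if (priv->poll_mode == GFAR_SQ_POLLING) {
            num_tx_qs = 2;                  /* one queue per interrupt group */
            num_rx_qs = 2;
    } else {                                /* GFAR_MQ_POLLING */
            num_tx_qs = tx_queues ? *tx_queues : 1;
            num_rx_qs = rx_queues ? *rx_queues : 1;
    }

so SQ polling mode caps an etsec2 (MQ_MG_MODE) device at one Rx/Tx queue pair per
interrupt group at run time, instead of compiling the limit in with a CPP check.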
@@ -128,8 +128,10 @@ static void free_skb_resources(struct gfar_private *priv);
 static void gfar_set_multi(struct net_device *dev);
 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
 static void gfar_configure_serdes(struct net_device *dev);
-static int gfar_poll(struct napi_struct *napi, int budget);
-static int gfar_poll_sq(struct napi_struct *napi, int budget);
+static int gfar_poll_rx(struct napi_struct *napi, int budget);
+static int gfar_poll_tx(struct napi_struct *napi, int budget);
+static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void gfar_netpoll(struct net_device *dev);
 #endif
@@ -361,7 +363,10 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
         if (priv->rx_filer_enable) {
                 rctrl |= RCTRL_FILREN;
                 /* Program the RIR0 reg with the required distribution */
-                gfar_write(&regs->rir0, DEFAULT_RIR0);
+                if (priv->poll_mode == GFAR_SQ_POLLING)
+                        gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
+                else /* GFAR_MQ_POLLING */
+                        gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
         }
 
         /* Restore PROMISC mode */
@@ -614,23 +619,26 @@ static void disable_napi(struct gfar_private *priv)
 {
         int i;
 
-        for (i = 0; i < priv->num_grps; i++)
-                napi_disable(&priv->gfargrp[i].napi);
+        for (i = 0; i < priv->num_grps; i++) {
+                napi_disable(&priv->gfargrp[i].napi_rx);
+                napi_disable(&priv->gfargrp[i].napi_tx);
+        }
 }
 
 static void enable_napi(struct gfar_private *priv)
 {
         int i;
 
-        for (i = 0; i < priv->num_grps; i++)
-                napi_enable(&priv->gfargrp[i].napi);
+        for (i = 0; i < priv->num_grps; i++) {
+                napi_enable(&priv->gfargrp[i].napi_rx);
+                napi_enable(&priv->gfargrp[i].napi_tx);
+        }
 }
 
 static int gfar_parse_group(struct device_node *np,
                             struct gfar_private *priv, const char *model)
 {
         struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
-        u32 *queue_mask;
         int i;
 
         for (i = 0; i < GFAR_NUM_IRQS; i++) {
@@ -659,12 +667,20 @@ static int gfar_parse_group(struct device_node *np,
         grp->priv = priv;
         spin_lock_init(&grp->grplock);
         if (priv->mode == MQ_MG_MODE) {
-                queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
-                grp->rx_bit_map = queue_mask ?
-                        *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
-                queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
-                grp->tx_bit_map = queue_mask ?
-                        *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+                u32 *rxq_mask, *txq_mask;
+
+                rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+                txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+
+                if (priv->poll_mode == GFAR_SQ_POLLING) {
+                        /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
+                        grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+                        grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+                } else { /* GFAR_MQ_POLLING */
+                        grp->rx_bit_map = rxq_mask ?
                                *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+                        grp->tx_bit_map = txq_mask ?
                                *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+                }
         } else {
                 grp->rx_bit_map = 0xFF;
                 grp->tx_bit_map = 0xFF;
@@ -680,6 +696,8 @@ static int gfar_parse_group(struct device_node *np,
          * also assign queues to groups
          */
         for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+                if (!grp->rx_queue)
+                        grp->rx_queue = priv->rx_queue[i];
                 grp->num_rx_queues++;
                 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
                 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
@@ -687,6 +705,8 @@ static int gfar_parse_group(struct device_node *np,
         }
 
         for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+                if (!grp->tx_queue)
+                        grp->tx_queue = priv->tx_queue[i];
                 grp->num_tx_queues++;
                 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
                 priv->tqueue |= (TQUEUE_EN0 >> i);
@@ -717,9 +737,22 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
         if (!np || !of_device_is_available(np))
                 return -ENODEV;
 
-        /* parse the num of tx and rx queues */
+        /* parse the num of HW tx and rx queues */
         tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
-        num_tx_qs = tx_queues ? *tx_queues : 1;
+        rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
+
+        if (priv->mode == SQ_SG_MODE) {
+                num_tx_qs = 1;
+                num_rx_qs = 1;
+        } else { /* MQ_MG_MODE */
+                if (priv->poll_mode == GFAR_SQ_POLLING) {
+                        num_tx_qs = 2; /* one q per int group */
+                        num_rx_qs = 2; /* one q per int group */
+                } else { /* GFAR_MQ_POLLING */
+                        num_tx_qs = tx_queues ? *tx_queues : 1;
+                        num_rx_qs = rx_queues ? *rx_queues : 1;
+                }
+        }
 
         if (num_tx_qs > MAX_TX_QS) {
                 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
@@ -728,9 +761,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
                 return -EINVAL;
         }
 
-        rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
-        num_rx_qs = rx_queues ? *rx_queues : 1;
-
         if (num_rx_qs > MAX_RX_QS) {
                 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
                        num_rx_qs, MAX_RX_QS);
@@ -771,6 +801,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
         /* Parse and initialize group specific information */
         if (of_device_is_compatible(np, "fsl,etsec2")) {
                 priv->mode = MQ_MG_MODE;
+                priv->poll_mode = GFAR_SQ_POLLING;
                 for_each_child_of_node(np, child) {
                         err = gfar_parse_group(child, priv, model);
                         if (err)
@@ -778,6 +809,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
                 }
         } else {
                 priv->mode = SQ_SG_MODE;
+                priv->poll_mode = GFAR_SQ_POLLING;
                 err = gfar_parse_group(np, priv, model);
                 if (err)
                         goto err_grp_init;
@@ -1257,13 +1289,19 @@ static int gfar_probe(struct platform_device *ofdev)
         dev->ethtool_ops = &gfar_ethtool_ops;
 
         /* Register for napi ...We are registering NAPI for each grp */
-        if (priv->mode == SQ_SG_MODE)
-                netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
-                               GFAR_DEV_WEIGHT);
-        else
-                for (i = 0; i < priv->num_grps; i++)
-                        netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
-                                       GFAR_DEV_WEIGHT);
+        for (i = 0; i < priv->num_grps; i++) {
+                if (priv->poll_mode == GFAR_SQ_POLLING) {
+                        netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+                                       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
+                        netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
+                                       gfar_poll_tx_sq, 2);
+                } else {
+                        netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+                                       gfar_poll_rx, GFAR_DEV_WEIGHT);
+                        netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
+                                       gfar_poll_tx, 2);
+                }
+        }
 
         if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -2538,31 +2576,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
         netdev_tx_completed_queue(txq, howmany, bytes_sent);
 }
 
-static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&gfargrp->grplock, flags);
-        if (napi_schedule_prep(&gfargrp->napi)) {
-                gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
-                __napi_schedule(&gfargrp->napi);
-        } else {
-                /* Clear IEVENT, so interrupts aren't called again
-                 * because of the packets that have already arrived.
-                 */
-                gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
-        }
-        spin_unlock_irqrestore(&gfargrp->grplock, flags);
-}
-
-/* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *grp_id)
-{
-        gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
-        return IRQ_HANDLED;
-}
-
 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                            struct sk_buff *skb)
 {
@@ -2633,7 +2646,48 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
 
 irqreturn_t gfar_receive(int irq, void *grp_id)
 {
-        gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
+        struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+        unsigned long flags;
+        u32 imask;
+
+        if (likely(napi_schedule_prep(&grp->napi_rx))) {
+                spin_lock_irqsave(&grp->grplock, flags);
+                imask = gfar_read(&grp->regs->imask);
+                imask &= IMASK_RX_DISABLED;
+                gfar_write(&grp->regs->imask, imask);
+                spin_unlock_irqrestore(&grp->grplock, flags);
+                __napi_schedule(&grp->napi_rx);
+        } else {
+                /* Clear IEVENT, so interrupts aren't called again
+                 * because of the packets that have already arrived.
+                 */
+                gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
+        }
+
+        return IRQ_HANDLED;
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
+{
+        struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+        unsigned long flags;
+        u32 imask;
+
+        if (likely(napi_schedule_prep(&grp->napi_tx))) {
+                spin_lock_irqsave(&grp->grplock, flags);
+                imask = gfar_read(&grp->regs->imask);
+                imask &= IMASK_TX_DISABLED;
+                gfar_write(&grp->regs->imask, imask);
+                spin_unlock_irqrestore(&grp->grplock, flags);
+                __napi_schedule(&grp->napi_tx);
+        } else {
+                /* Clear IEVENT, so interrupts aren't called again
+                 * because of the packets that have already arrived.
+                 */
+                gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
+        }
+
         return IRQ_HANDLED;
 }
@@ -2757,7 +2811,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
                         rx_queue->stats.rx_bytes += pkt_len;
                         skb_record_rx_queue(skb, rx_queue->qindex);
                         gfar_process_frame(dev, skb, amount_pull,
-                                           &rx_queue->grp->napi);
+                                           &rx_queue->grp->napi_rx);
 
                 } else {
                         netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2786,55 +2840,81 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
         return howmany;
 }
 
-static int gfar_poll_sq(struct napi_struct *napi, int budget)
+static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
 {
         struct gfar_priv_grp *gfargrp =
-                container_of(napi, struct gfar_priv_grp, napi);
+                container_of(napi, struct gfar_priv_grp, napi_rx);
         struct gfar __iomem *regs = gfargrp->regs;
-        struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
-        struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
+        struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
         int work_done = 0;
 
         /* Clear IEVENT, so interrupts aren't called again
          * because of the packets that have already arrived
          */
-        gfar_write(&regs->ievent, IEVENT_RTX_MASK);
-
-        /* run Tx cleanup to completion */
-        if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
-                gfar_clean_tx_ring(tx_queue);
+        gfar_write(&regs->ievent, IEVENT_RX_MASK);
 
         work_done = gfar_clean_rx_ring(rx_queue, budget);
 
         if (work_done < budget) {
+                u32 imask;
                 napi_complete(napi);
                 /* Clear the halt bit in RSTAT */
                 gfar_write(&regs->rstat, gfargrp->rstat);
 
-                gfar_write(&regs->imask, IMASK_DEFAULT);
+                spin_lock_irq(&gfargrp->grplock);
+                imask = gfar_read(&regs->imask);
+                imask |= IMASK_RX_DEFAULT;
+                gfar_write(&regs->imask, imask);
+                spin_unlock_irq(&gfargrp->grplock);
         }
 
         return work_done;
 }
 
-static int gfar_poll(struct napi_struct *napi, int budget)
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
+{
+        struct gfar_priv_grp *gfargrp =
+                container_of(napi, struct gfar_priv_grp, napi_tx);
+        struct gfar __iomem *regs = gfargrp->regs;
+        struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
+        u32 imask;
+
+        /* Clear IEVENT, so interrupts aren't called again
+         * because of the packets that have already arrived
+         */
+        gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+        /* run Tx cleanup to completion */
+        if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
+                gfar_clean_tx_ring(tx_queue);
+
+        napi_complete(napi);
+
+        spin_lock_irq(&gfargrp->grplock);
+        imask = gfar_read(&regs->imask);
+        imask |= IMASK_TX_DEFAULT;
+        gfar_write(&regs->imask, imask);
+        spin_unlock_irq(&gfargrp->grplock);
+
+        return 0;
+}
+
+static int gfar_poll_rx(struct napi_struct *napi, int budget)
 {
         struct gfar_priv_grp *gfargrp =
-                container_of(napi, struct gfar_priv_grp, napi);
+                container_of(napi, struct gfar_priv_grp, napi_rx);
         struct gfar_private *priv = gfargrp->priv;
         struct gfar __iomem *regs = gfargrp->regs;
-        struct gfar_priv_tx_q *tx_queue = NULL;
         struct gfar_priv_rx_q *rx_queue = NULL;
         int work_done = 0, work_done_per_q = 0;
         int i, budget_per_q = 0;
-        int has_tx_work = 0;
         unsigned long rstat_rxf;
         int num_act_queues;
 
         /* Clear IEVENT, so interrupts aren't called again
          * because of the packets that have already arrived
          */
-        gfar_write(&regs->ievent, IEVENT_RTX_MASK);
+        gfar_write(&regs->ievent, IEVENT_RX_MASK);
 
         rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
@@ -2842,15 +2922,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
         if (num_act_queues)
                 budget_per_q = budget/num_act_queues;
 
-        for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
-                tx_queue = priv->tx_queue[i];
-                /* run Tx cleanup to completion */
-                if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
-                        gfar_clean_tx_ring(tx_queue);
-                        has_tx_work = 1;
-                }
-        }
-
         for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
                 /* skip queue if not active */
                 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
@@ -2873,19 +2944,62 @@ static int gfar_poll(struct napi_struct *napi, int budget)
                 }
         }
 
-        if (!num_act_queues && !has_tx_work) {
+        if (!num_act_queues) {
+                u32 imask;
                 napi_complete(napi);
 
                 /* Clear the halt bit in RSTAT */
                 gfar_write(&regs->rstat, gfargrp->rstat);
 
-                gfar_write(&regs->imask, IMASK_DEFAULT);
+                spin_lock_irq(&gfargrp->grplock);
+                imask = gfar_read(&regs->imask);
+                imask |= IMASK_RX_DEFAULT;
+                gfar_write(&regs->imask, imask);
+                spin_unlock_irq(&gfargrp->grplock);
         }
 
         return work_done;
 }
 
+static int gfar_poll_tx(struct napi_struct *napi, int budget)
+{
+        struct gfar_priv_grp *gfargrp =
+                container_of(napi, struct gfar_priv_grp, napi_tx);
+        struct gfar_private *priv = gfargrp->priv;
+        struct gfar __iomem *regs = gfargrp->regs;
+        struct gfar_priv_tx_q *tx_queue = NULL;
+        int has_tx_work = 0;
+        int i;
+
+        /* Clear IEVENT, so interrupts aren't called again
+         * because of the packets that have already arrived
+         */
+        gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+        for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+                tx_queue = priv->tx_queue[i];
+                /* run Tx cleanup to completion */
+                if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+                        gfar_clean_tx_ring(tx_queue);
+                        has_tx_work = 1;
+                }
+        }
+
+        if (!has_tx_work) {
+                u32 imask;
+                napi_complete(napi);
+
+                spin_lock_irq(&gfargrp->grplock);
+                imask = gfar_read(&regs->imask);
+                imask |= IMASK_TX_DEFAULT;
+                gfar_write(&regs->imask, imask);
+                spin_unlock_irq(&gfargrp->grplock);
+        }
+
+        return 0;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /* Polling 'interrupt' - used by things like netconsole to send skbs
  * without having to re-enable interrupts. It's not called while
......
@@ -377,8 +377,11 @@ extern const char gfar_driver_version[];
                  IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
                  IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
                  | IMASK_PERR)
-#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \
-                           & IMASK_DEFAULT)
+#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY)
+#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN)
+
+#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
+#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
 
 /* Fifo management */
 #define FIFO_TX_THR_MASK        0x01ff
@@ -409,7 +412,9 @@ extern const char gfar_driver_version[];
 /* This default RIR value directly corresponds
  * to the 3-bit hash value generated */
-#define DEFAULT_RIR0            0x05397700
+#define DEFAULT_8RXQ_RIR0       0x05397700
+/* Map even hash values to Q0, and odd ones to Q1 */
+#define DEFAULT_2RXQ_RIR0       0x04104100
 
 /* RQFCR register bits */
 #define RQFCR_GPI               0x80000000
@@ -904,6 +909,22 @@ enum {
         MQ_MG_MODE
 };
 
+/* GFAR_SQ_POLLING: Single Queue NAPI polling mode
+ *      The driver supports a single pair of RX/Tx queues
+ *      per interrupt group (Rx/Tx int line). MQ_MG mode
+ *      devices have 2 interrupt groups, so the device will
+ *      have a total of 2 Tx and 2 Rx queues in this case.
+ * GFAR_MQ_POLLING: Multi Queue NAPI polling mode
+ *      The driver supports all the 8 Rx and Tx HW queues
+ *      each queue mapped by the Device Tree to one of
+ *      the 2 interrupt groups. This mode implies significant
+ *      processing overhead (CPU and controller level).
+ */
+enum gfar_poll_mode {
+        GFAR_SQ_POLLING = 0,
+        GFAR_MQ_POLLING
+};
+
 /*
  * Per TX queue stats
  */
@@ -1013,17 +1034,20 @@ struct gfar_irqinfo {
  */
 
 struct gfar_priv_grp {
-        spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
-        struct napi_struct napi;
-        struct gfar_private *priv;
+        spinlock_t grplock __aligned(SMP_CACHE_BYTES);
+        struct napi_struct napi_rx;
+        struct napi_struct napi_tx;
         struct gfar __iomem *regs;
-        unsigned int rstat;
-        unsigned long num_rx_queues;
-        unsigned long rx_bit_map;
-        /* cacheline 3 */
+        struct gfar_priv_tx_q *tx_queue;
+        struct gfar_priv_rx_q *rx_queue;
         unsigned int tstat;
+        unsigned int rstat;
+
+        struct gfar_private *priv;
         unsigned long num_tx_queues;
         unsigned long tx_bit_map;
+        unsigned long num_rx_queues;
+        unsigned long rx_bit_map;
 
         struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
 };
@@ -1053,8 +1077,6 @@ enum gfar_dev_state {
  * the buffer descriptor determines the actual condition.
  */
 struct gfar_private {
-        unsigned int num_rx_queues;
-
         struct device *dev;
         struct net_device *ndev;
         enum gfar_errata errata;
@@ -1062,6 +1084,7 @@ struct gfar_private {
         u16 uses_rxfcb;
         u16 padding;
+        u32 device_flags;
 
         /* HW time stamping enabled flag */
         int hwts_rx_en;
@@ -1072,10 +1095,11 @@ struct gfar_private {
         struct gfar_priv_grp gfargrp[MAXGROUPS];
 
         unsigned long state;
-        u32 device_flags;
 
-        unsigned int mode;
+        unsigned short mode;
+        unsigned short poll_mode;
         unsigned int num_tx_queues;
+        unsigned int num_rx_queues;
         unsigned int num_grps;
 
         /* Network Statistics */
......