commit 603607de
Author: David S. Miller <davem@davemloft.net>

Merge branch 'mvneta-fixes'

Gregory CLEMENT says:

====================
mvneta fixes for SMP

Following this bug report:
http://thread.gmane.org/gmane.linux.ports.arm.kernel/468173 and the
suggestions from Russell King, I reviewed all the code involving
multi-CPU handling. The result is this series of patches, which should
improve the stability of the driver.

During my tests I found another bug, which is fixed by a new patch (the
second one in this new version of the series).

The first two patches fix real bugs; the others fix potential issues
in the driver.

Changelog:

v1 -> v2
Fix spinlock comment. Pointed out by David Miller

v2 -> v3
 - Fix typos and a mistake in the comments. Pointed out by Sergei Shtylyov
 - Add a new patch fixing the CPU choice in mvneta_percpu_elect
 - Use a lock in the last patch to prevent a remaining race condition.
   Pointed out by Jisheng
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
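
A note on the recurring transformation in this series: open-coded per-CPU
loops are replaced by a single on_each_cpu() call. A minimal sketch of the
two patterns (illustrative fragment only, not compilable on its own; "func"
stands for any of the mvneta per-CPU helpers introduced below):

    /* Before: issue one IPI per online CPU, iterating by hand and
     * racing with CPU hotplug between iterations.
     */
    for_each_online_cpu(cpu)
            smp_call_function_single(cpu, func, pp, true);

    /* After: run func on every online CPU, including the local one,
     * with preemption disabled across the whole operation.
     */
    on_each_cpu(func, pp, true);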
@@ -370,6 +370,11 @@ struct mvneta_port {
 	struct net_device *dev;
 	struct notifier_block cpu_notifier;
 	int rxq_def;
+	/* Protect the access to the percpu interrupt registers,
+	 * ensuring that the configuration remains coherent.
+	 */
+	spinlock_t lock;
+	bool is_stopped;
 
 	/* Core clock */
 	struct clk *clk;
@@ -1038,6 +1043,43 @@ static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
 	}
 }
 
+static void mvneta_percpu_unmask_interrupt(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	/* All the queue are unmasked, but actually only the ones
+	 * mapped to this CPU will be unmasked
+	 */
+	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+		    MVNETA_RX_INTR_MASK_ALL |
+		    MVNETA_TX_INTR_MASK_ALL |
+		    MVNETA_MISCINTR_INTR_MASK);
+}
+
+static void mvneta_percpu_mask_interrupt(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	/* All the queue are masked, but actually only the ones
+	 * mapped to this CPU will be masked
+	 */
+	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+}
+
+static void mvneta_percpu_clear_intr_cause(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	/* All the queue are cleared, but actually only the ones
+	 * mapped to this CPU will be cleared
+	 */
+	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
+	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+}
+
 /* This method sets defaults to the NETA port:
  *	Clears interrupt Cause and Mask registers.
  *	Clears all MAC tables.
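
These three helpers only touch the banked copy of the interrupt registers
that belongs to the CPU they execute on; even though the written value names
every queue, only the queues mapped to that CPU are affected. They are
therefore always invoked via an IPI on each CPU, as the following hunks do,
for example:

    /* Mask the port interrupts as seen by every online CPU */
    on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);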
@@ -1055,14 +1097,10 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
 	int max_cpu = num_present_cpus();
 
 	/* Clear all Cause registers */
-	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
-	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
-	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
 
 	/* Mask all interrupts */
-	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
-	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
-	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
 	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
 
 	/* Enable MBUS Retry bit16 */
@@ -2528,34 +2566,9 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
 	return 0;
 }
 
-static void mvneta_percpu_unmask_interrupt(void *arg)
-{
-	struct mvneta_port *pp = arg;
-
-	/* All the queue are unmasked, but actually only the ones
-	 * maped to this CPU will be unmasked
-	 */
-	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-		    MVNETA_RX_INTR_MASK_ALL |
-		    MVNETA_TX_INTR_MASK_ALL |
-		    MVNETA_MISCINTR_INTR_MASK);
-}
-
-static void mvneta_percpu_mask_interrupt(void *arg)
-{
-	struct mvneta_port *pp = arg;
-
-	/* All the queue are masked, but actually only the ones
-	 * maped to this CPU will be masked
-	 */
-	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
-	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
-	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
-}
-
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
-	unsigned int cpu;
+	int cpu;
 
 	mvneta_max_rx_size_set(pp, pp->pkt_size);
 	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -2564,16 +2577,15 @@ static void mvneta_start_dev(struct mvneta_port *pp)
 	mvneta_port_enable(pp);
 
 	/* Enable polling on the port */
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
 
 		napi_enable(&port->napi);
 	}
 
 	/* Unmask interrupts. It has to be done from each CPU */
-	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
-					 pp, true);
+	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+
 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
 		    MVNETA_CAUSE_LINK_CHANGE |
@@ -2589,7 +2601,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
 	phy_stop(pp->phy_dev);
 
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
 
 		napi_disable(&port->napi);
@@ -2604,13 +2616,10 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
 	mvneta_port_disable(pp);
 
 	/* Clear all ethernet port interrupts */
-	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
-	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
 
 	/* Mask all ethernet port interrupts */
-	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
-	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
-	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
 
 	mvneta_tx_reset(pp);
 	mvneta_rx_reset(pp);
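
The for_each_present_cpu() to for_each_online_cpu() change in both
mvneta_start_dev() and mvneta_stop_dev() matters on SMP: present CPUs
include CPUs that are currently offline, whose per-CPU NAPI contexts are
managed by the hotplug notifier instead. The corrected start-side loop, for
reference:

    /* Enable polling only on CPUs that are actually running; CPUs
     * brought online later are handled by the CPU notifier.
     */
    for_each_online_cpu(cpu) {
            struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

            napi_enable(&port->napi);
    }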
@@ -2847,11 +2856,20 @@ static void mvneta_percpu_disable(void *arg)
 	disable_percpu_irq(pp->dev->irq);
 }
 
+/* Electing a CPU must be done in an atomic way: it should be done
+ * after or before the removal/insertion of a CPU and this function is
+ * not reentrant.
+ */
 static void mvneta_percpu_elect(struct mvneta_port *pp)
 {
-	int online_cpu_idx, max_cpu, cpu, i = 0;
+	int elected_cpu = 0, max_cpu, cpu, i = 0;
 
-	online_cpu_idx = pp->rxq_def % num_online_cpus();
+	/* Use the cpu associated to the rxq when it is online, in all
+	 * the other cases, use the cpu 0 which can't be offline.
+	 */
+	if (cpu_online(pp->rxq_def))
+		elected_cpu = pp->rxq_def;
 
 	max_cpu = num_present_cpus();
 
 	for_each_online_cpu(cpu) {
@@ -2862,7 +2880,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
 		if ((rxq % max_cpu) == cpu)
 			rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
 
-		if (i == online_cpu_idx)
+		if (cpu == elected_cpu)
 			/* Map the default receive queue queue to the
 			 * elected CPU
 			 */
@@ -2873,7 +2891,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
 		 * the CPU bound to the default RX queue
 		 */
 		if (txq_number == 1)
-			txq_map = (i == online_cpu_idx) ?
+			txq_map = (cpu == elected_cpu) ?
 				MVNETA_CPU_TXQ_ACCESS(1) : 0;
 		else
 			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
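
To see why replacing online_cpu_idx with elected_cpu fixes the CPU choice,
here is a standalone model of the new selection rule (plain C with
hypothetical model_* names, not driver code), including a worked case the
old modulo rule got wrong:

    #include <assert.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for the kernel's cpu_online() predicate,
     * with the online CPUs given as a bit mask.
     */
    static bool model_cpu_online(int cpu, unsigned int online_mask)
    {
            return online_mask & (1u << cpu);
    }

    /* New rule: elect the CPU bound to rxq_def when it is online,
     * otherwise fall back to CPU 0, which can never go offline.
     */
    static int model_elect(int rxq_def, unsigned int online_mask)
    {
            return model_cpu_online(rxq_def, online_mask) ? rxq_def : 0;
    }

    int main(void)
    {
            /* rxq_def = 3 with only CPUs 0-1 online: the old rule
             * (3 % 2 == 1) silently elected the second online CPU;
             * the new rule falls back to CPU 0 deterministically.
             */
            assert(model_elect(3, 0x3) == 0);
            /* The CPU bound to rxq_def is online: keep it. */
            assert(model_elect(1, 0x3) == 1);
            return 0;
    }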
@@ -2902,6 +2920,14 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
+		spin_lock(&pp->lock);
+		/* Configuring the driver for a new CPU while the
+		 * driver is stopping is racy, so just avoid it.
+		 */
+		if (pp->is_stopped) {
+			spin_unlock(&pp->lock);
+			break;
+		}
 		netif_tx_stop_all_queues(pp->dev);
 
 		/* We have to synchronise on tha napi of each CPU
@@ -2917,9 +2943,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 		}
 
 		/* Mask all ethernet port interrupts */
-		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
-		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
-		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+		on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
 
 		napi_enable(&port->napi);
 
@@ -2934,27 +2958,25 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 		 */
 		mvneta_percpu_elect(pp);
 
-		/* Unmask all ethernet port interrupts, as this
-		 * notifier is called for each CPU then the CPU to
-		 * Queue mapping is applied
-		 */
-		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-			MVNETA_RX_INTR_MASK(rxq_number) |
-			MVNETA_TX_INTR_MASK(txq_number) |
-			MVNETA_MISCINTR_INTR_MASK);
+		/* Unmask all ethernet port interrupts */
+		on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+
 		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
 			MVNETA_CAUSE_PHY_STATUS_CHANGE |
 			MVNETA_CAUSE_LINK_CHANGE |
 			MVNETA_CAUSE_PSC_SYNC_CHANGE);
 		netif_tx_start_all_queues(pp->dev);
+		spin_unlock(&pp->lock);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		netif_tx_stop_all_queues(pp->dev);
+		/* Thanks to this lock we are sure that any pending
+		 * cpu election is done
+		 */
+		spin_lock(&pp->lock);
 
 		/* Mask all ethernet port interrupts */
-		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
-		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
-		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+		on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+		spin_unlock(&pp->lock);
 
 		napi_synchronize(&port->napi);
 		napi_disable(&port->napi);
@@ -2968,12 +2990,11 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		/* Check if a new CPU must be elected now this on is down */
+		spin_lock(&pp->lock);
 		mvneta_percpu_elect(pp);
+		spin_unlock(&pp->lock);
 
 		/* Unmask all ethernet port interrupts */
-		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-			MVNETA_RX_INTR_MASK(rxq_number) |
-			MVNETA_TX_INTR_MASK(txq_number) |
-			MVNETA_MISCINTR_INTR_MASK);
+		on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+
 		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
 			MVNETA_CAUSE_PHY_STATUS_CHANGE |
 			MVNETA_CAUSE_LINK_CHANGE |
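
The notifier and mvneta_stop() now serialize on pp->lock, so a CPU coming
online either finishes its configuration before the stop proceeds, or
observes is_stopped and bails out. A condensed sketch of the two sides of
the race, abridged from the hunks above and below:

    /* hotplug notifier, CPU_ONLINE path */
    spin_lock(&pp->lock);
    if (pp->is_stopped) {           /* stop already in progress */
            spin_unlock(&pp->lock);
            break;
    }
    /* ... configure the new CPU ... */
    spin_unlock(&pp->lock);

    /* mvneta_stop() */
    spin_lock(&pp->lock);
    pp->is_stopped = true;          /* refuse new-CPU setup from now on */
    /* ... stop the device, unregister the notifier ... */
    spin_unlock(&pp->lock);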
@@ -2988,7 +3009,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 static int mvneta_open(struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
-	int ret, cpu;
+	int ret;
 
 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
 	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
@@ -3010,22 +3031,12 @@ static int mvneta_open(struct net_device *dev)
 		goto err_cleanup_txqs;
 	}
 
-	/* Even though the documentation says that request_percpu_irq
-	 * doesn't enable the interrupts automatically, it actually
-	 * does so on the local CPU.
-	 *
-	 * Make sure it's disabled.
-	 */
-	mvneta_percpu_disable(pp);
-
 	/* Enable per-CPU interrupt on all the CPU to handle our RX
 	 * queue interrupts
 	 */
-	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, mvneta_percpu_enable,
-					 pp, true);
+	on_each_cpu(mvneta_percpu_enable, pp, true);
 
+	pp->is_stopped = false;
 	/* Register a CPU notifier to handle the case where our CPU
 	 * might be taken offline.
 	 */
@@ -3057,13 +3068,20 @@ static int mvneta_stop(struct net_device *dev)
 static int mvneta_stop(struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
-	int cpu;
 
+	/* Inform that we are stopping so we don't want to setup the
+	 * driver for new CPUs in the notifiers
+	 */
+	spin_lock(&pp->lock);
+	pp->is_stopped = true;
 	mvneta_stop_dev(pp);
 	mvneta_mdio_remove(pp);
 	unregister_cpu_notifier(&pp->cpu_notifier);
-	for_each_present_cpu(cpu)
-		smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
+	/* Now that the notifier are unregistered, we can release le
+	 * lock
+	 */
+	spin_unlock(&pp->lock);
+	on_each_cpu(mvneta_percpu_disable, pp, true);
 	free_percpu_irq(dev->irq, pp->ports);
 	mvneta_cleanup_rxqs(pp);
 	mvneta_cleanup_txqs(pp);
@@ -3312,9 +3330,7 @@ static int mvneta_config_rss(struct mvneta_port *pp)
 	netif_tx_stop_all_queues(pp->dev);
 
-	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
-					 pp, true);
+	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
 
 	/* We have to synchronise on the napi of each CPU */
 	for_each_online_cpu(cpu) {
@@ -3335,7 +3351,9 @@ static int mvneta_config_rss(struct mvneta_port *pp)
 	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
 
 	/* Update the elected CPU matching the new rxq_def */
+	spin_lock(&pp->lock);
 	mvneta_percpu_elect(pp);
+	spin_unlock(&pp->lock);
 
 	/* We have to synchronise on the napi of each CPU */
 	for_each_online_cpu(cpu) {