提交 59e90b2d 编写于 作者: R Roland Dreier 提交者: David S. Miller

ibm_emac: Convert to use napi_struct independent of struct net_device

Commit da3dedd9 ("[NET]: Make NAPI polling independent of struct
net_device objects.") changed the interface to NAPI polling.  Fix up
the ibm_newemac driver so that it works with this new interface.  This
is actually a nice cleanup because ibm_newemac is one of the drivers
that wants to have multiple NAPI structures for a single net_device.

Compile-tested only as I don't have a system that uses the ibm_newemac
driver.  This conversion follows the conversion for the ibm_emac driver
that was tested on real PowerPC 440SPe hardware.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
上级 61ba5b3c
...@@ -235,10 +235,10 @@ static irqreturn_t mal_serr(int irq, void *dev_instance) ...@@ -235,10 +235,10 @@ static irqreturn_t mal_serr(int irq, void *dev_instance)
static inline void mal_schedule_poll(struct mal_instance *mal) static inline void mal_schedule_poll(struct mal_instance *mal)
{ {
if (likely(netif_rx_schedule_prep(&mal->poll_dev))) { if (likely(napi_schedule_prep(&mal->napi))) {
MAL_DBG2(mal, "schedule_poll" NL); MAL_DBG2(mal, "schedule_poll" NL);
mal_disable_eob_irq(mal); mal_disable_eob_irq(mal);
__netif_rx_schedule(&mal->poll_dev); __napi_schedule(&mal->napi);
} else } else
MAL_DBG2(mal, "already in poll" NL); MAL_DBG2(mal, "already in poll" NL);
} }
...@@ -318,8 +318,7 @@ void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac) ...@@ -318,8 +318,7 @@ void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
msleep(1); msleep(1);
/* Synchronize with the MAL NAPI poller. */ /* Synchronize with the MAL NAPI poller. */
while (test_bit(__LINK_STATE_RX_SCHED, &mal->poll_dev.state)) napi_disable(&mal->napi);
msleep(1);
} }
void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac) void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
...@@ -330,11 +329,11 @@ void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac) ...@@ -330,11 +329,11 @@ void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
// XXX might want to kick a poll now... // XXX might want to kick a poll now...
} }
static int mal_poll(struct net_device *ndev, int *budget) static int mal_poll(struct napi_struct *napi, int budget)
{ {
struct mal_instance *mal = netdev_priv(ndev); struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
struct list_head *l; struct list_head *l;
int rx_work_limit = min(ndev->quota, *budget), received = 0, done; int received = 0;
unsigned long flags; unsigned long flags;
MAL_DBG2(mal, "poll(%d) %d ->" NL, *budget, MAL_DBG2(mal, "poll(%d) %d ->" NL, *budget,
...@@ -358,26 +357,21 @@ static int mal_poll(struct net_device *ndev, int *budget) ...@@ -358,26 +357,21 @@ static int mal_poll(struct net_device *ndev, int *budget)
int n; int n;
if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags))) if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
continue; continue;
n = mc->ops->poll_rx(mc->dev, rx_work_limit); n = mc->ops->poll_rx(mc->dev, budget);
if (n) { if (n) {
received += n; received += n;
rx_work_limit -= n; budget -= n;
if (rx_work_limit <= 0) { if (budget <= 0)
done = 0; goto more_work; // XXX What if this is the last one ?
// XXX What if this is the last one ?
goto more_work;
}
} }
} }
/* We need to disable IRQs to protect from RXDE IRQ here */ /* We need to disable IRQs to protect from RXDE IRQ here */
spin_lock_irqsave(&mal->lock, flags); spin_lock_irqsave(&mal->lock, flags);
__netif_rx_complete(ndev); __napi_complete(napi);
mal_enable_eob_irq(mal); mal_enable_eob_irq(mal);
spin_unlock_irqrestore(&mal->lock, flags); spin_unlock_irqrestore(&mal->lock, flags);
done = 1;
/* Check for "rotting" packet(s) */ /* Check for "rotting" packet(s) */
list_for_each(l, &mal->poll_list) { list_for_each(l, &mal->poll_list) {
struct mal_commac *mc = struct mal_commac *mc =
...@@ -387,12 +381,12 @@ static int mal_poll(struct net_device *ndev, int *budget) ...@@ -387,12 +381,12 @@ static int mal_poll(struct net_device *ndev, int *budget)
if (unlikely(mc->ops->peek_rx(mc->dev) || if (unlikely(mc->ops->peek_rx(mc->dev) ||
test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) { test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
MAL_DBG2(mal, "rotting packet" NL); MAL_DBG2(mal, "rotting packet" NL);
if (netif_rx_reschedule(ndev, received)) if (napi_reschedule(napi))
mal_disable_eob_irq(mal); mal_disable_eob_irq(mal);
else else
MAL_DBG2(mal, "already in poll list" NL); MAL_DBG2(mal, "already in poll list" NL);
if (rx_work_limit > 0) if (budget > 0)
goto again; goto again;
else else
goto more_work; goto more_work;
...@@ -401,13 +395,8 @@ static int mal_poll(struct net_device *ndev, int *budget) ...@@ -401,13 +395,8 @@ static int mal_poll(struct net_device *ndev, int *budget)
} }
more_work: more_work:
ndev->quota -= received; MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
*budget -= received; return received;
MAL_DBG2(mal, "poll() %d <- %d" NL, *budget,
done ? 0 : 1);
return done ? 0 : 1;
} }
static void mal_reset(struct mal_instance *mal) static void mal_reset(struct mal_instance *mal)
...@@ -538,11 +527,8 @@ static int __devinit mal_probe(struct of_device *ofdev, ...@@ -538,11 +527,8 @@ static int __devinit mal_probe(struct of_device *ofdev,
} }
INIT_LIST_HEAD(&mal->poll_list); INIT_LIST_HEAD(&mal->poll_list);
set_bit(__LINK_STATE_START, &mal->poll_dev.state); mal->napi.weight = CONFIG_IBM_NEW_EMAC_POLL_WEIGHT;
mal->poll_dev.weight = CONFIG_IBM_NEW_EMAC_POLL_WEIGHT; mal->napi.poll = mal_poll;
mal->poll_dev.poll = mal_poll;
mal->poll_dev.priv = mal;
atomic_set(&mal->poll_dev.refcnt, 1);
INIT_LIST_HEAD(&mal->list); INIT_LIST_HEAD(&mal->list);
spin_lock_init(&mal->lock); spin_lock_init(&mal->lock);
...@@ -653,11 +639,8 @@ static int __devexit mal_remove(struct of_device *ofdev) ...@@ -653,11 +639,8 @@ static int __devexit mal_remove(struct of_device *ofdev)
MAL_DBG(mal, "remove" NL); MAL_DBG(mal, "remove" NL);
/* Syncronize with scheduled polling, /* Synchronize with scheduled polling */
stolen from net/core/dev.c:dev_close() napi_disable(&mal->napi);
*/
clear_bit(__LINK_STATE_START, &mal->poll_dev.state);
netif_poll_disable(&mal->poll_dev);
if (!list_empty(&mal->list)) { if (!list_empty(&mal->list)) {
/* This is *very* bad */ /* This is *very* bad */
......
...@@ -197,7 +197,7 @@ struct mal_instance { ...@@ -197,7 +197,7 @@ struct mal_instance {
int serr_irq; /* MAL System Error IRQ */ int serr_irq; /* MAL System Error IRQ */
struct list_head poll_list; struct list_head poll_list;
struct net_device poll_dev; struct napi_struct napi;
struct list_head list; struct list_head list;
u32 tx_chan_mask; u32 tx_chan_mask;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册