Commit 63788ea9 authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

@@ -3789,6 +3789,7 @@ e1000_netpoll(struct net_device *netdev)
         struct e1000_adapter *adapter = netdev_priv(netdev);
         disable_irq(adapter->pdev->irq);
         e1000_intr(adapter->pdev->irq, netdev, NULL);
+        e1000_clean_tx_irq(adapter);
         enable_irq(adapter->pdev->irq);
 }
 #endif
......
@@ -9,6 +9,7 @@
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
 #include <linux/rcupdate.h>
+#include <linux/list.h>
 
 struct netpoll;
@@ -26,6 +27,7 @@ struct netpoll {
 struct netpoll_info {
         spinlock_t poll_lock;
         int poll_owner;
+        int tries;
         int rx_flags;
         spinlock_t rx_lock;
         struct netpoll *rx_np; /* netpoll that registered an rx_hook */
@@ -60,25 +62,31 @@ static inline int netpoll_rx(struct sk_buff *skb)
         return ret;
 }
 
-static inline void netpoll_poll_lock(struct net_device *dev)
+static inline void *netpoll_poll_lock(struct net_device *dev)
 {
+        rcu_read_lock(); /* deal with race on ->npinfo */
         if (dev->npinfo) {
                 spin_lock(&dev->npinfo->poll_lock);
                 dev->npinfo->poll_owner = smp_processor_id();
+                return dev->npinfo;
         }
+        return NULL;
 }
 
-static inline void netpoll_poll_unlock(struct net_device *dev)
+static inline void netpoll_poll_unlock(void *have)
 {
-        if (dev->npinfo) {
-                dev->npinfo->poll_owner = -1;
-                spin_unlock(&dev->npinfo->poll_lock);
+        struct netpoll_info *npi = have;
+
+        if (npi) {
+                npi->poll_owner = -1;
+                spin_unlock(&npi->poll_lock);
         }
+        rcu_read_unlock();
 }
 
 #else
 #define netpoll_rx(a) 0
-#define netpoll_poll_lock(a)
+#define netpoll_poll_lock(a) 0
 #define netpoll_poll_unlock(a)
 #endif
......
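Note on the netpoll.h change above: netpoll_poll_lock() now returns the npinfo pointer it actually locked (or NULL), and netpoll_poll_unlock() takes that cookie back instead of re-reading dev->npinfo, which can become non-NULL between the two calls once netpoll_setup() publishes it; rcu_read_lock() covers that window. Below is a minimal userspace sketch of the same lock-cookie idiom, using a pthread mutex in place of the kernel spinlock/RCU primitives; struct fake_dev and all other names are made up for illustration.

/* build: cc -pthread lock_cookie.c */
#include <pthread.h>
#include <stdio.h>

/* stand-in for struct netpoll_info */
struct poll_info {
        pthread_mutex_t poll_lock;
        int poll_owner;
};

/* stand-in for struct net_device; npinfo may appear at any time */
struct fake_dev {
        struct poll_info *npinfo;
};

/* lock whatever npinfo is visible right now and hand it back as a cookie */
static struct poll_info *poll_lock(struct fake_dev *dev)
{
        struct poll_info *npi = dev->npinfo;

        if (npi) {
                pthread_mutex_lock(&npi->poll_lock);
                npi->poll_owner = 0;    /* pretend CPU 0 owns it */
                return npi;
        }
        return NULL;
}

/* unlock only what poll_lock() locked; never look at dev->npinfo again */
static void poll_unlock(struct poll_info *cookie)
{
        if (cookie) {
                cookie->poll_owner = -1;
                pthread_mutex_unlock(&cookie->poll_lock);
        }
}

int main(void)
{
        static struct poll_info npi = { PTHREAD_MUTEX_INITIALIZER, -1 };
        struct fake_dev dev = { NULL };         /* not yet published */
        struct poll_info *have;

        have = poll_lock(&dev);   /* returns NULL: nothing was locked */
        dev.npinfo = &npi;        /* npinfo shows up "in between" */
        poll_unlock(have);        /* still balanced: ignores the new npinfo */

        printf("balanced: %s\n", have == NULL ? "yes" : "no");
        return 0;
}

In exactly this interleaving, the old interface would have re-read dev->npinfo in the unlock path and released a lock it never took.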
@@ -255,7 +255,7 @@ struct sk_buff {
                                 nohdr:1;
                                 /* 3 bits spare */
         __u8                    pkt_type;
-        __u16                   protocol;
+        __be16                  protocol;
         void                    (*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER
......
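The skbuff.h change is an annotation-only fix: __be16 tells sparse that skb->protocol carries a big-endian (network byte order) value, so comparisons against host-order constants without a conversion get flagged. The typedef itself is kernel-internal; a plain userspace reminder of the conversion such fields require (the constant 0x0800, ETH_P_IP, is used purely as an example):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        /* the value as it sits in a __be16-style field: network byte order */
        uint16_t wire_proto = htons(0x0800);    /* ETH_P_IP */

        /* consumers must convert before comparing with host-order constants */
        if (ntohs(wire_proto) == 0x0800)
                printf("IPv4 frame (0x%04x)\n", ntohs(wire_proto));
        return 0;
}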
@@ -1696,7 +1696,8 @@ static void net_rx_action(struct softirq_action *h)
         struct softnet_data *queue = &__get_cpu_var(softnet_data);
         unsigned long start_time = jiffies;
         int budget = netdev_budget;
+        void *have;
 
         local_irq_disable();
 
         while (!list_empty(&queue->poll_list)) {
@@ -1709,10 +1710,10 @@ static void net_rx_action(struct softirq_action *h)
                 dev = list_entry(queue->poll_list.next,
                                  struct net_device, poll_list);
 
-                netpoll_poll_lock(dev);
+                have = netpoll_poll_lock(dev);
 
                 if (dev->quota <= 0 || dev->poll(dev, &budget)) {
-                        netpoll_poll_unlock(dev);
+                        netpoll_poll_unlock(have);
                         local_irq_disable();
                         list_del(&dev->poll_list);
                         list_add_tail(&dev->poll_list, &queue->poll_list);
@@ -1721,7 +1722,7 @@ static void net_rx_action(struct softirq_action *h)
                         else
                                 dev->quota = dev->weight;
                 } else {
-                        netpoll_poll_unlock(dev);
+                        netpoll_poll_unlock(have);
                         dev_put(dev);
                         local_irq_disable();
                 }
......
@@ -33,6 +33,7 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000
 
 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
@@ -248,14 +249,14 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
         int status;
         struct netpoll_info *npinfo;
 
-repeat:
-        if(!np || !np->dev || !netif_running(np->dev)) {
+        if (!np || !np->dev || !netif_running(np->dev)) {
                 __kfree_skb(skb);
                 return;
         }
 
-        /* avoid recursion */
         npinfo = np->dev->npinfo;
+
+        /* avoid recursion */
         if (npinfo->poll_owner == smp_processor_id() ||
             np->dev->xmit_lock_owner == smp_processor_id()) {
                 if (np->drop)
@@ -265,30 +266,37 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                 return;
         }
 
-        spin_lock(&np->dev->xmit_lock);
-        np->dev->xmit_lock_owner = smp_processor_id();
+        do {
+                npinfo->tries--;
+                spin_lock(&np->dev->xmit_lock);
+                np->dev->xmit_lock_owner = smp_processor_id();
 
-        /*
-         * network drivers do not expect to be called if the queue is
-         * stopped.
-         */
-        if (netif_queue_stopped(np->dev)) {
+                /*
+                 * network drivers do not expect to be called if the queue is
+                 * stopped.
+                 */
+                if (netif_queue_stopped(np->dev)) {
+                        np->dev->xmit_lock_owner = -1;
+                        spin_unlock(&np->dev->xmit_lock);
+                        netpoll_poll(np);
+                        udelay(50);
+                        continue;
+                }
+
+                status = np->dev->hard_start_xmit(skb, np->dev);
                 np->dev->xmit_lock_owner = -1;
                 spin_unlock(&np->dev->xmit_lock);
 
-                netpoll_poll(np);
-                goto repeat;
-        }
-
-        status = np->dev->hard_start_xmit(skb, np->dev);
-        np->dev->xmit_lock_owner = -1;
-        spin_unlock(&np->dev->xmit_lock);
+                /* success */
+                if(!status) {
+                        npinfo->tries = MAX_RETRIES; /* reset */
+                        return;
+                }
 
-        /* transmit busy */
-        if(status) {
+                /* transmit busy */
                 netpoll_poll(np);
-                goto repeat;
-        }
+                udelay(50);
+        } while (npinfo->tries > 0);
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
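The rewritten netpoll_send_skb() above trades the unbounded repeat/goto loop for a retry budget: every attempt decrements npinfo->tries, a stopped or busy queue triggers netpoll_poll() plus a short udelay(50) before the next pass, and a successful transmit resets the budget to MAX_RETRIES. A rough userspace sketch of that control flow, assuming a hypothetical try_xmit() that reports "busy" a few times before succeeding (all names invented for illustration):

#include <stdio.h>
#include <unistd.h>

#define MAX_RETRIES 20000

static int tries = MAX_RETRIES;

/* hypothetical driver hook: pretend the queue is busy the first few times */
static int try_xmit(const char *pkt)
{
        static int busy_left = 3;

        if (busy_left-- > 0)
                return -1;              /* "transmit busy" */
        printf("sent: %s\n", pkt);
        return 0;
}

static void send_with_retries(const char *pkt)
{
        do {
                tries--;

                if (try_xmit(pkt) == 0) {
                        tries = MAX_RETRIES;    /* success: reset the budget */
                        return;
                }

                /* busy: give the device a moment and try again */
                usleep(50);
        } while (tries > 0);

        fprintf(stderr, "gave up after %d attempts\n", MAX_RETRIES);
}

int main(void)
{
        send_with_retries("hello");
        return 0;
}

The reset on success matters: without it a long run of sends would eventually exhaust the counter even while the device is healthy.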
@@ -349,15 +357,11 @@ static void arp_reply(struct sk_buff *skb)
         unsigned char *arp_ptr;
         int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
         u32 sip, tip;
-        unsigned long flags;
         struct sk_buff *send_skb;
         struct netpoll *np = NULL;
 
-        spin_lock_irqsave(&npinfo->rx_lock, flags);
         if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
                 np = npinfo->rx_np;
-        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 
         if (!np)
                 return;
@@ -639,9 +643,11 @@ int netpoll_setup(struct netpoll *np)
                 if (!npinfo)
                         goto release;
 
                 npinfo->rx_flags = 0;
                 npinfo->rx_np = NULL;
                 npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
                 npinfo->poll_owner = -1;
+                npinfo->tries = MAX_RETRIES;
                 npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
         } else
                 npinfo = ndev->npinfo;
@@ -718,9 +724,16 @@ int netpoll_setup(struct netpoll *np)
                 npinfo->rx_np = np;
                 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
         }
 
+        /* fill up the skb queue */
+        refill_skbs();
+
         /* last thing to do is link it to the net device structure */
         ndev->npinfo = npinfo;
+
+        /* avoid racing with NAPI reading npinfo */
+        synchronize_rcu();
+
         return 0;
 
  release:
......
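The netpoll_setup() hunk above finishes every npinfo field (and pre-fills the skb pool via refill_skbs()) before the single assignment ndev->npinfo = npinfo, and only then calls synchronize_rcu(), so that by the time setup returns no poller is still running with a view of ->npinfo taken before the assignment. A loose userspace analogue of the publication half of that pattern, using a C11 release store paired with an acquire load instead of the kernel's RCU primitives; this mirrors only the ordering, not the grace-period wait, and all names are invented for illustration.

/* build: cc -pthread publish.c */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct info {
        int tries;
        int poll_owner;
};

/* shared "dev->npinfo" slot, initially unpublished (NULL) */
static _Atomic(struct info *) npinfo;

static void *reader(void *arg)
{
        (void)arg;

        /* acquire pairs with the release store below: if the pointer is
         * visible, the fields behind it are fully initialized too */
        struct info *p = atomic_load_explicit(&npinfo, memory_order_acquire);

        if (p)
                printf("reader: tries=%d poll_owner=%d\n", p->tries, p->poll_owner);
        else
                printf("reader: npinfo not published yet\n");
        return NULL;
}

int main(void)
{
        pthread_t t;
        struct info *p = malloc(sizeof(*p));

        pthread_create(&t, NULL, reader, NULL);

        /* initialize everything first... */
        p->tries = 20000;
        p->poll_owner = -1;

        /* ...then publish with one release store, as the very last step */
        atomic_store_explicit(&npinfo, p, memory_order_release);

        pthread_join(&t, NULL);
        free(p);
        return 0;
}

The reader sees either NULL or a fully initialized structure, never a half-built one; the kernel additionally needs synchronize_rcu() because its readers hold nothing the writer could wait on other than the RCU grace period.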