Commit ef7d1b24 authored by Greg Kroah-Hartman

Merge gregkh@master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

......@@ -294,15 +294,15 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
Default: 87380*2 bytes.
tcp_mem - vector of 3 INTEGERs: min, pressure, max
-low: below this number of pages TCP is not bothered about its
+min: below this number of pages TCP is not bothered about its
memory appetite.
pressure: when amount of memory allocated by TCP exceeds this number
of pages, TCP moderates its memory consumption and enters memory
pressure mode, which is exited when memory consumption falls
under "low".
under "min".
-high: number of pages allowed for queueing by all TCP sockets.
+max: number of pages allowed for queueing by all TCP sockets.
Defaults are calculated at boot time from amount of available
memory.
......
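For reference, the three values surface in order as a single whitespace-separated triple under /proc/sys/net/ipv4/tcp_mem. A minimal userspace sketch (not part of the patch) that reads them back; error handling kept short for illustration:

```c
#include <stdio.h>

int main(void)
{
	unsigned long min, pressure, max;
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_mem", "r");

	if (!f)
		return 1;
	/* three page counts: min, pressure, max */
	if (fscanf(f, "%lu %lu %lu", &min, &pressure, &max) == 3)
		printf("min=%lu pressure=%lu max=%lu (pages)\n",
		       min, pressure, max);
	fclose(f);
	return 0;
}
```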
......@@ -56,8 +56,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.4.43"
#define DRV_MODULE_RELDATE "June 28, 2006"
#define DRV_MODULE_VERSION "1.4.44"
#define DRV_MODULE_RELDATE "August 10, 2006"
#define RUN_AT(x) (jiffies + (x))
......@@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
-u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+u32 diff;
+smp_mb();
+diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
if (diff > MAX_TX_DESC_CNT)
diff = (diff & MAX_TX_DESC_CNT) - 1;
return (bp->tx_ring_size - diff);
......@@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
unsigned long align;
-skb = dev_alloc_skb(bp->rx_buf_size);
+skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
if (skb == NULL) {
return -ENOMEM;
}
......@@ -1578,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
skb_reserve(skb, 8 - align);
}
-skb->dev = bp->dev;
mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
......@@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp)
}
bp->tx_cons = sw_cons;
+/* Need to make the tx_cons update visible to bnx2_start_xmit()
+* before checking for netif_queue_stopped(). Without the
+* memory barrier, there is a small possibility that bnx2_start_xmit()
+* will miss it and cause the queue to be stopped forever.
+*/
+smp_mb();
-if (unlikely(netif_queue_stopped(bp->dev))) {
-spin_lock(&bp->tx_lock);
+if (unlikely(netif_queue_stopped(bp->dev)) &&
+(bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+netif_tx_lock(bp->dev);
if ((netif_queue_stopped(bp->dev)) &&
-(bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
+(bnx2_tx_avail(bp) > bp->tx_wake_thresh))
netif_wake_queue(bp->dev);
-}
-spin_unlock(&bp->tx_lock);
+netif_tx_unlock(bp->dev);
}
}
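The comment above describes the classic paired-barrier pattern: the consumer publishes tx_cons and then checks the stopped flag, while the producer sets the stopped flag and then re-checks available space, with a full barrier between each pair so at least one side observes the other's update. A hedged userspace analogue using C11 fences in place of the kernel's smp_mb(); all names here are illustrative, not bnx2's:

```c
#include <stdatomic.h>

struct txq {
	atomic_uint cons;	/* consumer index, published by reclaim */
	atomic_bool stopped;	/* queue-stopped flag, set by producer */
};

/* consumer side (cf. bnx2_tx_int): publish cons, fence, test stopped */
void reclaim_done(struct txq *q, unsigned int new_cons)
{
	atomic_store_explicit(&q->cons, new_cons, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	if (atomic_load_explicit(&q->stopped, memory_order_relaxed))
		atomic_store_explicit(&q->stopped, 0,
				      memory_order_relaxed);	/* wake */
}

/* producer side (cf. bnx2_start_xmit): stop, fence, re-check space */
void maybe_stop(struct txq *q, unsigned int prod,
		unsigned int ring_size, unsigned int wake_thresh)
{
	atomic_store_explicit(&q->stopped, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with above */
	if (ring_size - (prod - atomic_load_explicit(&q->cons,
						     memory_order_relaxed))
	    > wake_thresh)	/* consumer freed space meanwhile: undo */
		atomic_store_explicit(&q->stopped, 0, memory_order_relaxed);
}
```

Either the producer's re-check sees the new cons, or the consumer's check sees the stopped flag; the queue can no longer stall forever.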
......@@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
struct sk_buff *new_skb;
-new_skb = dev_alloc_skb(len + 2);
+new_skb = netdev_alloc_skb(bp->dev, len + 2);
if (new_skb == NULL)
goto reuse_rx;
......@@ -1797,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
skb_reserve(new_skb, 2);
skb_put(new_skb, len);
-new_skb->dev = bp->dev;
bnx2_reuse_rx_skb(bp, skb,
sw_ring_cons, sw_ring_prod);
......@@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
struct tx_bd *txbd;
u32 val;
+bp->tx_wake_thresh = bp->tx_ring_size / 2;
txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
......@@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
return -EINVAL;
pkt_size = 1514;
-skb = dev_alloc_skb(pkt_size);
+skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
packet = skb_put(skb, pkt_size);
......@@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
#endif
/* Called with netif_tx_lock.
-* hard_start_xmit is pseudo-lockless - a lock is only required when
-* the tx queue is full. This way, we get the benefit of lockless
-* operations most of the time without the complexities to handle
-* netif_stop_queue/wake_queue race conditions.
+* bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+* netif_wake_queue().
*/
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
......@@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
-spin_lock(&bp->tx_lock);
netif_stop_queue(dev);
-if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
netif_wake_queue(dev);
-spin_unlock(&bp->tx_lock);
}
return NETDEV_TX_OK;
......@@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->pdev = pdev;
spin_lock_init(&bp->phy_lock);
-spin_lock_init(&bp->tx_lock);
INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
......@@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->mac_addr[5] = (u8) reg;
bp->tx_ring_size = MAX_TX_DESC_CNT;
-bnx2_set_rx_ring_size(bp, 100);
+bnx2_set_rx_ring_size(bp, 255);
bp->rx_csum = 1;
......
......@@ -3890,10 +3890,6 @@ struct bnx2 {
u32 tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES)));
u16 tx_prod;
-struct tx_bd *tx_desc_ring;
-struct sw_bd *tx_buf_ring;
-int tx_ring_size;
u16 tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
u16 hw_tx_cons;
......@@ -3916,9 +3912,11 @@ struct bnx2 {
struct sw_bd *rx_buf_ring;
struct rx_bd *rx_desc_ring[MAX_RX_RINGS];
-/* Only used to synchronize netif_stop_queue/wake_queue when tx */
-/* ring is full */
-spinlock_t tx_lock;
+/* TX constants */
+struct tx_bd *tx_desc_ring;
+struct sw_bd *tx_buf_ring;
+int tx_ring_size;
+u32 tx_wake_thresh;
/* End of fields used in the performance code paths. */
......
......@@ -192,7 +192,7 @@ struct cardmap {
void *ptr[CARDMAP_WIDTH];
};
static void *cardmap_get(struct cardmap *map, unsigned int nr);
-static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
+static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
static unsigned int cardmap_find_first_free(struct cardmap *map);
static void cardmap_destroy(struct cardmap **map);
......@@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan)
{
struct channel *pch;
-pch = kmalloc(sizeof(struct channel), GFP_KERNEL);
+pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
if (pch == 0)
return -ENOMEM;
-memset(pch, 0, sizeof(struct channel));
pch->ppp = NULL;
pch->chan = chan;
chan->ppp = pch;
......@@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp)
int ret = -ENOMEM;
int i;
-ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
+ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL);
if (!ppp)
goto out;
dev = alloc_netdev(0, "", ppp_setup);
if (!dev)
goto out1;
-memset(ppp, 0, sizeof(struct ppp));
ppp->mru = PPP_MRU;
init_ppp_file(&ppp->file, INTERFACE);
......@@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp)
}
atomic_inc(&ppp_unit_count);
-cardmap_set(&all_ppp_units, unit, ppp);
+ret = cardmap_set(&all_ppp_units, unit, ppp);
+if (ret != 0)
+goto out3;
mutex_unlock(&all_ppp_mutex);
*retp = 0;
return ppp;
+out3:
+atomic_dec(&ppp_unit_count);
out2:
mutex_unlock(&all_ppp_mutex);
free_netdev(dev);
......@@ -2695,7 +2698,7 @@ static void *cardmap_get(struct cardmap *map, unsigned int nr)
return NULL;
}
-static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
+static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
{
struct cardmap *p;
int i;
......@@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
do {
/* need a new top level */
-struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
-memset(np, 0, sizeof(*np));
+struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+if (!np)
+goto enomem;
np->ptr[0] = p;
if (p != NULL) {
np->shift = p->shift + CARDMAP_ORDER;
......@@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
while (p->shift > 0) {
i = (nr >> p->shift) & CARDMAP_MASK;
if (p->ptr[i] == NULL) {
-struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
-memset(np, 0, sizeof(*np));
+struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+if (!np)
+goto enomem;
np->shift = p->shift - CARDMAP_ORDER;
np->parent = p;
p->ptr[i] = np;
......@@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
set_bit(i, &p->inuse);
else
clear_bit(i, &p->inuse);
+return 0;
+enomem:
+return -ENOMEM;
}
static unsigned int cardmap_find_first_free(struct cardmap *map)
......
......@@ -155,6 +155,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
{
struct net_device_stats *stats;
+if (skb_bond_should_drop(skb)) {
+dev_kfree_skb_any(skb);
+return NET_RX_DROP;
+}
skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
if (skb->dev == NULL) {
dev_kfree_skb_any(skb);
......
......@@ -320,6 +320,9 @@ struct net_device
#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
+/* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
......@@ -1012,6 +1015,30 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
unlikely(skb->ip_summed != CHECKSUM_HW));
}
+/* On bonding slaves other than the currently active slave, suppress
+* duplicates except for 802.3ad ETH_P_SLOW and alb non-mcast/bcast.
+*/
+static inline int skb_bond_should_drop(struct sk_buff *skb)
+{
+struct net_device *dev = skb->dev;
+struct net_device *master = dev->master;
+if (master &&
+(dev->priv_flags & IFF_SLAVE_INACTIVE)) {
+if (master->priv_flags & IFF_MASTER_ALB) {
+if (skb->pkt_type != PACKET_BROADCAST &&
+skb->pkt_type != PACKET_MULTICAST)
+return 0;
+}
+if (master->priv_flags & IFF_MASTER_8023AD &&
+skb->protocol == __constant_htons(ETH_P_SLOW))
+return 0;
+return 1;
+}
+return 0;
+}
#endif /* __KERNEL__ */
#endif /* _LINUX_DEV_H */
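The new helper's policy: an inactive bonding slave drops frames, except that an ALB master keeps non-broadcast/non-multicast traffic and an 802.3ad master keeps ETH_P_SLOW (LACPDU) frames. A small userspace model of just that decision, for illustration only; the enum and flag names below are made up, not the kernel's:

```c
#include <assert.h>
#include <stdbool.h>

enum { MASTER_ALB = 1, MASTER_8023AD = 2 };	/* stand-ins for priv_flags */
enum pkt { UNICAST, BROADCAST, MULTICAST };

/* decision for a frame arriving on an *inactive* slave */
static bool should_drop(int master_flags, enum pkt type, bool eth_p_slow)
{
	if (master_flags & MASTER_ALB) {
		if (type != BROADCAST && type != MULTICAST)
			return false;	/* alb keeps non-mcast/bcast */
	}
	if ((master_flags & MASTER_8023AD) && eth_p_slow)
		return false;		/* keep LACPDUs */
	return true;			/* suppress the duplicate */
}

int main(void)
{
	assert(should_drop(0, UNICAST, false));
	assert(!should_drop(MASTER_ALB, UNICAST, false));
	assert(should_drop(MASTER_ALB, BROADCAST, false));
	assert(!should_drop(MASTER_8023AD, UNICAST, true));
	return 0;
}
```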
......@@ -507,7 +507,7 @@ int __init atm_proc_init(void)
goto out;
}
-void __exit atm_proc_exit(void)
+void atm_proc_exit(void)
{
atm_proc_dirs_remove();
}
......@@ -386,12 +386,17 @@ void br_features_recompute(struct net_bridge *br)
checksum = 0;
if (feature & NETIF_F_GSO)
-feature |= NETIF_F_TSO;
+feature |= NETIF_F_GSO_SOFTWARE;
feature |= NETIF_F_GSO;
features &= feature;
}
+if (!(checksum & NETIF_F_ALL_CSUM))
+features &= ~NETIF_F_SG;
+if (!(features & NETIF_F_SG))
+features &= ~NETIF_F_GSO_MASK;
br->dev->features = features | checksum | NETIF_F_LLTX |
NETIF_F_GSO_ROBUST;
}
......
......@@ -116,6 +116,7 @@
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
+#include <linux/ctype.h>
/*
* The list of packet types we will receive (as opposed to discard)
......@@ -632,14 +633,22 @@ struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mas
* @name: name string
*
* Network device names need to be valid file names to
-to allow sysfs to work
+to allow sysfs to work. We also disallow any kind of
+whitespace.
*/
int dev_valid_name(const char *name)
{
-return !(*name == '\0'
-|| !strcmp(name, ".")
-|| !strcmp(name, "..")
-|| strchr(name, '/'));
+if (*name == '\0')
+return 0;
+if (!strcmp(name, ".") || !strcmp(name, ".."))
+return 0;
+while (*name) {
+if (*name == '/' || isspace(*name))
+return 0;
+name++;
+}
+return 1;
}
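The rewritten dev_valid_name() uses nothing kernel-specific beyond the ctype helpers, so its rules are easy to exercise in userspace. An illustrative re-implementation with a few test cases (assuming libc's isspace() matches the kernel's for this purpose):

```c
#include <assert.h>
#include <ctype.h>
#include <string.h>

/* userspace copy of the patched logic, for illustration only */
static int valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;
	while (*name) {
		if (*name == '/' || isspace((unsigned char)*name))
			return 0;
		name++;
	}
	return 1;
}

int main(void)
{
	assert(valid_name("eth0"));
	assert(!valid_name(""));
	assert(!valid_name("."));
	assert(!valid_name(".."));
	assert(!valid_name("a/b"));
	assert(!valid_name("eth 0"));	/* whitespace is now rejected too */
	return 0;
}
```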
/**
......@@ -1619,26 +1628,10 @@ static inline struct net_device *skb_bond(struct sk_buff *skb)
struct net_device *dev = skb->dev;
if (dev->master) {
-/*
-* On bonding slaves other than the currently active
-* slave, suppress duplicates except for 802.3ad
-* ETH_P_SLOW and alb non-mcast/bcast.
-*/
-if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-if (dev->master->priv_flags & IFF_MASTER_ALB) {
-if (skb->pkt_type != PACKET_BROADCAST &&
-skb->pkt_type != PACKET_MULTICAST)
-goto keep;
-}
-if (dev->master->priv_flags & IFF_MASTER_8023AD &&
-skb->protocol == __constant_htons(ETH_P_SLOW))
-goto keep;
+if (skb_bond_should_drop(skb)) {
kfree_skb(skb);
return NULL;
}
-keep:
skb->dev = dev->master;
}
......
......@@ -130,12 +130,13 @@ void __init net_random_init(void)
static int net_random_reseed(void)
{
int i;
-unsigned long seed[NR_CPUS];
+unsigned long seed;
-get_random_bytes(seed, sizeof(seed));
for_each_possible_cpu(i) {
struct nrnd_state *state = &per_cpu(net_rand_state,i);
-__net_srandom(state, seed[i]);
+get_random_bytes(&seed, sizeof(seed));
+__net_srandom(state, seed);
}
return 0;
}
......
......@@ -159,7 +159,7 @@ void free_fib_info(struct fib_info *fi)
void fib_release_info(struct fib_info *fi)
{
-write_lock(&fib_info_lock);
+write_lock_bh(&fib_info_lock);
if (fi && --fi->fib_treeref == 0) {
hlist_del(&fi->fib_hash);
if (fi->fib_prefsrc)
......@@ -172,7 +172,7 @@ void fib_release_info(struct fib_info *fi)
fi->fib_dead = 1;
fib_info_put(fi);
}
-write_unlock(&fib_info_lock);
+write_unlock_bh(&fib_info_lock);
}
static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
......@@ -598,7 +598,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
unsigned int old_size = fib_hash_size;
unsigned int i, bytes;
-write_lock(&fib_info_lock);
+write_lock_bh(&fib_info_lock);
old_info_hash = fib_info_hash;
old_laddrhash = fib_info_laddrhash;
fib_hash_size = new_size;
......@@ -639,7 +639,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
}
fib_info_laddrhash = new_laddrhash;
-write_unlock(&fib_info_lock);
+write_unlock_bh(&fib_info_lock);
bytes = old_size * sizeof(struct hlist_head *);
fib_hash_free(old_info_hash, bytes);
......@@ -820,7 +820,7 @@ fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
fi->fib_treeref++;
atomic_inc(&fi->fib_clntref);
-write_lock(&fib_info_lock);
+write_lock_bh(&fib_info_lock);
hlist_add_head(&fi->fib_hash,
&fib_info_hash[fib_info_hashfn(fi)]);
if (fi->fib_prefsrc) {
......@@ -839,7 +839,7 @@ fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
head = &fib_info_devhash[hash];
hlist_add_head(&nh->nh_hash, head);
} endfor_nexthops(fi)
-write_unlock(&fib_info_lock);
+write_unlock_bh(&fib_info_lock);
return fi;
err_inval:
......
......@@ -1793,29 +1793,35 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
struct in_device *in_dev;
u32 group = imr->imr_multiaddr.s_addr;
u32 ifindex;
+int ret = -EADDRNOTAVAIL;
rtnl_lock();
in_dev = ip_mc_find_dev(imr);
-if (!in_dev) {
-rtnl_unlock();
-return -ENODEV;
-}
ifindex = imr->imr_ifindex;
for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
-if (iml->multi.imr_multiaddr.s_addr == group &&
-iml->multi.imr_ifindex == ifindex) {
-(void) ip_mc_leave_src(sk, iml, in_dev);
+if (iml->multi.imr_multiaddr.s_addr != group)
+continue;
+if (ifindex) {
+if (iml->multi.imr_ifindex != ifindex)
+continue;
+} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
+iml->multi.imr_address.s_addr)
+continue;
+(void) ip_mc_leave_src(sk, iml, in_dev);
-*imlp = iml->next;
+*imlp = iml->next;
+if (in_dev)
ip_mc_dec_group(in_dev, group);
-rtnl_unlock();
-sock_kfree_s(sk, iml, sizeof(*iml));
-return 0;
-}
+rtnl_unlock();
+sock_kfree_s(sk, iml, sizeof(*iml));
+return 0;
}
+if (!in_dev)
+ret = -ENODEV;
rtnl_unlock();
-return -EADDRNOTAVAIL;
+return ret;
}
int ip_mc_source(int add, int omode, struct sock *sk, struct
......@@ -2199,13 +2205,13 @@ void ip_mc_drop_socket(struct sock *sk)
struct in_device *in_dev;
inet->mc_list = iml->next;
-if ((in_dev = inetdev_by_index(iml->multi.imr_ifindex)) != NULL) {
-(void) ip_mc_leave_src(sk, iml, in_dev);
+in_dev = inetdev_by_index(iml->multi.imr_ifindex);
+(void) ip_mc_leave_src(sk, iml, in_dev);
+if (in_dev != NULL) {
ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
in_dev_put(in_dev);
}
sock_kfree_s(sk, iml, sizeof(*iml));
}
rtnl_unlock();
}
......
......@@ -415,21 +415,18 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[0], *id);
read_lock_bh(&ip_conntrack_lock);
+last = (struct ip_conntrack *)cb->args[1];
for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) {
restart:
-last = (struct ip_conntrack *)cb->args[1];
list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) {
h = (struct ip_conntrack_tuple_hash *) i;
if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
continue;
ct = tuplehash_to_ctrack(h);
-if (last != NULL) {
-if (ct == last) {
-ip_conntrack_put(last);
-cb->args[1] = 0;
-last = NULL;
-} else
+if (cb->args[1]) {
+if (ct != last)
continue;
+cb->args[1] = 0;
}
if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
......@@ -440,17 +437,17 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
goto out;
}
}
-if (last != NULL) {
-ip_conntrack_put(last);
+if (cb->args[1]) {
cb->args[1] = 0;
+goto restart;
}
}
out:
read_unlock_bh(&ip_conntrack_lock);
+if (last)
+ip_conntrack_put(last);
DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
return skb->len;
}
......
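This hunk and the matching nf_conntrack one below restructure the same resumable-dump pattern: when the buffer fills mid-bucket, remember the first entry that did not fit, skip forward to it on the next call, and rescan the bucket from the top ("goto restart") if that entry has since been deleted. A self-contained sketch of the pattern with hypothetical names; a fixed array stands in for the conntrack hash:

```c
#include <stdio.h>

#define NBUCKETS 4
#define NITEMS   3
#define BUDGET   5	/* entries that fit per dump call */

static int table[NBUCKETS][NITEMS] = {
	{1, 2, 3}, {4, 5, 6}, {7, 8, 9}, {10, 11, 12},
};

/* state carried between calls, like cb->args[0] / cb->args[1] */
struct cursor { int bucket; int last; };

static int dump(struct cursor *c)
{
	int emitted = 0;

	for (; c->bucket < NBUCKETS; c->bucket++) {
restart:
		for (int i = 0; i < NITEMS; i++) {
			int item = table[c->bucket][i];

			if (c->last) {		/* resuming: seek saved entry */
				if (item != c->last)
					continue;
				c->last = 0;	/* found it; it still needs
						   to be emitted below */
			}
			if (emitted == BUDGET) {
				c->last = item;	/* first entry that didn't fit */
				return emitted;
			}
			printf("%d ", item);
			emitted++;
		}
		if (c->last) {		/* saved entry vanished: redo bucket */
			c->last = 0;
			goto restart;
		}
	}
	return emitted;
}

int main(void)
{
	struct cursor c = { 0, 0 };

	while (dump(&c) == BUDGET)
		printf("| ");
	printf("\n");	/* prints: 1 2 3 4 5 | 6 7 8 9 10 | 11 12 */
	return 0;
}
```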
......@@ -230,7 +230,7 @@ ipt_do_table(struct sk_buff **pskb,
const char *indev, *outdev;
void *table_base;
struct ipt_entry *e, *back;
-struct xt_table_info *private = table->private;
+struct xt_table_info *private;
/* Initialization */
ip = (*pskb)->nh.iph;
......@@ -247,6 +247,7 @@ ipt_do_table(struct sk_buff **pskb,
read_lock_bh(&table->lock);
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
+private = table->private;
table_base = (void *)private->entries[smp_processor_id()];
e = get_entry(table_base, private->hook_entry[hook]);
......
......@@ -712,6 +712,11 @@ static int icmpv6_rcv(struct sk_buff **pskb)
return 0;
}
+/*
+* Special lock-class for __icmpv6_socket:
+*/
+static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
int __init icmpv6_init(struct net_proto_family *ops)
{
struct sock *sk;
......@@ -730,6 +735,14 @@ int __init icmpv6_init(struct net_proto_family *ops)
sk = per_cpu(__icmpv6_socket, i)->sk;
sk->sk_allocation = GFP_ATOMIC;
+/*
+* Split off their lock-class, because sk->sk_dst_lock
+* gets used from softirqs, which is safe for
+* __icmpv6_socket (because those never get directly used
+* via userspace syscalls), but unsafe for normal sockets.
+*/
+lockdep_set_class(&sk->sk_dst_lock,
+&icmpv6_socket_sk_dst_lock_key);
/* Enough space for 2 64K ICMP packets, including
* sk_buff struct overhead.
......
......@@ -268,13 +268,14 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
if ((dev = dev_get_by_index(mc_lst->ifindex)) != NULL) {
struct inet6_dev *idev = in6_dev_get(dev);
-(void) ip6_mc_leave_src(sk, mc_lst, idev);
if (idev) {
+(void) ip6_mc_leave_src(sk,mc_lst,idev);
__ipv6_dev_mc_dec(idev, &mc_lst->addr);
in6_dev_put(idev);
}
dev_put(dev);
-}
+} else
+(void) ip6_mc_leave_src(sk, mc_lst, NULL);
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return 0;
}
......@@ -334,13 +335,14 @@ void ipv6_sock_mc_close(struct sock *sk)
if (dev) {
struct inet6_dev *idev = in6_dev_get(dev);
-(void) ip6_mc_leave_src(sk, mc_lst, idev);
if (idev) {
+(void) ip6_mc_leave_src(sk, mc_lst, idev);
__ipv6_dev_mc_dec(idev, &mc_lst->addr);
in6_dev_put(idev);
}
dev_put(dev);
-}
+} else
+(void) ip6_mc_leave_src(sk, mc_lst, NULL);
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
......
......@@ -429,9 +429,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[0], *id);
read_lock_bh(&nf_conntrack_lock);
+last = (struct nf_conn *)cb->args[1];
for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
restart:
-last = (struct nf_conn *)cb->args[1];
list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) {
h = (struct nf_conntrack_tuple_hash *) i;
if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
......@@ -442,13 +442,10 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
* then dump everything. */
if (l3proto && L3PROTO(ct) != l3proto)
continue;
-if (last != NULL) {
-if (ct == last) {
-nf_ct_put(last);
-cb->args[1] = 0;
-last = NULL;
-} else
+if (cb->args[1]) {
+if (ct != last)
continue;
+cb->args[1] = 0;
}
if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
......@@ -459,17 +456,17 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
goto out;
}
}
-if (last != NULL) {
-nf_ct_put(last);
+if (cb->args[1]) {
cb->args[1] = 0;
+goto restart;
}
}
out:
read_unlock_bh(&nf_conntrack_lock);
+if (last)
+nf_ct_put(last);
DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
return skb->len;
}
......
......@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/skbuff.h>
+#include <linux/netfilter_bridge.h>
#include <linux/netfilter/xt_physdev.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge.h>
......
......@@ -796,7 +796,7 @@ static int __init init_u32(void)
{
printk("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
printk(" Perfomance counters on\n");
printk(" Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_POLICE
printk(" OLD policer on \n");
......