Commit 31083eba authored by Linus Torvalds

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (45 commits)
  [NETFILTER]: xt_time should not assume CONFIG_KTIME_SCALAR
  [NET]: Move unneeded data to initdata section.
  [NET]: Cleanup pernet operation without CONFIG_NET_NS
  [TEHUTI]: Fix incorrect usage of strncat in bdx_get_drvinfo()
  [MYRI_SBUS]: Prevent that myri_do_handshake lies about ticks.
  [NETFILTER]: bridge: fix double POSTROUTING hook invocation
  [NETFILTER]: Consolidate nf_sockopt and compat_nf_sockopt
  [NETFILTER]: nf_nat: fix memset error
  [INET]: Use list_head-s in inetpeer.c
  [IPVS]: Remove unused exports.
  [NET]: Unexport sysctl_{r,w}mem_max.
  [TG3]: Update version to 3.86
  [TG3]: MII => TP
  [TG3]: Add A1 revs
  [TG3]: Increase the PCI MRRS
  [TG3]: Prescaler fix
  [TG3]: Limit 5784 / 5764 to MAC LED mode
  [TG3]: Disable GPHY autopowerdown
  [TG3]: CPMU adjustments for loopback tests
  [TG3]: Fix nvram selftest failures
  ...
@@ -14,8 +14,7 @@ If no base address is given at boot time, the driver will autoprobe
 ports 0x300, 0x280 and 0x310 (in that order). If no IRQ is given, the driver
 will try to probe for it.
-The driver can be used as a loadable module. See net-modules.txt for details
-of the parameters it can take.
+The driver can be used as a loadable module.
 Theoretically, one instance of the driver can now run multiple cards,
 in the standard way (when loading a module, say "modprobe 3c505
...
@@ -28,6 +28,7 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <net/sock.h>
+#include <linux/net.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -126,7 +127,7 @@ static void sock_shutdown(struct nbd_device *lo, int lock)
 	if (lo->sock) {
 		printk(KERN_WARNING "%s: shutting down socket\n",
 			lo->disk->disk_name);
-		lo->sock->ops->shutdown(lo->sock, SEND_SHUTDOWN|RCV_SHUTDOWN);
+		kernel_sock_shutdown(lo->sock, SHUT_RDWR);
 		lo->sock = NULL;
 	}
 	if (lock)
...
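For context on the sock_shutdown() hunk above: kernel_sock_shutdown() is the in-kernel helper for
shutting down a socket without going through the syscall path, and it takes the protocol-independent
SHUT_RD/SHUT_WR/SHUT_RDWR values rather than the SEND_SHUTDOWN/RCV_SHUTDOWN bit flags. A minimal
sketch of a caller (the helper name example_close_sock is an illustration, not part of the patch):

#include <linux/net.h>
#include <net/sock.h>

/* Sketch: tear down both directions of an in-kernel socket.
 * kernel_sock_shutdown() forwards to sock->ops->shutdown(), so the
 * caller no longer reaches into the proto_ops table directly. */
static void example_close_sock(struct socket *sock)
{
	if (sock)
		kernel_sock_shutdown(sock, SHUT_RDWR);
}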
This diff is collapsed.
@@ -19,8 +19,7 @@ menuconfig ARCNET
	  from <http://www.tldp.org/docs.html#howto>(even though ARCnet
	  is not really Ethernet).
-	  To compile this driver as a module, choose M here and read
-	  <file:Documentation/networking/net-modules.txt>. The module will
+	  To compile this driver as a module, choose M here. The module will
	  be called arcnet.
 if ARCNET
@@ -81,8 +80,7 @@ config ARCNET_COM90xx
	  have always used the old ARCnet driver without knowing what type of
	  card you had, this is probably the one for you.
-	  To compile this driver as a module, choose M here and read
-	  <file:Documentation/networking/net-modules.txt>. The module will
+	  To compile this driver as a module, choose M here. The module will
	  be called com90xx.
 config ARCNET_COM90xxIO
@@ -93,8 +91,7 @@ config ARCNET_COM90xxIO
	  the normal driver. Only use it if your card doesn't support shared
	  memory.
-	  To compile this driver as a module, choose M here and read
-	  <file:Documentation/networking/net-modules.txt>. The module will
+	  To compile this driver as a module, choose M here. The module will
	  be called com90io.
 config ARCNET_RIM_I
@@ -105,8 +102,7 @@ config ARCNET_RIM_I
	  driver is completely untested, so if you have one of these cards,
	  please mail <dwmw2@infradead.org>, especially if it works!
-	  To compile this driver as a module, choose M here and read
-	  <file:Documentation/networking/net-modules.txt>. The module will
+	  To compile this driver as a module, choose M here. The module will
	  be called arc-rimi.
 config ARCNET_COM20020
@@ -116,8 +112,7 @@ config ARCNET_COM20020
	  things as promiscuous mode, so packet sniffing is possible, and
	  extra diagnostic information.
-	  To compile this driver as a module, choose M here and read
-	  <file:Documentation/networking/net-modules.txt>. The module will
+	  To compile this driver as a module, choose M here. The module will
	  be called com20020.
 config ARCNET_COM20020_ISA
...
@@ -284,7 +284,7 @@ static __net_exit void loopback_net_exit(struct net *net)
 	unregister_netdev(dev);
 }
-static struct pernet_operations loopback_net_ops = {
+static struct pernet_operations __net_initdata loopback_net_ops = {
	.init = loopback_net_init,
	.exit = loopback_net_exit,
 };
...
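The loopback hunk above relies on the __net_initdata annotation: with CONFIG_NET_NS disabled the
pernet_operations table is only walked once during boot, so it can be discarded together with the
init sections, while with CONFIG_NET_NS enabled it must stay resident for namespaces created later.
Roughly (an approximation of the conditional in include/net/net_namespace.h, not a verbatim copy):

#ifdef CONFIG_NET_NS
#define __net_initdata			/* kept: needed whenever a new netns is created */
#else
#define __net_initdata	__initdata	/* boot-time only: freed with the init sections */
#endif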
@@ -134,7 +134,7 @@ static int myri_do_handshake(struct myri_eth *mp)
 	myri_disable_irq(mp->lregs, cregs);
-	while (tick++ <= 25) {
+	while (tick++ < 25) {
 		u32 softstate;
 		/* Wake it up. */
...
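The handshake fix above is a loop-bound correction: with a post-increment, "tick++ <= 25" executes
the body 26 times and leaves tick past the limit, which is presumably the "lying about ticks" the
commit title refers to. A tiny userspace illustration of the two bounds (not driver code):

#include <stdio.h>

int main(void)
{
	int tick, iters;

	tick = 0; iters = 0;
	while (tick++ <= 25)	/* old bound: body runs 26 times */
		iters++;
	printf("<= 25: %d iterations, tick = %d afterwards\n", iters, tick);

	tick = 0; iters = 0;
	while (tick++ < 25)	/* new bound: body runs 25 times */
		iters++;
	printf("<  25: %d iterations, tick = %d afterwards\n", iters, tick);
	return 0;
}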
@@ -160,7 +160,7 @@ ppp_asynctty_open(struct tty_struct *tty)
 	err = -ENOMEM;
 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
-	if (ap == 0)
+	if (!ap)
 		goto out;
 	/* initialize the asyncppp structure */
@@ -215,7 +215,7 @@ ppp_asynctty_close(struct tty_struct *tty)
 	ap = tty->disc_data;
 	tty->disc_data = NULL;
 	write_unlock_irq(&disc_data_lock);
-	if (ap == 0)
+	if (!ap)
 		return;
 	/*
@@ -230,10 +230,10 @@ ppp_asynctty_close(struct tty_struct *tty)
 	tasklet_kill(&ap->tsk);
 	ppp_unregister_channel(&ap->chan);
-	if (ap->rpkt != 0)
+	if (ap->rpkt)
 		kfree_skb(ap->rpkt);
 	skb_queue_purge(&ap->rqueue);
-	if (ap->tpkt != 0)
+	if (ap->tpkt)
 		kfree_skb(ap->tpkt);
 	kfree(ap);
 }
@@ -285,13 +285,13 @@ ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
 	int err, val;
 	int __user *p = (int __user *)arg;
-	if (ap == 0)
+	if (!ap)
 		return -ENXIO;
 	err = -EFAULT;
 	switch (cmd) {
 	case PPPIOCGCHAN:
 		err = -ENXIO;
-		if (ap == 0)
+		if (!ap)
 			break;
 		err = -EFAULT;
 		if (put_user(ppp_channel_index(&ap->chan), p))
@@ -301,7 +301,7 @@ ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
 	case PPPIOCGUNIT:
 		err = -ENXIO;
-		if (ap == 0)
+		if (!ap)
 			break;
 		err = -EFAULT;
 		if (put_user(ppp_unit_number(&ap->chan), p))
@@ -350,7 +350,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
 	struct asyncppp *ap = ap_get(tty);
 	unsigned long flags;
-	if (ap == 0)
+	if (!ap)
 		return;
 	spin_lock_irqsave(&ap->recv_lock, flags);
 	ppp_async_input(ap, buf, cflags, count);
@@ -369,7 +369,7 @@ ppp_asynctty_wakeup(struct tty_struct *tty)
 	struct asyncppp *ap = ap_get(tty);
 	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-	if (ap == 0)
+	if (!ap)
 		return;
 	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
 	tasklet_schedule(&ap->tsk);
@@ -684,7 +684,7 @@ ppp_async_push(struct asyncppp *ap)
 			tty_stuffed = 1;
 			continue;
 		}
-		if (ap->optr >= ap->olim && ap->tpkt != 0) {
+		if (ap->optr >= ap->olim && ap->tpkt) {
 			if (ppp_async_encode(ap)) {
 				/* finished processing ap->tpkt */
 				clear_bit(XMIT_FULL, &ap->xmit_flags);
@@ -704,7 +704,7 @@ ppp_async_push(struct asyncppp *ap)
 	clear_bit(XMIT_BUSY, &ap->xmit_flags);
 	/* any more work to do? if not, exit the loop */
 	if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
-	      || (!tty_stuffed && ap->tpkt != 0)))
+	      || (!tty_stuffed && ap->tpkt)))
 		break;
 	/* more work to do, see if we can do it now */
 	if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
@@ -715,7 +715,7 @@ ppp_async_push(struct asyncppp *ap)
 flush:
 	clear_bit(XMIT_BUSY, &ap->xmit_flags);
-	if (ap->tpkt != 0) {
+	if (ap->tpkt) {
 		kfree_skb(ap->tpkt);
 		ap->tpkt = NULL;
 		clear_bit(XMIT_FULL, &ap->xmit_flags);
@@ -848,7 +848,7 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
 	s = 0;
 	for (i = 0; i < count; ++i) {
 		c = buf[i];
-		if (flags != 0 && flags[i] != 0)
+		if (flags && flags[i] != 0)
 			continue;
 		s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
 		c = ((c >> 4) ^ c) & 0xf;
@@ -865,7 +865,7 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
 		n = scan_ordinary(ap, buf, count);
 		f = 0;
-		if (flags != 0 && (ap->state & SC_TOSS) == 0) {
+		if (flags && (ap->state & SC_TOSS) == 0) {
 			/* check the flags to see if any char had an error */
 			for (j = 0; j < n; ++j)
 				if ((f = flags[j]) != 0)
@@ -878,9 +878,9 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
 		} else if (n > 0 && (ap->state & SC_TOSS) == 0) {
 			/* stuff the chars in the skb */
 			skb = ap->rpkt;
-			if (skb == 0) {
+			if (!skb) {
 				skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
-				if (skb == 0)
+				if (!skb)
 					goto nomem;
 				ap->rpkt = skb;
 			}
@@ -927,7 +927,7 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
 		++n;
 		buf += n;
-		if (flags != 0)
+		if (flags)
 			flags += n;
 		count -= n;
 	}
...
@@ -367,7 +367,7 @@ static int ppp_release(struct inode *inode, struct file *file)
 	struct ppp_file *pf = file->private_data;
 	struct ppp *ppp;
-	if (pf != 0) {
+	if (pf) {
 		file->private_data = NULL;
 		if (pf->kind == INTERFACE) {
 			ppp = PF_TO_PPP(pf);
@@ -398,7 +398,7 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
 	ret = count;
-	if (pf == 0)
+	if (!pf)
 		return -ENXIO;
 	add_wait_queue(&pf->rwait, &wait);
 	for (;;) {
@@ -431,7 +431,7 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&pf->rwait, &wait);
-	if (skb == 0)
+	if (!skb)
 		goto out;
 	ret = -EOVERFLOW;
@@ -455,11 +455,11 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
 	struct sk_buff *skb;
 	ssize_t ret;
-	if (pf == 0)
+	if (!pf)
 		return -ENXIO;
 	ret = -ENOMEM;
 	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
-	if (skb == 0)
+	if (!skb)
 		goto out;
 	skb_reserve(skb, pf->hdrlen);
 	ret = -EFAULT;
@@ -491,11 +491,11 @@ static unsigned int ppp_poll(struct file *file, poll_table *wait)
 	struct ppp_file *pf = file->private_data;
 	unsigned int mask;
-	if (pf == 0)
+	if (!pf)
 		return 0;
 	poll_wait(file, &pf->rwait, wait);
 	mask = POLLOUT | POLLWRNORM;
-	if (skb_peek(&pf->rq) != 0)
+	if (skb_peek(&pf->rq))
 		mask |= POLLIN | POLLRDNORM;
 	if (pf->dead)
 		mask |= POLLHUP;
@@ -559,7 +559,7 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
 	void __user *argp = (void __user *)arg;
 	int __user *p = argp;
-	if (pf == 0)
+	if (!pf)
 		return ppp_unattached_ioctl(pf, file, cmd, arg);
 	if (cmd == PPPIOCDETACH) {
@@ -689,13 +689,13 @@ static int ppp_ioctl(struct inode *inode, struct file *file,
 			val &= 0xffff;
 		}
 		vj = slhc_init(val2+1, val+1);
-		if (vj == 0) {
+		if (!vj) {
 			printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
 			err = -ENOMEM;
 			break;
 		}
 		ppp_lock(ppp);
-		if (ppp->vj != 0)
+		if (ppp->vj)
 			slhc_free(ppp->vj);
 		ppp->vj = vj;
 		ppp_unlock(ppp);
@@ -786,7 +786,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
 		if (get_user(unit, p))
 			break;
 		ppp = ppp_create_interface(unit, &err);
-		if (ppp == 0)
+		if (!ppp)
 			break;
 		file->private_data = &ppp->file;
 		ppp->owner = file;
@@ -803,7 +803,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
 		mutex_lock(&all_ppp_mutex);
 		err = -ENXIO;
 		ppp = ppp_find_unit(unit);
-		if (ppp != 0) {
+		if (ppp) {
 			atomic_inc(&ppp->file.refcnt);
 			file->private_data = &ppp->file;
 			err = 0;
@@ -817,7 +817,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
 		spin_lock_bh(&all_channels_lock);
 		err = -ENXIO;
 		chan = ppp_find_channel(unit);
-		if (chan != 0) {
+		if (chan) {
 			atomic_inc(&chan->file.refcnt);
 			file->private_data = &chan->file;
 			err = 0;
@@ -946,9 +946,9 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	case SIOCGPPPCSTATS:
 		memset(&cstats, 0, sizeof(cstats));
-		if (ppp->xc_state != 0)
+		if (ppp->xc_state)
 			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
-		if (ppp->rc_state != 0)
+		if (ppp->rc_state)
 			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
 		if (copy_to_user(addr, &cstats, sizeof(cstats)))
 			break;
@@ -993,14 +993,14 @@ ppp_xmit_process(struct ppp *ppp)
 	struct sk_buff *skb;
 	ppp_xmit_lock(ppp);
-	if (ppp->dev != 0) {
+	if (ppp->dev) {
 		ppp_push(ppp);
-		while (ppp->xmit_pending == 0
-		       && (skb = skb_dequeue(&ppp->file.xq)) != 0)
+		while (!ppp->xmit_pending
+		       && (skb = skb_dequeue(&ppp->file.xq)))
 			ppp_send_frame(ppp, skb);
 		/* If there's no work left to do, tell the core net
 		   code that we can accept some more. */
-		if (ppp->xmit_pending == 0 && skb_peek(&ppp->file.xq) == 0)
+		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
 			netif_wake_queue(ppp->dev);
 	}
 	ppp_xmit_unlock(ppp);
@@ -1100,12 +1100,12 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 	switch (proto) {
 	case PPP_IP:
-		if (ppp->vj == 0 || (ppp->flags & SC_COMP_TCP) == 0)
+		if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
 			break;
 		/* try to do VJ TCP header compression */
 		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
 				    GFP_ATOMIC);
-		if (new_skb == 0) {
+		if (!new_skb) {
 			printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
 			goto drop;
 		}
@@ -1140,7 +1140,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 	}
 	/* try to do packet compression */
-	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state != 0
+	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state
 	    && proto != PPP_LCP && proto != PPP_CCP) {
 		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
 			if (net_ratelimit())
@@ -1185,7 +1185,7 @@ ppp_push(struct ppp *ppp)
 	struct channel *pch;
 	struct sk_buff *skb = ppp->xmit_pending;
-	if (skb == 0)
+	if (!skb)
 		return;
 	list = &ppp->channels;
@@ -1355,7 +1355,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 		if (flen == len && nfree == 0)
 			bits |= E;
 		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
-		if (frag == 0)
+		if (!frag)
 			goto noskb;
 		q = skb_put(frag, flen + hdrlen);
@@ -1425,7 +1425,7 @@ ppp_channel_push(struct channel *pch)
 	struct ppp *ppp;
 	spin_lock_bh(&pch->downl);
-	if (pch->chan != 0) {
+	if (pch->chan) {
 		while (!skb_queue_empty(&pch->file.xq)) {
 			skb = skb_dequeue(&pch->file.xq);
 			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
@@ -1443,7 +1443,7 @@ ppp_channel_push(struct channel *pch)
 	if (skb_queue_empty(&pch->file.xq)) {
 		read_lock_bh(&pch->upl);
 		ppp = pch->ppp;
-		if (ppp != 0)
+		if (ppp)
 			ppp_xmit_process(ppp);
 		read_unlock_bh(&pch->upl);
 	}
@@ -1462,7 +1462,7 @@ ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
 {
 	ppp_recv_lock(ppp);
 	/* ppp->dev == 0 means interface is closing down */
-	if (ppp->dev != 0)
+	if (ppp->dev)
 		ppp_receive_frame(ppp, skb, pch);
 	else
 		kfree_skb(skb);
@@ -1475,19 +1475,19 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
 	struct channel *pch = chan->ppp;
 	int proto;
-	if (pch == 0 || skb->len == 0) {
+	if (!pch || skb->len == 0) {
 		kfree_skb(skb);
 		return;
 	}
 	proto = PPP_PROTO(skb);
 	read_lock_bh(&pch->upl);
-	if (pch->ppp == 0 || proto >= 0xc000 || proto == PPP_CCPFRAG) {
+	if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
 		/* put it on the channel queue */
 		skb_queue_tail(&pch->file.rq, skb);
 		/* drop old frames if queue too long */
 		while (pch->file.rq.qlen > PPP_MAX_RQLEN
-		       && (skb = skb_dequeue(&pch->file.rq)) != 0)
+		       && (skb = skb_dequeue(&pch->file.rq)))
 			kfree_skb(skb);
 		wake_up_interruptible(&pch->file.rwait);
 	} else {
@@ -1503,13 +1503,13 @@ ppp_input_error(struct ppp_channel *chan, int code)
 	struct channel *pch = chan->ppp;
 	struct sk_buff *skb;
-	if (pch == 0)
+	if (!pch)
 		return;
 	read_lock_bh(&pch->upl);
-	if (pch->ppp != 0) {
+	if (pch->ppp) {
 		skb = alloc_skb(0, GFP_ATOMIC);
-		if (skb != 0) {
+		if (skb) {
 			skb->len = 0; /* probably unnecessary */
 			skb->cb[0] = code;
 			ppp_do_recv(pch->ppp, skb, pch);
@@ -1548,7 +1548,7 @@ static void
 ppp_receive_error(struct ppp *ppp)
 {
 	++ppp->stats.rx_errors;
-	if (ppp->vj != 0)
+	if (ppp->vj)
 		slhc_toss(ppp->vj);
 }
@@ -1563,7 +1563,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
	 * Note that some decompressors need to see uncompressed frames
	 * that come in as well as compressed frames.
	 */
-	if (ppp->rc_state != 0 && (ppp->rstate & SC_DECOMP_RUN)
+	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)
 	    && (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
 		skb = ppp_decompress_frame(ppp, skb);
@@ -1574,13 +1574,13 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 	switch (proto) {
 	case PPP_VJC_COMP:
 		/* decompress VJ compressed packets */
-		if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP))
+		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
 			goto err;
 		if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
 			/* copy to a new sk_buff with more tailroom */
 			ns = dev_alloc_skb(skb->len + 128);
-			if (ns == 0) {
+			if (!ns) {
 				printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
 				goto err;
 			}
@@ -1606,7 +1606,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 		break;
 	case PPP_VJC_UNCOMP:
-		if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP))
+		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
 			goto err;
 		/* Until we fix the decompressor need to make sure
@@ -1636,7 +1636,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 		skb_queue_tail(&ppp->file.rq, skb);
 		/* limit queue length by dropping old frames */
 		while (ppp->file.rq.qlen > PPP_MAX_RQLEN
-		       && (skb = skb_dequeue(&ppp->file.rq)) != 0)
+		       && (skb = skb_dequeue(&ppp->file.rq)))
 			kfree_skb(skb);
 		/* wake up any process polling or blocking on read */
 		wake_up_interruptible(&ppp->file.rwait);
@@ -1718,7 +1718,7 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
 		}
 		ns = dev_alloc_skb(obuff_size);
-		if (ns == 0) {
+		if (!ns) {
 			printk(KERN_ERR "ppp_decompress_frame: no memory\n");
 			goto err;
 		}
@@ -1836,7 +1836,7 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
 		ppp->minseq = ppp->mrq.next->sequence;
 	/* Pull completed packets off the queue and receive them. */
-	while ((skb = ppp_mp_reconstruct(ppp)) != 0)
+	while ((skb = ppp_mp_reconstruct(ppp)))
 		ppp_receive_nonmp_frame(ppp, skb);
 	return;
@@ -2002,7 +2002,7 @@ ppp_register_channel(struct ppp_channel *chan)
 	struct channel *pch;
 	pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
-	if (pch == 0)
+	if (!pch)
 		return -ENOMEM;
 	pch->ppp = NULL;
 	pch->chan = chan;
@@ -2030,7 +2030,7 @@ int ppp_channel_index(struct ppp_channel *chan)
 {
 	struct channel *pch = chan->ppp;
-	if (pch != 0)
+	if (pch)
 		return pch->file.index;
 	return -1;
 }
@@ -2043,9 +2043,9 @@ int ppp_unit_number(struct ppp_channel *chan)
 	struct channel *pch = chan->ppp;
 	int unit = -1;
-	if (pch != 0) {
+	if (pch) {
 		read_lock_bh(&pch->upl);
-		if (pch->ppp != 0)
+		if (pch->ppp)
 			unit = pch->ppp->file.index;
 		read_unlock_bh(&pch->upl);
 	}
@@ -2061,7 +2061,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
 {
 	struct channel *pch = chan->ppp;
-	if (pch == 0)
+	if (!pch)
 		return;	/* should never happen */
 	chan->ppp = NULL;
@@ -2093,7 +2093,7 @@ ppp_output_wakeup(struct ppp_channel *chan)
 {
 	struct channel *pch = chan->ppp;
-	if (pch == 0)
+	if (!pch)
 		return;
 	ppp_channel_push(pch);
 }
@@ -2124,18 +2124,18 @@ ppp_set_compress(struct ppp *ppp, unsigned long arg)
 	cp = find_compressor(ccp_option[0]);
 #ifdef CONFIG_KMOD
-	if (cp == 0) {
+	if (!cp) {
 		request_module("ppp-compress-%d", ccp_option[0]);
 		cp = find_compressor(ccp_option[0]);
 	}
 #endif /* CONFIG_KMOD */
-	if (cp == 0)
+	if (!cp)
 		goto out;
 	err = -ENOBUFS;
 	if (data.transmit) {
 		state = cp->comp_alloc(ccp_option, data.length);
-		if (state != 0) {
+		if (state) {
 			ppp_xmit_lock(ppp);
 			ppp->xstate &= ~SC_COMP_RUN;
 			ocomp = ppp->xcomp;
@@ -2143,7 +2143,7 @@ ppp_set_compress(struct ppp *ppp, unsigned long arg)
 			ppp->xcomp = cp;
 			ppp->xc_state = state;
 			ppp_xmit_unlock(ppp);
-			if (ostate != 0) {
+			if (ostate) {
 				ocomp->comp_free(ostate);
 				module_put(ocomp->owner);
 			}
@@ -2153,7 +2153,7 @@ ppp_set_compress(struct ppp *ppp, unsigned long arg)
 	} else {
 		state = cp->decomp_alloc(ccp_option, data.length);
-		if (state != 0) {
+		if (state) {
 			ppp_recv_lock(ppp);
 			ppp->rstate &= ~SC_DECOMP_RUN;
 			ocomp = ppp->rcomp;
@@ -2161,7 +2161,7 @@ ppp_set_compress(struct ppp *ppp, unsigned long arg)
 			ppp->rcomp = cp;
 			ppp->rc_state = state;
 			ppp_recv_unlock(ppp);
-			if (ostate != 0) {
+			if (ostate) {
 				ocomp->decomp_free(ostate);
 				module_put(ocomp->owner);
 			}
@@ -2228,7 +2228,7 @@ ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
 			break;
 		if (inbound) {
 			/* we will start receiving compressed packets */
-			if (ppp->rc_state == 0)
+			if (!ppp->rc_state)
 				break;
 			if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
 					ppp->file.index, 0, ppp->mru, ppp->debug)) {
@@ -2237,7 +2237,7 @@ ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
 			}
 		} else {
 			/* we will soon start sending compressed packets */
-			if (ppp->xc_state == 0)
+			if (!ppp->xc_state)
 				break;
 			if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
 					ppp->file.index, 0, ppp->debug))
@@ -2320,11 +2320,11 @@ ppp_register_compressor(struct compressor *cp)
 	int ret;
 	spin_lock(&compressor_list_lock);
 	ret = -EEXIST;
-	if (find_comp_entry(cp->compress_proto) != 0)
+	if (find_comp_entry(cp->compress_proto))
 		goto out;
 	ret = -ENOMEM;
 	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
-	if (ce == 0)
+	if (!ce)
 		goto out;
 	ret = 0;
 	ce->comp = cp;
@@ -2342,7 +2342,7 @@ ppp_unregister_compressor(struct compressor *cp)
 	spin_lock(&compressor_list_lock);
 	ce = find_comp_entry(cp->compress_proto);
-	if (ce != 0 && ce->comp == cp) {
+	if (ce && ce->comp == cp) {
 		list_del(&ce->list);
 		kfree(ce);
 	}
@@ -2358,7 +2358,7 @@ find_compressor(int type)
 	spin_lock(&compressor_list_lock);
 	ce = find_comp_entry(type);
-	if (ce != 0) {
+	if (ce) {
 		cp = ce->comp;
 		if (!try_module_get(cp->owner))
 			cp = NULL;
@@ -2383,7 +2383,7 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
 	st->p.ppp_opackets = ppp->stats.tx_packets;
 	st->p.ppp_oerrors = ppp->stats.tx_errors;
 	st->p.ppp_obytes = ppp->stats.tx_bytes;
-	if (vj == 0)
+	if (!vj)
 		return;
 	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
 	st->vj.vjs_compressed = vj->sls_o_compressed;
@@ -2604,11 +2604,11 @@ ppp_connect_channel(struct channel *pch, int unit)
 	mutex_lock(&all_ppp_mutex);
 	ppp = ppp_find_unit(unit);
-	if (ppp == 0)
+	if (!ppp)
 		goto out;
 	write_lock_bh(&pch->upl);
 	ret = -EINVAL;
-	if (pch->ppp != 0)
+	if (pch->ppp)
 		goto outl;
 	ppp_lock(ppp);
@@ -2644,7 +2644,7 @@ ppp_disconnect_channel(struct channel *pch)
 	ppp = pch->ppp;
 	pch->ppp = NULL;
 	write_unlock_bh(&pch->upl);
-	if (ppp != 0) {
+	if (ppp) {
 		/* remove it from the ppp unit's list */
 		ppp_lock(ppp);
 		list_del(&pch->clist);
...
@@ -209,7 +209,7 @@ ppp_sync_open(struct tty_struct *tty)
 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
 	err = -ENOMEM;
-	if (ap == 0)
+	if (!ap)
 		goto out;
 	/* initialize the syncppp structure */
@@ -262,7 +262,7 @@ ppp_sync_close(struct tty_struct *tty)
 	ap = tty->disc_data;
 	tty->disc_data = NULL;
 	write_unlock_irq(&disc_data_lock);
-	if (ap == 0)
+	if (!ap)
 		return;
 	/*
@@ -278,7 +278,7 @@ ppp_sync_close(struct tty_struct *tty)
 	ppp_unregister_channel(&ap->chan);
 	skb_queue_purge(&ap->rqueue);
-	if (ap->tpkt != 0)
+	if (ap->tpkt)
 		kfree_skb(ap->tpkt);
 	kfree(ap);
 }
@@ -325,13 +325,13 @@ ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
 	int __user *p = (int __user *)arg;
 	int err, val;
-	if (ap == 0)
+	if (!ap)
 		return -ENXIO;
 	err = -EFAULT;
 	switch (cmd) {
 	case PPPIOCGCHAN:
 		err = -ENXIO;
-		if (ap == 0)
+		if (!ap)
 			break;
 		err = -EFAULT;
 		if (put_user(ppp_channel_index(&ap->chan), p))
@@ -341,7 +341,7 @@ ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
 	case PPPIOCGUNIT:
 		err = -ENXIO;
-		if (ap == 0)
+		if (!ap)
 			break;
 		err = -EFAULT;
 		if (put_user(ppp_unit_number(&ap->chan), p))
@@ -390,7 +390,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
 	struct syncppp *ap = sp_get(tty);
 	unsigned long flags;
-	if (ap == 0)
+	if (!ap)
 		return;
 	spin_lock_irqsave(&ap->recv_lock, flags);
 	ppp_sync_input(ap, buf, cflags, count);
@@ -409,7 +409,7 @@ ppp_sync_wakeup(struct tty_struct *tty)
 	struct syncppp *ap = sp_get(tty);
 	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-	if (ap == 0)
+	if (!ap)
 		return;
 	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
 	tasklet_schedule(&ap->tsk);
@@ -651,7 +651,7 @@ ppp_sync_push(struct syncppp *ap)
 	for (;;) {
 		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
 			tty_stuffed = 0;
-		if (!tty_stuffed && ap->tpkt != 0) {
+		if (!tty_stuffed && ap->tpkt) {
 			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
 			sent = tty->driver->write(tty, ap->tpkt->data, ap->tpkt->len);
 			if (sent < 0)
@@ -669,7 +669,7 @@ ppp_sync_push(struct syncppp *ap)
 		/* haven't made any progress */
 		spin_unlock_bh(&ap->xmit_lock);
 		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
-		      || (!tty_stuffed && ap->tpkt != 0)))
+		      || (!tty_stuffed && ap->tpkt)))
 			break;
 		if (!spin_trylock_bh(&ap->xmit_lock))
 			break;
@@ -677,7 +677,7 @@ ppp_sync_push(struct syncppp *ap)
 	return done;
 flush:
-	if (ap->tpkt != 0) {
+	if (ap->tpkt) {
 		kfree_skb(ap->tpkt);
 		ap->tpkt = NULL;
 		clear_bit(XMIT_FULL, &ap->xmit_flags);
@@ -732,7 +732,8 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
 		ppp_print_buffer ("receive buffer", buf, count);
 	/* stuff the chars in the skb */
-	if ((skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2)) == 0) {
+	skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
+	if (!skb) {
 		printk(KERN_ERR "PPPsync: no memory (input pkt)\n");
 		goto err;
 	}
@@ -740,7 +741,7 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
 	if (buf[0] != PPP_ALLSTATIONS)
 		skb_reserve(skb, 2 + (buf[0] & 1));
-	if (flags != 0 && *flags) {
+	if (flags && *flags) {
 		/* error flag set, ignore frame */
 		goto err;
 	} else if (count > skb_tailroom(skb)) {
...
@@ -2333,10 +2333,10 @@ static int gem_close(struct net_device *dev)
 {
 	struct gem *gp = dev->priv;
-	napi_disable(&gp->napi);
 	mutex_lock(&gp->pm_mutex);
+	napi_disable(&gp->napi);
 	gp->opened = 0;
 	if (!gp->asleep)
 		gem_do_stop(dev, 0);
@@ -2355,8 +2355,6 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
 	mutex_lock(&gp->pm_mutex);
-	napi_disable(&gp->napi);
 	printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
 	       dev->name,
 	       (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
@@ -2370,6 +2368,8 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
 	/* If the driver is opened, we stop the MAC */
 	if (gp->opened) {
+		napi_disable(&gp->napi);
 		/* Stop traffic, mark us closed */
 		netif_device_detach(dev);
@@ -2460,6 +2460,7 @@ static int gem_resume(struct pci_dev *pdev)
 		/* Re-attach net device */
 		netif_device_attach(dev);
+		napi_enable(&gp->napi);
 	}
 	spin_lock_irqsave(&gp->lock, flags);
@@ -2479,8 +2480,6 @@ static int gem_resume(struct pci_dev *pdev)
 	spin_unlock(&gp->tx_lock);
 	spin_unlock_irqrestore(&gp->lock, flags);
-	napi_enable(&gp->napi);
 	mutex_unlock(&gp->pm_mutex);
 	return 0;
...
@@ -2168,10 +2168,10 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 {
 	struct bdx_priv *priv = netdev->priv;
-	strncat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
-	strncat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
-	strncat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
-	strncat(drvinfo->bus_info, pci_name(priv->pdev),
+	strlcat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
+	strlcat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
+	strlcat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+	strlcat(drvinfo->bus_info, pci_name(priv->pdev),
 		sizeof(drvinfo->bus_info));
 	drvinfo->n_stats = ((priv->stats_flag) ?
...
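The strncat→strlcat conversion above matters because the two helpers interpret the size argument
differently: strncat()'s third argument limits how many characters of src may be appended (a
terminating NUL is added on top of that), so it must describe the space remaining in dst and
passing sizeof(dst) can overflow once dst is non-empty, whereas strlcat() takes the total size of
the destination buffer and always NUL-terminates within it. A hedged sketch of the safe pattern
(the helper name bdx_fill_field is hypothetical, not from the driver; strlcat here is the kernel's
lib/string.c version):

#include <linux/string.h>

/* Hypothetical helper: copy a value into a fixed-size ethtool string
 * field, bounded by the field's full size rather than by "space left". */
static void bdx_fill_field(char *field, size_t field_size, const char *value)
{
	field[0] = '\0';			/* start from an empty string */
	strlcat(field, value, field_size);	/* field_size = total buffer size */
}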
@@ -64,8 +64,8 @@
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.85"
-#define DRV_MODULE_RELDATE	"October 18, 2007"
+#define DRV_MODULE_VERSION	"3.86"
+#define DRV_MODULE_RELDATE	"November 9, 2007"
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -1106,6 +1106,24 @@ static int tg3_phy_reset(struct tg3 *tp)
 		if (err)
 			return err;
+	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
+		u32 val;
+		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
+		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
+		    CPMU_LSPD_1000MB_MACCLK_12_5) {
+			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
+			udelay(40);
+			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
+		}
+		/* Disable GPHY autopowerdown. */
+		tg3_writephy(tp, MII_TG3_MISC_SHDW,
+			     MII_TG3_MISC_SHDW_WREN |
+			     MII_TG3_MISC_SHDW_APD_SEL |
+			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
+	}
 out:
 	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
@@ -1297,6 +1315,8 @@ static void tg3_nvram_unlock(struct tg3 *);
 static void tg3_power_down_phy(struct tg3 *tp)
 {
+	u32 val;
 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
@@ -1311,8 +1331,6 @@ static void tg3_power_down_phy(struct tg3 *tp)
 	}
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
-		u32 val;
 		tg3_bmcr_reset(tp);
 		val = tr32(GRC_MISC_CFG);
 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
@@ -1332,6 +1350,14 @@ static void tg3_power_down_phy(struct tg3 *tp)
 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
 	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
 		return;
+	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
+		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
+		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
+		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
+		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
+	}
 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
 }
@@ -3126,6 +3152,23 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 		err = tg3_setup_copper_phy(tp, force_reset);
 	}
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
+	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
+		u32 val, scale;
+		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
+		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
+			scale = 65;
+		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
+			scale = 6;
+		else
+			scale = 12;
+		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
+		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
+		tw32(GRC_MISC_CFG, val);
+	}
 	if (tp->link_config.active_speed == SPEED_1000 &&
 	    tp->link_config.active_duplex == DUPLEX_HALF)
 		tw32(MAC_TX_LENGTHS,
@@ -5054,12 +5097,15 @@ static void tg3_restore_pci_state(struct tg3 *tp)
 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
-	if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
+		pcie_set_readrq(tp->pdev, 4096);
+	else {
 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
 				      tp->pci_cacheline_sz);
 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
 				      tp->pci_lat_timer);
 	}
 	/* Make sure PCI-X relaxed ordering bit is clear. */
 	if (tp->pcix_cap) {
 		u16 pcix_cmd;
@@ -6343,10 +6389,26 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
-	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
+	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
 		val = tr32(TG3_CPMU_CTRL);
 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
 		tw32(TG3_CPMU_CTRL, val);
+		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+		val |= CPMU_LSPD_10MB_MACCLK_6_25;
+		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
+		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
+		val |= CPMU_LNK_AWARE_MACCLK_6_25;
+		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
+		val = tr32(TG3_CPMU_HST_ACC);
+		val &= ~CPMU_HST_ACC_MACCLK_MASK;
+		val |= CPMU_HST_ACC_MACCLK_6_25;
+		tw32(TG3_CPMU_HST_ACC, val);
 	}
 	/* This works around an issue with Athlon chipsets on
@@ -8267,7 +8329,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 				   SUPPORTED_100baseT_Full |
 				   SUPPORTED_10baseT_Half |
 				   SUPPORTED_10baseT_Full |
-				   SUPPORTED_MII);
+				   SUPPORTED_TP);
 		cmd->port = PORT_TP;
 	} else {
 		cmd->supported |= SUPPORTED_FIBRE;
@@ -8664,7 +8726,9 @@ static void tg3_get_ethtool_stats (struct net_device *dev,
 }
 #define NVRAM_TEST_SIZE 0x100
-#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
+#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
+#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
+#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
 #define NVRAM_SELFBOOT_HW_SIZE 0x20
 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
@@ -8679,9 +8743,22 @@ static int tg3_test_nvram(struct tg3 *tp)
 	if (magic == TG3_EEPROM_MAGIC)
 		size = NVRAM_TEST_SIZE;
 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
-		if ((magic & 0xe00000) == 0x200000)
-			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
-		else
+		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
+		    TG3_EEPROM_SB_FORMAT_1) {
+			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
+			case TG3_EEPROM_SB_REVISION_0:
+				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_2:
+				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_3:
+				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
+				break;
+			default:
+				return 0;
+			}
+		} else
 			return 0;
 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
 		size = NVRAM_SELFBOOT_HW_SIZE;
@@ -8708,8 +8785,17 @@ static int tg3_test_nvram(struct tg3 *tp)
 	    TG3_EEPROM_MAGIC_FW) {
 		u8 *buf8 = (u8 *) buf, csum8 = 0;
-		for (i = 0; i < size; i++)
-			csum8 += buf8[i];
+		if ((cpu_to_be32(buf[0]) & TG3_EEPROM_SB_REVISION_MASK) ==
+		    TG3_EEPROM_SB_REVISION_2) {
+			/* For rev 2, the csum doesn't include the MBA. */
+			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
+				csum8 += buf8[i];
+			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
+				csum8 += buf8[i];
+		} else {
+			for (i = 0; i < size; i++)
+				csum8 += buf8[i];
+		}
 		if (csum8 == 0) {
 			err = 0;
@@ -9293,7 +9379,7 @@ static int tg3_test_loopback(struct tg3 *tp)
 	if (err)
 		return TG3_LOOPBACK_FAILED;
-	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
+	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
 		int i;
 		u32 status;
@@ -9310,17 +9396,17 @@ static int tg3_test_loopback(struct tg3 *tp)
 		if (status != CPMU_MUTEX_GNT_DRIVER)
 			return TG3_LOOPBACK_FAILED;
-		cpmuctrl = tr32(TG3_CPMU_CTRL);
 		/* Turn off power management based on link speed. */
+		cpmuctrl = tr32(TG3_CPMU_CTRL);
 		tw32(TG3_CPMU_CTRL,
-		     cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE);
+		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
+				  CPMU_CTRL_LINK_AWARE_MODE));
 	}
 	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
 		err |= TG3_MAC_LOOPBACK_FAILED;
-	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
+	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
 		tw32(TG3_CPMU_CTRL, cpmuctrl);
 		/* Release the mutex */
@@ -10541,6 +10627,10 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+		if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
+		    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1)
+			tp->led_ctrl = LED_CTRL_MODE_MAC;
 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
 			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
 			if ((tp->pdev->subsystem_vendor ==
@@ -10859,7 +10949,7 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
 	}
 	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
-	    (tp->tg3_flags & TG3_FLG3_ENABLE_APE))
+	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
 		return;
 	for (offset = TG3_NVM_DIR_START;
@@ -11127,6 +11217,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
 	if (pcie_cap != 0) {
 		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
+		pcie_set_readrq(tp->pdev, 4096);
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 			u16 lnkctl;
@@ -11307,9 +11400,16 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	}
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
 		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
+		if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
+		    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
+		    tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
+		    tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
+			tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
+	}
 	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
@@ -12464,6 +12564,28 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		goto err_out_iounmap;
 	}
+	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+			printk(KERN_ERR PFX "Cannot find proper PCI device "
+			       "base address for APE, aborting.\n");
+			err = -ENODEV;
+			goto err_out_iounmap;
+		}
+		tg3reg_base = pci_resource_start(pdev, 2);
+		tg3reg_len = pci_resource_len(pdev, 2);
+		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
+		if (tp->aperegs == 0UL) {
+			printk(KERN_ERR PFX "Cannot map APE registers, "
+			       "aborting.\n");
+			err = -ENOMEM;
+			goto err_out_iounmap;
+		}
+		tg3_ape_lock_init(tp);
+	}
 	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
@@ -12478,7 +12600,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	err = tg3_test_dma(tp);
 	if (err) {
 		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
-		goto err_out_iounmap;
+		goto err_out_apeunmap;
 	}
 	/* Tigon3 can do ipv4 only... and some chips have buggy
@@ -12501,28 +12623,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	tg3_init_coal(tp);
-	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
-		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
-			printk(KERN_ERR PFX "Cannot find proper PCI device "
-			       "base address for APE, aborting.\n");
-			err = -ENODEV;
-			goto err_out_iounmap;
-		}
-		tg3reg_base = pci_resource_start(pdev, 2);
-		tg3reg_len = pci_resource_len(pdev, 2);
-		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
-		if (tp->aperegs == 0UL) {
-			printk(KERN_ERR PFX "Cannot map APE registers, "
-			       "aborting.\n");
-			err = -ENOMEM;
-			goto err_out_iounmap;
-		}
-		tg3_ape_lock_init(tp);
-	}
 	pci_set_drvdata(pdev, dev);
 	err = register_netdev(dev);
...
...@@ -109,6 +109,9 @@ ...@@ -109,6 +109,9 @@
#define CHIPREV_ID_5714_A2 0x9002 #define CHIPREV_ID_5714_A2 0x9002
#define CHIPREV_ID_5906_A1 0xc001 #define CHIPREV_ID_5906_A1 0xc001
#define CHIPREV_ID_5784_A0 0x5784000 #define CHIPREV_ID_5784_A0 0x5784000
#define CHIPREV_ID_5784_A1 0x5784001
#define CHIPREV_ID_5761_A0 0x5761000
#define CHIPREV_ID_5761_A1 0x5761001
#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) #define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
#define ASIC_REV_5700 0x07 #define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00 #define ASIC_REV_5701 0x00
...@@ -856,7 +859,31 @@ ...@@ -856,7 +859,31 @@
#define CPMU_CTRL_LINK_IDLE_MODE 0x00000200 #define CPMU_CTRL_LINK_IDLE_MODE 0x00000200
#define CPMU_CTRL_LINK_AWARE_MODE 0x00000400 #define CPMU_CTRL_LINK_AWARE_MODE 0x00000400
#define CPMU_CTRL_LINK_SPEED_MODE 0x00004000 #define CPMU_CTRL_LINK_SPEED_MODE 0x00004000
/* 0x3604 --> 0x365c unused */ #define TG3_CPMU_LSPD_10MB_CLK 0x00003604
#define CPMU_LSPD_10MB_MACCLK_MASK 0x001f0000
#define CPMU_LSPD_10MB_MACCLK_6_25 0x00130000
/* 0x3608 --> 0x360c unused */
#define TG3_CPMU_LSPD_1000MB_CLK 0x0000360c
#define CPMU_LSPD_1000MB_MACCLK_62_5 0x00000000
#define CPMU_LSPD_1000MB_MACCLK_12_5 0x00110000
#define CPMU_LSPD_1000MB_MACCLK_MASK 0x001f0000
#define TG3_CPMU_LNK_AWARE_PWRMD 0x00003610
#define CPMU_LNK_AWARE_MACCLK_MASK 0x001f0000
#define CPMU_LNK_AWARE_MACCLK_6_25 0x00130000
/* 0x3614 --> 0x361c unused */
#define TG3_CPMU_HST_ACC 0x0000361c
#define CPMU_HST_ACC_MACCLK_MASK 0x001f0000
#define CPMU_HST_ACC_MACCLK_6_25 0x00130000
/* 0x3620 --> 0x3630 unused */
#define TG3_CPMU_CLCK_STAT 0x00003630
#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
#define CPMU_CLCK_STAT_MAC_CLCK_12_5 0x00110000
#define CPMU_CLCK_STAT_MAC_CLCK_6_25 0x00130000
/* 0x3634 --> 0x365c unused */
#define TG3_CPMU_MUTEX_REQ 0x0000365c #define TG3_CPMU_MUTEX_REQ 0x0000365c
#define CPMU_MUTEX_REQ_DRIVER 0x00001000 #define CPMU_MUTEX_REQ_DRIVER 0x00001000
...@@ -1537,6 +1564,12 @@ ...@@ -1537,6 +1564,12 @@
#define TG3_EEPROM_MAGIC 0x669955aa #define TG3_EEPROM_MAGIC 0x669955aa
#define TG3_EEPROM_MAGIC_FW 0xa5000000 #define TG3_EEPROM_MAGIC_FW 0xa5000000
#define TG3_EEPROM_MAGIC_FW_MSK 0xff000000 #define TG3_EEPROM_MAGIC_FW_MSK 0xff000000
#define TG3_EEPROM_SB_FORMAT_MASK 0x00e00000
#define TG3_EEPROM_SB_FORMAT_1 0x00200000
#define TG3_EEPROM_SB_REVISION_MASK 0x001f0000
#define TG3_EEPROM_SB_REVISION_0 0x00000000
#define TG3_EEPROM_SB_REVISION_2 0x00020000
#define TG3_EEPROM_SB_REVISION_3 0x00030000
#define TG3_EEPROM_MAGIC_HW 0xabcd #define TG3_EEPROM_MAGIC_HW 0xabcd
#define TG3_EEPROM_MAGIC_HW_MSK 0xffff #define TG3_EEPROM_MAGIC_HW_MSK 0xffff
...@@ -1691,6 +1724,12 @@ ...@@ -1691,6 +1724,12 @@
#define MII_TG3_ISTAT 0x1a /* IRQ status register */ #define MII_TG3_ISTAT 0x1a /* IRQ status register */
#define MII_TG3_IMASK 0x1b /* IRQ mask register */ #define MII_TG3_IMASK 0x1b /* IRQ mask register */
#define MII_TG3_MISC_SHDW 0x1c
#define MII_TG3_MISC_SHDW_WREN 0x8000
#define MII_TG3_MISC_SHDW_APD_SEL 0x2800
#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001
/* ISTAT/IMASK event bits */ /* ISTAT/IMASK event bits */
#define MII_TG3_INT_LINKCHG 0x0002 #define MII_TG3_INT_LINKCHG 0x0002
#define MII_TG3_INT_SPEEDCHG 0x0004 #define MII_TG3_INT_SPEEDCHG 0x0004
...@@ -1747,6 +1786,8 @@ ...@@ -1747,6 +1786,8 @@
/* APE convenience enumerations. */ /* APE convenience enumerations. */
#define TG3_APE_LOCK_MEM 4 #define TG3_APE_LOCK_MEM 4
#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
/* There are two ways to manage the TX descriptors on the tigon3. /* There are two ways to manage the TX descriptors on the tigon3.
* Either the descriptors are in host DMA'able memory, or they * Either the descriptors are in host DMA'able memory, or they
...@@ -2352,6 +2393,7 @@ struct tg3 { ...@@ -2352,6 +2393,7 @@ struct tg3 {
u32 tg3_flags3; u32 tg3_flags3;
#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001 #define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001
#define TG3_FLG3_ENABLE_APE 0x00000002 #define TG3_FLG3_ENABLE_APE 0x00000002
#define TG3_FLG3_5761_5784_AX_FIXES 0x00000004
struct timer_list timer; struct timer_list timer;
u16 timer_counter; u16 timer_counter;
......
...@@ -24,8 +24,7 @@ config DE2104X ...@@ -24,8 +24,7 @@ config DE2104X
will say Y here.) Do read the Ethernet-HOWTO, available from will say Y here.) Do read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>. <http://www.tldp.org/docs.html#howto>.
To compile this driver as a module, choose M here and read To compile this driver as a module, choose M here. The module will
<file:Documentation/networking/net-modules.txt>. The module will
be called de2104x. be called de2104x.
config TULIP config TULIP
...@@ -42,8 +41,7 @@ config TULIP ...@@ -42,8 +41,7 @@ config TULIP
will say Y here.) Do read the Ethernet-HOWTO, available from will say Y here.) Do read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>. <http://www.tldp.org/docs.html#howto>.
To compile this driver as a module, choose M here and read To compile this driver as a module, choose M here. The module will
<file:Documentation/networking/net-modules.txt>. The module will
be called tulip. be called tulip.
config TULIP_MWI config TULIP_MWI
...@@ -104,8 +102,7 @@ config DE4X5 ...@@ -104,8 +102,7 @@ config DE4X5
information is contained in information is contained in
<file:Documentation/networking/de4x5.txt>. <file:Documentation/networking/de4x5.txt>.
To compile this driver as a module, choose M here and read To compile this driver as a module, choose M here. The module will
<file:Documentation/networking/net-modules.txt>. The module will
be called de4x5. be called de4x5.
config WINBOND_840 config WINBOND_840
...@@ -129,8 +126,7 @@ config DM9102 ...@@ -129,8 +126,7 @@ config DM9102
(Ethernet) card, say Y. Some information is contained in the file (Ethernet) card, say Y. Some information is contained in the file
<file:Documentation/networking/dmfe.txt>. <file:Documentation/networking/dmfe.txt>.
To compile this driver as a module, choose M here and read To compile this driver as a module, choose M here. The module will
<file:Documentation/networking/net-modules.txt>. The module will
be called dmfe. be called dmfe.
config ULI526X config ULI526X
...@@ -141,8 +137,7 @@ config ULI526X ...@@ -141,8 +137,7 @@ config ULI526X
This driver is for ULi M5261/M5263 10/100M Ethernet Controller This driver is for ULi M5261/M5263 10/100M Ethernet Controller
(<http://www.uli.com.tw/>). (<http://www.uli.com.tw/>).
To compile this driver as a module, choose M here and read To compile this driver as a module, choose M here. The module will
<file:Documentation/networking/net-modules.txt>. The module will
be called uli526x. be called uli526x.
config PCMCIA_XIRCOM config PCMCIA_XIRCOM
...@@ -154,8 +149,7 @@ config PCMCIA_XIRCOM ...@@ -154,8 +149,7 @@ config PCMCIA_XIRCOM
as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and
ASIX. ASIX.
To compile this driver as a module, choose M here and read To compile this driver as a module, choose M here. The module will
<file:Documentation/networking/net-modules.txt>. The module will
be called xircom_cb. If unsure, say N. be called xircom_cb. If unsure, say N.
config PCMCIA_XIRTULIP config PCMCIA_XIRTULIP
...@@ -168,8 +162,7 @@ config PCMCIA_XIRTULIP ...@@ -168,8 +162,7 @@ config PCMCIA_XIRTULIP
as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and
ASIX. ASIX.
To compile this driver as a module, choose M here and read To compile this driver as a module, choose M here. The module will
<file:Documentation/networking/net-modules.txt>. The module will
be called xircom_tulip_cb. If unsure, say N. be called xircom_tulip_cb. If unsure, say N.
endif # NET_TULIP endif # NET_TULIP
...@@ -160,7 +160,7 @@ cifs_reconnect(struct TCP_Server_Info *server) ...@@ -160,7 +160,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
if (server->ssocket) { if (server->ssocket) {
cFYI(1, ("State: 0x%x Flags: 0x%lx", server->ssocket->state, cFYI(1, ("State: 0x%x Flags: 0x%lx", server->ssocket->state,
server->ssocket->flags)); server->ssocket->flags));
server->ssocket->ops->shutdown(server->ssocket, SEND_SHUTDOWN); kernel_sock_shutdown(server->ssocket, SHUT_WR);
cFYI(1, ("Post shutdown state: 0x%x Flags: 0x%lx", cFYI(1, ("Post shutdown state: 0x%x Flags: 0x%lx",
server->ssocket->state, server->ssocket->state,
server->ssocket->flags)); server->ssocket->flags));
......
...@@ -58,6 +58,7 @@ ...@@ -58,6 +58,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/idr.h> #include <linux/idr.h>
#include <linux/kref.h> #include <linux/kref.h>
#include <linux/net.h>
#include <net/tcp.h> #include <net/tcp.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -616,8 +617,7 @@ static void o2net_shutdown_sc(struct work_struct *work) ...@@ -616,8 +617,7 @@ static void o2net_shutdown_sc(struct work_struct *work)
del_timer_sync(&sc->sc_idle_timeout); del_timer_sync(&sc->sc_idle_timeout);
o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work); o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
sc_put(sc); sc_put(sc);
sc->sc_sock->ops->shutdown(sc->sc_sock, kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
RCV_SHUTDOWN|SEND_SHUTDOWN);
} }
/* not fatal so failed connects before the other guy has our /* not fatal so failed connects before the other guy has our
......
...@@ -178,7 +178,7 @@ static __net_exit void proc_net_ns_exit(struct net *net) ...@@ -178,7 +178,7 @@ static __net_exit void proc_net_ns_exit(struct net *net)
kfree(net->proc_net_root); kfree(net->proc_net_root);
} }
static struct pernet_operations proc_net_ns_ops = { static struct pernet_operations __net_initdata proc_net_ns_ops = {
.init = proc_net_ns_init, .init = proc_net_ns_init,
.exit = proc_net_ns_exit, .exit = proc_net_ns_exit,
}; };
......
...@@ -95,6 +95,12 @@ enum sock_type { ...@@ -95,6 +95,12 @@ enum sock_type {
#endif /* ARCH_HAS_SOCKET_TYPES */ #endif /* ARCH_HAS_SOCKET_TYPES */
enum sock_shutdown_cmd {
SHUT_RD = 0,
SHUT_WR = 1,
SHUT_RDWR = 2,
};
/** /**
* struct socket - general BSD socket * struct socket - general BSD socket
* @state: socket state (%SS_CONNECTED, etc) * @state: socket state (%SS_CONNECTED, etc)
...@@ -223,6 +229,8 @@ extern int kernel_setsockopt(struct socket *sock, int level, int optname, ...@@ -223,6 +229,8 @@ extern int kernel_setsockopt(struct socket *sock, int level, int optname,
extern int kernel_sendpage(struct socket *sock, struct page *page, int offset, extern int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags); size_t size, int flags);
extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
extern int kernel_sock_shutdown(struct socket *sock,
enum sock_shutdown_cmd how);
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
#define SOCKOPS_WRAPPED(name) name #define SOCKOPS_WRAPPED(name) name
......
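The conversions in this series (nbd above, cifs, o2net and rxrpc below) all follow the same call pattern: instead of poking sock->ops->shutdown() with SEND_SHUTDOWN/RCV_SHUTDOWN bits, in-kernel users hand kernel_sock_shutdown() one of the SHUT_* values declared above and let it do the translation. A minimal sketch, with hypothetical function names; error handling is omitted for brevity:

        #include <linux/net.h>

        static void example_close_tx(struct socket *sock)
        {
                /* Stop further transmits but keep receiving,
                 * as the cifs conversion does. */
                kernel_sock_shutdown(sock, SHUT_WR);
        }

        static void example_close_both(struct socket *sock)
        {
                /* Full teardown before sock_release(),
                 * as nbd, o2net and rxrpc do. */
                kernel_sock_shutdown(sock, SHUT_RDWR);
        }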
...@@ -491,9 +491,11 @@ struct nduseroptmsg ...@@ -491,9 +491,11 @@ struct nduseroptmsg
unsigned char nduseropt_family; unsigned char nduseropt_family;
unsigned char nduseropt_pad1; unsigned char nduseropt_pad1;
unsigned short nduseropt_opts_len; /* Total length of options */ unsigned short nduseropt_opts_len; /* Total length of options */
int nduseropt_ifindex;
__u8 nduseropt_icmp_type; __u8 nduseropt_icmp_type;
__u8 nduseropt_icmp_code; __u8 nduseropt_icmp_code;
unsigned short nduseropt_pad2; unsigned short nduseropt_pad2;
unsigned int nduseropt_pad3;
/* Followed by one or more ND options */ /* Followed by one or more ND options */
}; };
......
...@@ -13,9 +13,6 @@ struct sock; ...@@ -13,9 +13,6 @@ struct sock;
struct sockaddr; struct sockaddr;
struct socket; struct socket;
extern void inet_remove_sock(struct sock *sk1);
extern void inet_put_sock(unsigned short num,
struct sock *sk);
extern int inet_release(struct socket *sock); extern int inet_release(struct socket *sock);
extern int inet_stream_connect(struct socket *sock, extern int inet_stream_connect(struct socket *sock,
struct sockaddr * uaddr, struct sockaddr * uaddr,
...@@ -30,7 +27,6 @@ extern int inet_sendmsg(struct kiocb *iocb, ...@@ -30,7 +27,6 @@ extern int inet_sendmsg(struct kiocb *iocb,
struct msghdr *msg, struct msghdr *msg,
size_t size); size_t size);
extern int inet_shutdown(struct socket *sock, int how); extern int inet_shutdown(struct socket *sock, int how);
extern unsigned int inet_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
extern int inet_listen(struct socket *sock, int backlog); extern int inet_listen(struct socket *sock, int backlog);
extern void inet_sock_destruct(struct sock *sk); extern void inet_sock_destruct(struct sock *sk);
......
...@@ -22,7 +22,7 @@ struct inet_peer ...@@ -22,7 +22,7 @@ struct inet_peer
__be32 v4daddr; /* peer's address */ __be32 v4daddr; /* peer's address */
__u16 avl_height; __u16 avl_height;
__u16 ip_id_count; /* IP ID for the next packet */ __u16 ip_id_count; /* IP ID for the next packet */
struct inet_peer *unused_next, **unused_prevp; struct list_head unused;
__u32 dtime; /* the time of last use of not __u32 dtime; /* the time of last use of not
* referenced entries */ * referenced entries */
atomic_t refcnt; atomic_t refcnt;
......
...@@ -119,9 +119,11 @@ static inline struct net *maybe_get_net(struct net *net) ...@@ -119,9 +119,11 @@ static inline struct net *maybe_get_net(struct net *net)
#ifdef CONFIG_NET_NS #ifdef CONFIG_NET_NS
#define __net_init #define __net_init
#define __net_exit #define __net_exit
#define __net_initdata
#else #else
#define __net_init __init #define __net_init __init
#define __net_exit __exit_refok #define __net_exit __exit_refok
#define __net_initdata __initdata
#endif #endif
struct pernet_operations { struct pernet_operations {
......
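After this change a pernet_operations instance that is only needed at boot when namespaces are disabled can be tagged __net_initdata, which becomes __initdata without CONFIG_NET_NS and a no-op with it. A minimal sketch of the declaration style the converted call sites above use, with a hypothetical subsystem name:

        #include <net/net_namespace.h>

        static __net_init int example_net_init(struct net *net)
        {
                /* per-namespace setup would go here */
                return 0;
        }

        static __net_exit void example_net_exit(struct net *net)
        {
                /* per-namespace teardown would go here */
        }

        /* Discarded with init memory when CONFIG_NET_NS is off. */
        static struct pernet_operations __net_initdata example_net_ops = {
                .init = example_net_init,
                .exit = example_net_exit,
        };

Registration still goes through register_pernet_subsys(&example_net_ops), exactly as the existing callers do.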
...@@ -103,6 +103,7 @@ typedef enum { ...@@ -103,6 +103,7 @@ typedef enum {
SCTP_CMD_ASSOC_CHANGE, /* generate and send assoc_change event */ SCTP_CMD_ASSOC_CHANGE, /* generate and send assoc_change event */
SCTP_CMD_ADAPTATION_IND, /* generate and send adaptation event */ SCTP_CMD_ADAPTATION_IND, /* generate and send adaptation event */
SCTP_CMD_ASSOC_SHKEY, /* generate the association shared keys */ SCTP_CMD_ASSOC_SHKEY, /* generate the association shared keys */
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_LAST SCTP_CMD_LAST
} sctp_verb_t; } sctp_verb_t;
......
...@@ -186,6 +186,8 @@ typedef enum { ...@@ -186,6 +186,8 @@ typedef enum {
SCTP_IERROR_AUTH_BAD_HMAC, SCTP_IERROR_AUTH_BAD_HMAC,
SCTP_IERROR_AUTH_BAD_KEYID, SCTP_IERROR_AUTH_BAD_KEYID,
SCTP_IERROR_PROTO_VIOLATION, SCTP_IERROR_PROTO_VIOLATION,
SCTP_IERROR_ERROR,
SCTP_IERROR_ABORT,
} sctp_ierror_t; } sctp_ierror_t;
...@@ -407,6 +409,7 @@ typedef enum { ...@@ -407,6 +409,7 @@ typedef enum {
SCTP_RTXR_T3_RTX, SCTP_RTXR_T3_RTX,
SCTP_RTXR_FAST_RTX, SCTP_RTXR_FAST_RTX,
SCTP_RTXR_PMTUD, SCTP_RTXR_PMTUD,
SCTP_RTXR_T1_RTX,
} sctp_retransmit_reason_t; } sctp_retransmit_reason_t;
/* Reasons to lower cwnd. */ /* Reasons to lower cwnd. */
......
...@@ -65,7 +65,6 @@ ...@@ -65,7 +65,6 @@
#ifdef TEST_FRAME #ifdef TEST_FRAME
#undef CONFIG_PROC_FS
#undef CONFIG_SCTP_DBG_OBJCNT #undef CONFIG_SCTP_DBG_OBJCNT
#undef CONFIG_SYSCTL #undef CONFIG_SYSCTL
#endif /* TEST_FRAME */ #endif /* TEST_FRAME */
...@@ -267,6 +266,7 @@ enum ...@@ -267,6 +266,7 @@ enum
SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS,
SCTP_MIB_DELAY_SACK_EXPIREDS, SCTP_MIB_DELAY_SACK_EXPIREDS,
SCTP_MIB_AUTOCLOSE_EXPIREDS, SCTP_MIB_AUTOCLOSE_EXPIREDS,
SCTP_MIB_T1_RETRANSMITS,
SCTP_MIB_T3_RETRANSMITS, SCTP_MIB_T3_RETRANSMITS,
SCTP_MIB_PMTUD_RETRANSMITS, SCTP_MIB_PMTUD_RETRANSMITS,
SCTP_MIB_FAST_RETRANSMITS, SCTP_MIB_FAST_RETRANSMITS,
...@@ -664,6 +664,9 @@ static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag) ...@@ -664,6 +664,9 @@ static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
return (h & (sctp_assoc_hashsize-1)); return (h & (sctp_assoc_hashsize-1));
} }
#define sctp_for_each_hentry(epb, node, head) \
hlist_for_each_entry(epb, node, head, node)
/* Is a socket of this style? */ /* Is a socket of this style? */
#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style)) #define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
static inline int __sctp_style(const struct sock *sk, sctp_socket_type_t style) static inline int __sctp_style(const struct sock *sk, sctp_socket_type_t style)
......
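The new sctp_for_each_hentry() helper is a thin wrapper over the four-argument hlist_for_each_entry(), which needs a scratch struct hlist_node cursor alongside the typed pointer. A distilled sketch of the hash-bucket pattern the conversions below adopt, with made-up structure and function names:

        #include <linux/list.h>

        struct bucket {
                struct hlist_head chain;        /* replaces the hand-rolled chain */
        };

        struct entry {
                struct hlist_node node;
                int port;
        };

        static void bucket_add(struct bucket *b, struct entry *e)
        {
                hlist_add_head(&e->node, &b->chain);    /* as the new __sctp_hash_* do */
        }

        static void bucket_del(struct entry *e)
        {
                if (!hlist_unhashed(&e->node))          /* guard as in unhash_endpoint */
                        hlist_del_init(&e->node);
        }

        static struct entry *bucket_find(struct bucket *b, int port)
        {
                struct entry *e;
                struct hlist_node *pos;

                hlist_for_each_entry(e, pos, &b->chain, node) {
                        if (e->port == port)
                                return e;
                }
                return NULL;
        }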
...@@ -100,20 +100,19 @@ struct crypto_hash; ...@@ -100,20 +100,19 @@ struct crypto_hash;
struct sctp_bind_bucket { struct sctp_bind_bucket {
unsigned short port; unsigned short port;
unsigned short fastreuse; unsigned short fastreuse;
struct sctp_bind_bucket *next; struct hlist_node node;
struct sctp_bind_bucket **pprev;
struct hlist_head owner; struct hlist_head owner;
}; };
struct sctp_bind_hashbucket { struct sctp_bind_hashbucket {
spinlock_t lock; spinlock_t lock;
struct sctp_bind_bucket *chain; struct hlist_head chain;
}; };
/* Used for hashing all associations. */ /* Used for hashing all associations. */
struct sctp_hashbucket { struct sctp_hashbucket {
rwlock_t lock; rwlock_t lock;
struct sctp_ep_common *chain; struct hlist_head chain;
} __attribute__((__aligned__(8))); } __attribute__((__aligned__(8)));
...@@ -212,6 +211,7 @@ extern struct sctp_globals { ...@@ -212,6 +211,7 @@ extern struct sctp_globals {
/* Flag to indicate if addip is enabled. */ /* Flag to indicate if addip is enabled. */
int addip_enable; int addip_enable;
int addip_noauth_enable;
/* Flag to indicate if PR-SCTP is enabled. */ /* Flag to indicate if PR-SCTP is enabled. */
int prsctp_enable; int prsctp_enable;
...@@ -249,6 +249,7 @@ extern struct sctp_globals { ...@@ -249,6 +249,7 @@ extern struct sctp_globals {
#define sctp_local_addr_list (sctp_globals.local_addr_list) #define sctp_local_addr_list (sctp_globals.local_addr_list)
#define sctp_local_addr_lock (sctp_globals.addr_list_lock) #define sctp_local_addr_lock (sctp_globals.addr_list_lock)
#define sctp_addip_enable (sctp_globals.addip_enable) #define sctp_addip_enable (sctp_globals.addip_enable)
#define sctp_addip_noauth (sctp_globals.addip_noauth_enable)
#define sctp_prsctp_enable (sctp_globals.prsctp_enable) #define sctp_prsctp_enable (sctp_globals.prsctp_enable)
#define sctp_auth_enable (sctp_globals.auth_enable) #define sctp_auth_enable (sctp_globals.auth_enable)
...@@ -873,10 +874,11 @@ struct sctp_transport { ...@@ -873,10 +874,11 @@ struct sctp_transport {
* address list derived from the INIT or INIT ACK chunk, a * address list derived from the INIT or INIT ACK chunk, a
* number of data elements needs to be maintained including: * number of data elements needs to be maintained including:
*/ */
__u32 rtt; /* This is the most recent RTT. */
/* RTO : The current retransmission timeout value. */ /* RTO : The current retransmission timeout value. */
unsigned long rto; unsigned long rto;
unsigned long last_rto;
__u32 rtt; /* This is the most recent RTT. */
/* RTTVAR : The current RTT variation. */ /* RTTVAR : The current RTT variation. */
__u32 rttvar; __u32 rttvar;
...@@ -1184,9 +1186,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest, ...@@ -1184,9 +1186,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
int flags); int flags);
int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
__u8 use_as_src, gfp_t gfp); __u8 use_as_src, gfp_t gfp);
int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *, int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
void fastcall (*rcu_call)(struct rcu_head *,
void (*func)(struct rcu_head *)));
int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
struct sctp_sock *); struct sctp_sock *);
union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
...@@ -1229,8 +1229,7 @@ typedef enum { ...@@ -1229,8 +1229,7 @@ typedef enum {
struct sctp_ep_common { struct sctp_ep_common {
/* Fields to help us manage our entries in the hash tables. */ /* Fields to help us manage our entries in the hash tables. */
struct sctp_ep_common *next; struct hlist_node node;
struct sctp_ep_common **pprev;
int hashent; int hashent;
/* Runtime type information. What kind of endpoint is this? */ /* Runtime type information. What kind of endpoint is this? */
...@@ -1541,7 +1540,6 @@ struct sctp_association { ...@@ -1541,7 +1540,6 @@ struct sctp_association {
__u8 asconf_capable; /* Does peer support ADDIP? */ __u8 asconf_capable; /* Does peer support ADDIP? */
__u8 prsctp_capable; /* Can peer do PR-SCTP? */ __u8 prsctp_capable; /* Can peer do PR-SCTP? */
__u8 auth_capable; /* Is peer doing SCTP-AUTH? */ __u8 auth_capable; /* Is peer doing SCTP-AUTH? */
__u8 addip_capable; /* Can peer do ADD-IP */
__u32 adaptation_ind; /* Adaptation Code point. */ __u32 adaptation_ind; /* Adaptation Code point. */
......
...@@ -766,6 +766,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb, ...@@ -766,6 +766,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
if (!nf_bridge) if (!nf_bridge)
return NF_ACCEPT; return NF_ACCEPT;
if (!(nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)))
return NF_ACCEPT;
if (!realoutdev) if (!realoutdev)
return NF_DROP; return NF_DROP;
......
...@@ -2688,7 +2688,7 @@ static void __net_exit dev_proc_net_exit(struct net *net) ...@@ -2688,7 +2688,7 @@ static void __net_exit dev_proc_net_exit(struct net *net)
proc_net_remove(net, "dev"); proc_net_remove(net, "dev");
} }
static struct pernet_operations dev_proc_ops = { static struct pernet_operations __net_initdata dev_proc_ops = {
.init = dev_proc_net_init, .init = dev_proc_net_init,
.exit = dev_proc_net_exit, .exit = dev_proc_net_exit,
}; };
...@@ -4353,7 +4353,7 @@ static void __net_exit netdev_exit(struct net *net) ...@@ -4353,7 +4353,7 @@ static void __net_exit netdev_exit(struct net *net)
kfree(net->dev_index_head); kfree(net->dev_index_head);
} }
static struct pernet_operations netdev_net_ops = { static struct pernet_operations __net_initdata netdev_net_ops = {
.init = netdev_init, .init = netdev_init,
.exit = netdev_exit, .exit = netdev_exit,
}; };
...@@ -4384,7 +4384,7 @@ static void __net_exit default_device_exit(struct net *net) ...@@ -4384,7 +4384,7 @@ static void __net_exit default_device_exit(struct net *net)
rtnl_unlock(); rtnl_unlock();
} }
static struct pernet_operations default_device_ops = { static struct pernet_operations __net_initdata default_device_ops = {
.exit = default_device_exit, .exit = default_device_exit,
}; };
......
...@@ -285,7 +285,7 @@ static void __net_exit dev_mc_net_exit(struct net *net) ...@@ -285,7 +285,7 @@ static void __net_exit dev_mc_net_exit(struct net *net)
proc_net_remove(net, "dev_mcast"); proc_net_remove(net, "dev_mcast");
} }
static struct pernet_operations dev_mc_net_ops = { static struct pernet_operations __net_initdata dev_mc_net_ops = {
.init = dev_mc_net_init, .init = dev_mc_net_init,
.exit = dev_mc_net_exit, .exit = dev_mc_net_exit,
}; };
......
...@@ -188,6 +188,7 @@ static int __init net_ns_init(void) ...@@ -188,6 +188,7 @@ static int __init net_ns_init(void)
pure_initcall(net_ns_init); pure_initcall(net_ns_init);
#ifdef CONFIG_NET_NS
static int register_pernet_operations(struct list_head *list, static int register_pernet_operations(struct list_head *list,
struct pernet_operations *ops) struct pernet_operations *ops)
{ {
...@@ -228,6 +229,23 @@ static void unregister_pernet_operations(struct pernet_operations *ops) ...@@ -228,6 +229,23 @@ static void unregister_pernet_operations(struct pernet_operations *ops)
ops->exit(net); ops->exit(net);
} }
#else
static int register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
if (ops->init == NULL)
return 0;
return ops->init(&init_net);
}
static void unregister_pernet_operations(struct pernet_operations *ops)
{
if (ops->exit)
ops->exit(&init_net);
}
#endif
/** /**
* register_pernet_subsys - register a network namespace subsystem * register_pernet_subsys - register a network namespace subsystem
* @ops: pernet operations structure for the subsystem * @ops: pernet operations structure for the subsystem
......
...@@ -2097,7 +2097,3 @@ EXPORT_SYMBOL(sock_wmalloc); ...@@ -2097,7 +2097,3 @@ EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid); EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino); EXPORT_SYMBOL(sock_i_ino);
EXPORT_SYMBOL(sysctl_optmem_max); EXPORT_SYMBOL(sysctl_optmem_max);
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_rmem_max);
EXPORT_SYMBOL(sysctl_wmem_max);
#endif
...@@ -61,7 +61,7 @@ ...@@ -61,7 +61,7 @@
* 4. Global variable peer_total is modified under the pool lock. * 4. Global variable peer_total is modified under the pool lock.
* 5. struct inet_peer fields modification: * 5. struct inet_peer fields modification:
* avl_left, avl_right, avl_parent, avl_height: pool lock * avl_left, avl_right, avl_parent, avl_height: pool lock
* unused_next, unused_prevp: unused node list lock * unused: unused node list lock
* refcnt: atomically against modifications on other CPU; * refcnt: atomically against modifications on other CPU;
* usually under some other lock to prevent node disappearing * usually under some other lock to prevent node disappearing
* dtime: unused node list lock * dtime: unused node list lock
...@@ -94,8 +94,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min ...@@ -94,8 +94,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
int inet_peer_gc_mintime __read_mostly = 10 * HZ; int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ; int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
static struct inet_peer *inet_peer_unused_head; static LIST_HEAD(unused_peers);
static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
static DEFINE_SPINLOCK(inet_peer_unused_lock); static DEFINE_SPINLOCK(inet_peer_unused_lock);
static void peer_check_expire(unsigned long dummy); static void peer_check_expire(unsigned long dummy);
...@@ -138,15 +137,7 @@ void __init inet_initpeers(void) ...@@ -138,15 +137,7 @@ void __init inet_initpeers(void)
static void unlink_from_unused(struct inet_peer *p) static void unlink_from_unused(struct inet_peer *p)
{ {
spin_lock_bh(&inet_peer_unused_lock); spin_lock_bh(&inet_peer_unused_lock);
if (p->unused_prevp != NULL) { list_del_init(&p->unused);
/* On unused list. */
*p->unused_prevp = p->unused_next;
if (p->unused_next != NULL)
p->unused_next->unused_prevp = p->unused_prevp;
else
inet_peer_unused_tailp = p->unused_prevp;
p->unused_prevp = NULL; /* mark it as removed */
}
spin_unlock_bh(&inet_peer_unused_lock); spin_unlock_bh(&inet_peer_unused_lock);
} }
...@@ -337,24 +328,24 @@ static void unlink_from_pool(struct inet_peer *p) ...@@ -337,24 +328,24 @@ static void unlink_from_pool(struct inet_peer *p)
/* May be called with local BH enabled. */ /* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl) static int cleanup_once(unsigned long ttl)
{ {
struct inet_peer *p; struct inet_peer *p = NULL;
/* Remove the first entry from the list of unused nodes. */ /* Remove the first entry from the list of unused nodes. */
spin_lock_bh(&inet_peer_unused_lock); spin_lock_bh(&inet_peer_unused_lock);
p = inet_peer_unused_head; if (!list_empty(&unused_peers)) {
if (p != NULL) { __u32 delta;
__u32 delta = (__u32)jiffies - p->dtime;
p = list_first_entry(&unused_peers, struct inet_peer, unused);
delta = (__u32)jiffies - p->dtime;
if (delta < ttl) { if (delta < ttl) {
/* Do not prune fresh entries. */ /* Do not prune fresh entries. */
spin_unlock_bh(&inet_peer_unused_lock); spin_unlock_bh(&inet_peer_unused_lock);
return -1; return -1;
} }
inet_peer_unused_head = p->unused_next;
if (p->unused_next != NULL) list_del_init(&p->unused);
p->unused_next->unused_prevp = p->unused_prevp;
else
inet_peer_unused_tailp = p->unused_prevp;
p->unused_prevp = NULL; /* mark as not on the list */
/* Grab an extra reference to prevent node disappearing /* Grab an extra reference to prevent node disappearing
* before unlink_from_pool() call. */ * before unlink_from_pool() call. */
atomic_inc(&p->refcnt); atomic_inc(&p->refcnt);
...@@ -412,7 +403,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create) ...@@ -412,7 +403,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
/* Link the node. */ /* Link the node. */
link_to_pool(n); link_to_pool(n);
n->unused_prevp = NULL; /* not on the list */ INIT_LIST_HEAD(&n->unused);
peer_total++; peer_total++;
write_unlock_bh(&peer_pool_lock); write_unlock_bh(&peer_pool_lock);
...@@ -467,10 +458,7 @@ void inet_putpeer(struct inet_peer *p) ...@@ -467,10 +458,7 @@ void inet_putpeer(struct inet_peer *p)
{ {
spin_lock_bh(&inet_peer_unused_lock); spin_lock_bh(&inet_peer_unused_lock);
if (atomic_dec_and_test(&p->refcnt)) { if (atomic_dec_and_test(&p->refcnt)) {
p->unused_prevp = inet_peer_unused_tailp; list_add_tail(&p->unused, &unused_peers);
p->unused_next = NULL;
*inet_peer_unused_tailp = p;
inet_peer_unused_tailp = &p->unused_next;
p->dtime = (__u32)jiffies; p->dtime = (__u32)jiffies;
} }
spin_unlock_bh(&inet_peer_unused_lock); spin_unlock_bh(&inet_peer_unused_lock);
......
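Embedding a struct list_head replaces the hand-rolled unused_next/unused_prevp pair: list_empty() answers "is anything queued?", list_first_entry() picks the oldest node, and list_del_init() removes a node idempotently, which is why the "mark it as removed" bookkeeping disappears above. A distilled sketch of the same pattern under assumed names (not the inetpeer code itself):

        #include <linux/list.h>
        #include <linux/spinlock.h>
        #include <linux/jiffies.h>

        struct peer {
                struct list_head unused;        /* replaces unused_next/unused_prevp */
                unsigned long dtime;
        };

        static LIST_HEAD(unused_list);
        static DEFINE_SPINLOCK(unused_lock);

        static void peer_mark_unused(struct peer *p)
        {
                spin_lock_bh(&unused_lock);
                list_add_tail(&p->unused, &unused_list);  /* oldest stays at the head */
                p->dtime = jiffies;
                spin_unlock_bh(&unused_lock);
        }

        static struct peer *peer_take_oldest(void)
        {
                struct peer *p = NULL;

                spin_lock_bh(&unused_lock);
                if (!list_empty(&unused_list)) {
                        p = list_first_entry(&unused_list, struct peer, unused);
                        list_del_init(&p->unused);      /* safe to delete again later */
                }
                spin_unlock_bh(&unused_lock);
                return p;
        }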
...@@ -441,7 +441,6 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp) ...@@ -441,7 +441,6 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
} else } else
return NULL; return NULL;
} }
EXPORT_SYMBOL(ip_vs_try_bind_dest);
/* /*
......
...@@ -604,7 +604,6 @@ struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport, ...@@ -604,7 +604,6 @@ struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport,
ip_vs_service_put(svc); ip_vs_service_put(svc);
return dest; return dest;
} }
EXPORT_SYMBOL(ip_vs_find_dest);
/* /*
* Lookup dest by {svc,addr,port} in the destination trash. * Lookup dest by {svc,addr,port} in the destination trash.
......
...@@ -681,7 +681,7 @@ static int clean_nat(struct nf_conn *i, void *data) ...@@ -681,7 +681,7 @@ static int clean_nat(struct nf_conn *i, void *data)
if (!nat) if (!nat)
return 0; return 0;
memset(nat, 0, sizeof(nat)); memset(nat, 0, sizeof(*nat));
i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST); i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
return 0; return 0;
} }
......
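The one-character fix above matters because sizeof applied to a pointer yields the pointer's own width (4 or 8 bytes), not the size of the object it points to, so the old code only cleared the first few bytes of the NAT extension. A tiny user-space illustration with a made-up structure name:

        #include <stdio.h>
        #include <string.h>

        struct nat_like {
                char state[64];
        };

        int main(void)
        {
                struct nat_like obj = { .state = "dirty" };
                struct nat_like *nat = &obj;

                printf("sizeof(nat)  = %zu\n", sizeof(nat));   /* pointer size: 4 or 8 */
                printf("sizeof(*nat) = %zu\n", sizeof(*nat));  /* whole object: 64     */

                memset(nat, 0, sizeof(*nat));                  /* clears all 64 bytes  */
                return 0;
        }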
...@@ -1037,6 +1037,7 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt) ...@@ -1037,6 +1037,7 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
ndmsg = nlmsg_data(nlh); ndmsg = nlmsg_data(nlh);
ndmsg->nduseropt_family = AF_INET6; ndmsg->nduseropt_family = AF_INET6;
ndmsg->nduseropt_ifindex = ra->dev->ifindex;
ndmsg->nduseropt_icmp_type = icmp6h->icmp6_type; ndmsg->nduseropt_icmp_type = icmp6h->icmp6_type;
ndmsg->nduseropt_icmp_code = icmp6h->icmp6_code; ndmsg->nduseropt_icmp_code = icmp6h->icmp6_code;
ndmsg->nduseropt_opts_len = opt->nd_opt_len << 3; ndmsg->nduseropt_opts_len = opt->nd_opt_len << 3;
......
...@@ -60,46 +60,57 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg) ...@@ -60,46 +60,57 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
} }
EXPORT_SYMBOL(nf_unregister_sockopt); EXPORT_SYMBOL(nf_unregister_sockopt);
/* Call get/setsockopt() */ static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, int pf,
static int nf_sockopt(struct sock *sk, int pf, int val, int val, int get)
char __user *opt, int *len, int get)
{ {
struct nf_sockopt_ops *ops; struct nf_sockopt_ops *ops;
int ret;
if (sk->sk_net != &init_net) if (sk->sk_net != &init_net)
return -ENOPROTOOPT; return ERR_PTR(-ENOPROTOOPT);
if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
return -EINTR; return ERR_PTR(-EINTR);
list_for_each_entry(ops, &nf_sockopts, list) { list_for_each_entry(ops, &nf_sockopts, list) {
if (ops->pf == pf) { if (ops->pf == pf) {
if (!try_module_get(ops->owner)) if (!try_module_get(ops->owner))
goto out_nosup; goto out_nosup;
if (get) { if (get) {
if (val >= ops->get_optmin if (val >= ops->get_optmin &&
&& val < ops->get_optmax) { val < ops->get_optmax)
mutex_unlock(&nf_sockopt_mutex);
ret = ops->get(sk, val, opt, len);
goto out; goto out;
}
} else { } else {
if (val >= ops->set_optmin if (val >= ops->set_optmin &&
&& val < ops->set_optmax) { val < ops->set_optmax)
mutex_unlock(&nf_sockopt_mutex);
ret = ops->set(sk, val, opt, *len);
goto out; goto out;
}
} }
module_put(ops->owner); module_put(ops->owner);
} }
} }
out_nosup: out_nosup:
ops = ERR_PTR(-ENOPROTOOPT);
out:
mutex_unlock(&nf_sockopt_mutex); mutex_unlock(&nf_sockopt_mutex);
return -ENOPROTOOPT; return ops;
}
/* Call get/setsockopt() */
static int nf_sockopt(struct sock *sk, int pf, int val,
char __user *opt, int *len, int get)
{
struct nf_sockopt_ops *ops;
int ret;
ops = nf_sockopt_find(sk, pf, val, get);
if (IS_ERR(ops))
return PTR_ERR(ops);
if (get)
ret = ops->get(sk, val, opt, len);
else
ret = ops->set(sk, val, opt, *len);
out:
module_put(ops->owner); module_put(ops->owner);
return ret; return ret;
} }
...@@ -124,51 +135,22 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val, ...@@ -124,51 +135,22 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
struct nf_sockopt_ops *ops; struct nf_sockopt_ops *ops;
int ret; int ret;
if (sk->sk_net != &init_net) ops = nf_sockopt_find(sk, pf, val, get);
return -ENOPROTOOPT; if (IS_ERR(ops))
return PTR_ERR(ops);
if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) if (get) {
return -EINTR; if (ops->compat_get)
ret = ops->compat_get(sk, val, opt, len);
list_for_each_entry(ops, &nf_sockopts, list) { else
if (ops->pf == pf) { ret = ops->get(sk, val, ops, len);
if (!try_module_get(ops->owner)) } else {
goto out_nosup; if (ops->compat_set)
ret = ops->compat_set(sk, val, ops, *len);
if (get) { else
if (val >= ops->get_optmin ret = ops->set(sk, val, ops, *len);
&& val < ops->get_optmax) {
mutex_unlock(&nf_sockopt_mutex);
if (ops->compat_get)
ret = ops->compat_get(sk,
val, opt, len);
else
ret = ops->get(sk,
val, opt, len);
goto out;
}
} else {
if (val >= ops->set_optmin
&& val < ops->set_optmax) {
mutex_unlock(&nf_sockopt_mutex);
if (ops->compat_set)
ret = ops->compat_set(sk,
val, opt, *len);
else
ret = ops->set(sk,
val, opt, *len);
goto out;
}
}
module_put(ops->owner);
}
} }
out_nosup:
mutex_unlock(&nf_sockopt_mutex);
return -ENOPROTOOPT;
out:
module_put(ops->owner); module_put(ops->owner);
return ret; return ret;
} }
......
...@@ -170,7 +170,7 @@ static bool xt_time_match(const struct sk_buff *skb, ...@@ -170,7 +170,7 @@ static bool xt_time_match(const struct sk_buff *skb,
if (skb->tstamp.tv64 == 0) if (skb->tstamp.tv64 == 0)
__net_timestamp((struct sk_buff *)skb); __net_timestamp((struct sk_buff *)skb);
stamp = skb->tstamp.tv64; stamp = ktime_to_ns(skb->tstamp);
do_div(stamp, NSEC_PER_SEC); do_div(stamp, NSEC_PER_SEC);
if (info->flags & XT_TIME_LOCAL_TZ) if (info->flags & XT_TIME_LOCAL_TZ)
......
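The xt_time change is needed because ktime_t is stored differently depending on CONFIG_KTIME_SCALAR, so reading .tv64 directly is only nanoseconds on the scalar configuration; the ktime_to_ns() accessor is correct everywhere. Splitting the value into seconds then goes through do_div(), which divides a u64 in place and returns the 32-bit remainder. A small sketch with a hypothetical helper name:

        #include <linux/ktime.h>
        #include <linux/time.h>
        #include <asm/div64.h>

        static void split_stamp(ktime_t t, u64 *sec, u32 *nsec)
        {
                u64 stamp = ktime_to_ns(t);     /* correct on every configuration */

                *nsec = do_div(stamp, NSEC_PER_SEC);    /* remainder: nanoseconds */
                *sec  = stamp;                          /* quotient left in stamp */
        }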
...@@ -1888,7 +1888,7 @@ static void __net_exit netlink_net_exit(struct net *net) ...@@ -1888,7 +1888,7 @@ static void __net_exit netlink_net_exit(struct net *net)
#endif #endif
} }
static struct pernet_operations netlink_net_ops = { static struct pernet_operations __net_initdata netlink_net_ops = {
.init = netlink_net_init, .init = netlink_net_init,
.exit = netlink_net_exit, .exit = netlink_net_exit,
}; };
......
...@@ -881,20 +881,14 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc ...@@ -881,20 +881,14 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
if (protocol == 0) if (protocol == 0)
goto out_unlock; goto out_unlock;
if (dev) { if (!dev || (dev->flags & IFF_UP)) {
if (dev->flags&IFF_UP) {
dev_add_pack(&po->prot_hook);
sock_hold(sk);
po->running = 1;
} else {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
}
} else {
dev_add_pack(&po->prot_hook); dev_add_pack(&po->prot_hook);
sock_hold(sk); sock_hold(sk);
po->running = 1; po->running = 1;
} else {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
} }
out_unlock: out_unlock:
......
...@@ -114,7 +114,7 @@ static int rxrpc_create_local(struct rxrpc_local *local) ...@@ -114,7 +114,7 @@ static int rxrpc_create_local(struct rxrpc_local *local)
return 0; return 0;
error: error:
local->socket->ops->shutdown(local->socket, 2); kernel_sock_shutdown(local->socket, SHUT_RDWR);
local->socket->sk->sk_user_data = NULL; local->socket->sk->sk_user_data = NULL;
sock_release(local->socket); sock_release(local->socket);
local->socket = NULL; local->socket = NULL;
...@@ -267,7 +267,7 @@ static void rxrpc_destroy_local(struct work_struct *work) ...@@ -267,7 +267,7 @@ static void rxrpc_destroy_local(struct work_struct *work)
/* finish cleaning up the local descriptor */ /* finish cleaning up the local descriptor */
rxrpc_purge_queue(&local->accept_queue); rxrpc_purge_queue(&local->accept_queue);
rxrpc_purge_queue(&local->reject_queue); rxrpc_purge_queue(&local->reject_queue);
local->socket->ops->shutdown(local->socket, 2); kernel_sock_shutdown(local->socket, SHUT_RDWR);
sock_release(local->socket); sock_release(local->socket);
up_read(&rxrpc_local_sem); up_read(&rxrpc_local_sem);
......
...@@ -262,10 +262,14 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a ...@@ -262,10 +262,14 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
*/ */
asoc->peer.sack_needed = 1; asoc->peer.sack_needed = 1;
/* Assume that the peer recongizes ASCONF until reported otherwise /* Assume that the peer will tell us if he recognizes ASCONF
* via an ERROR chunk. * as part of INIT exchange.
* The sctp_addip_noauth option is there for backward compatibility
* and will revert old behavior.
*/ */
asoc->peer.asconf_capable = 1; asoc->peer.asconf_capable = 0;
if (sctp_addip_noauth)
asoc->peer.asconf_capable = 1;
/* Create an input queue. */ /* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue); sctp_inq_init(&asoc->base.inqueue);
......
...@@ -180,9 +180,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, ...@@ -180,9 +180,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
/* Delete an address from the bind address list in the SCTP_bind_addr /* Delete an address from the bind address list in the SCTP_bind_addr
* structure. * structure.
*/ */
int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr, int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
void fastcall (*rcu_call)(struct rcu_head *head,
void (*func)(struct rcu_head *head)))
{ {
struct sctp_sockaddr_entry *addr, *temp; struct sctp_sockaddr_entry *addr, *temp;
...@@ -198,15 +196,10 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr, ...@@ -198,15 +196,10 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
} }
} }
/* Call the rcu callback provided in the args. This function is
* called by both BH packet processing and user side socket option
* processing, but it works on different lists in those 2 contexts.
* Each context provides it's own callback, whether call_rcu_bh()
* or call_rcu(), to make sure that we wait for an appropriate time.
*/
if (addr && !addr->valid) { if (addr && !addr->valid) {
rcu_call(&addr->rcu, sctp_local_addr_free); call_rcu(&addr->rcu, sctp_local_addr_free);
SCTP_DBG_OBJCNT_DEC(addr); SCTP_DBG_OBJCNT_DEC(addr);
return 0;
} }
return -EINVAL; return -EINVAL;
......
...@@ -328,24 +328,35 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc( ...@@ -328,24 +328,35 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
const union sctp_addr *paddr, const union sctp_addr *paddr,
struct sctp_transport **transport) struct sctp_transport **transport)
{ {
struct sctp_association *asoc = NULL;
struct sctp_transport *t = NULL;
struct sctp_hashbucket *head;
struct sctp_ep_common *epb;
struct hlist_node *node;
int hash;
int rport; int rport;
struct sctp_association *asoc;
struct list_head *pos;
*transport = NULL;
rport = ntohs(paddr->v4.sin_port); rport = ntohs(paddr->v4.sin_port);
list_for_each(pos, &ep->asocs) { hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
asoc = list_entry(pos, struct sctp_association, asocs); head = &sctp_assoc_hashtable[hash];
if (rport == asoc->peer.port) { read_lock(&head->lock);
*transport = sctp_assoc_lookup_paddr(asoc, paddr); sctp_for_each_hentry(epb, node, &head->chain) {
asoc = sctp_assoc(epb);
if (*transport) if (asoc->ep != ep || rport != asoc->peer.port)
return asoc; goto next;
t = sctp_assoc_lookup_paddr(asoc, paddr);
if (t) {
*transport = t;
break;
} }
next:
asoc = NULL;
} }
read_unlock(&head->lock);
*transport = NULL; return asoc;
return NULL;
} }
/* Lookup association on an endpoint based on a peer address. BH-safe. */ /* Lookup association on an endpoint based on a peer address. BH-safe. */
......
...@@ -656,7 +656,6 @@ static int sctp_rcv_ootb(struct sk_buff *skb) ...@@ -656,7 +656,6 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
/* Insert endpoint into the hash table. */ /* Insert endpoint into the hash table. */
static void __sctp_hash_endpoint(struct sctp_endpoint *ep) static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
{ {
struct sctp_ep_common **epp;
struct sctp_ep_common *epb; struct sctp_ep_common *epb;
struct sctp_hashbucket *head; struct sctp_hashbucket *head;
...@@ -666,12 +665,7 @@ static void __sctp_hash_endpoint(struct sctp_endpoint *ep) ...@@ -666,12 +665,7 @@ static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
head = &sctp_ep_hashtable[epb->hashent]; head = &sctp_ep_hashtable[epb->hashent];
sctp_write_lock(&head->lock); sctp_write_lock(&head->lock);
epp = &head->chain; hlist_add_head(&epb->node, &head->chain);
epb->next = *epp;
if (epb->next)
(*epp)->pprev = &epb->next;
*epp = epb;
epb->pprev = epp;
sctp_write_unlock(&head->lock); sctp_write_unlock(&head->lock);
} }
...@@ -691,19 +685,15 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep) ...@@ -691,19 +685,15 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
epb = &ep->base; epb = &ep->base;
if (hlist_unhashed(&epb->node))
return;
epb->hashent = sctp_ep_hashfn(epb->bind_addr.port); epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
head = &sctp_ep_hashtable[epb->hashent]; head = &sctp_ep_hashtable[epb->hashent];
sctp_write_lock(&head->lock); sctp_write_lock(&head->lock);
__hlist_del(&epb->node);
if (epb->pprev) {
if (epb->next)
epb->next->pprev = epb->pprev;
*epb->pprev = epb->next;
epb->pprev = NULL;
}
sctp_write_unlock(&head->lock); sctp_write_unlock(&head->lock);
} }
...@@ -721,12 +711,13 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l ...@@ -721,12 +711,13 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
struct sctp_hashbucket *head; struct sctp_hashbucket *head;
struct sctp_ep_common *epb; struct sctp_ep_common *epb;
struct sctp_endpoint *ep; struct sctp_endpoint *ep;
struct hlist_node *node;
int hash; int hash;
hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port)); hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
head = &sctp_ep_hashtable[hash]; head = &sctp_ep_hashtable[hash];
read_lock(&head->lock); read_lock(&head->lock);
for (epb = head->chain; epb; epb = epb->next) { sctp_for_each_hentry(epb, node, &head->chain) {
ep = sctp_ep(epb); ep = sctp_ep(epb);
if (sctp_endpoint_is_match(ep, laddr)) if (sctp_endpoint_is_match(ep, laddr))
goto hit; goto hit;
...@@ -744,7 +735,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l ...@@ -744,7 +735,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
/* Insert association into the hash table. */ /* Insert association into the hash table. */
static void __sctp_hash_established(struct sctp_association *asoc) static void __sctp_hash_established(struct sctp_association *asoc)
{ {
struct sctp_ep_common **epp;
struct sctp_ep_common *epb; struct sctp_ep_common *epb;
struct sctp_hashbucket *head; struct sctp_hashbucket *head;
...@@ -756,12 +746,7 @@ static void __sctp_hash_established(struct sctp_association *asoc) ...@@ -756,12 +746,7 @@ static void __sctp_hash_established(struct sctp_association *asoc)
head = &sctp_assoc_hashtable[epb->hashent]; head = &sctp_assoc_hashtable[epb->hashent];
sctp_write_lock(&head->lock); sctp_write_lock(&head->lock);
epp = &head->chain; hlist_add_head(&epb->node, &head->chain);
epb->next = *epp;
if (epb->next)
(*epp)->pprev = &epb->next;
*epp = epb;
epb->pprev = epp;
sctp_write_unlock(&head->lock); sctp_write_unlock(&head->lock);
} }
...@@ -790,14 +775,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc) ...@@ -790,14 +775,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
head = &sctp_assoc_hashtable[epb->hashent]; head = &sctp_assoc_hashtable[epb->hashent];
sctp_write_lock(&head->lock); sctp_write_lock(&head->lock);
__hlist_del(&epb->node);
if (epb->pprev) {
if (epb->next)
epb->next->pprev = epb->pprev;
*epb->pprev = epb->next;
epb->pprev = NULL;
}
sctp_write_unlock(&head->lock); sctp_write_unlock(&head->lock);
} }
...@@ -822,6 +800,7 @@ static struct sctp_association *__sctp_lookup_association( ...@@ -822,6 +800,7 @@ static struct sctp_association *__sctp_lookup_association(
struct sctp_ep_common *epb; struct sctp_ep_common *epb;
struct sctp_association *asoc; struct sctp_association *asoc;
struct sctp_transport *transport; struct sctp_transport *transport;
struct hlist_node *node;
int hash; int hash;
/* Optimize here for direct hit, only listening connections can /* Optimize here for direct hit, only listening connections can
...@@ -830,7 +809,7 @@ static struct sctp_association *__sctp_lookup_association( ...@@ -830,7 +809,7 @@ static struct sctp_association *__sctp_lookup_association(
hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port)); hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
head = &sctp_assoc_hashtable[hash]; head = &sctp_assoc_hashtable[hash];
read_lock(&head->lock); read_lock(&head->lock);
for (epb = head->chain; epb; epb = epb->next) { sctp_for_each_hentry(epb, node, &head->chain) {
asoc = sctp_assoc(epb); asoc = sctp_assoc(epb);
transport = sctp_assoc_is_match(asoc, local, peer); transport = sctp_assoc_is_match(asoc, local, peer);
if (transport) if (transport)
......
...@@ -90,6 +90,10 @@ void sctp_inq_free(struct sctp_inq *queue) ...@@ -90,6 +90,10 @@ void sctp_inq_free(struct sctp_inq *queue)
void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
{ {
/* Directly call the packet handling routine. */ /* Directly call the packet handling routine. */
if (chunk->rcvr->dead) {
sctp_chunk_free(chunk);
return;
}
/* We are now calling this either from the soft interrupt /* We are now calling this either from the soft interrupt
* or from the backlog processing. * or from the backlog processing.
......
...@@ -382,7 +382,7 @@ static void sctp_insert_list(struct list_head *head, struct list_head *new) ...@@ -382,7 +382,7 @@ static void sctp_insert_list(struct list_head *head, struct list_head *new)
/* Mark all the eligible packets on a transport for retransmission. */ /* Mark all the eligible packets on a transport for retransmission. */
void sctp_retransmit_mark(struct sctp_outq *q, void sctp_retransmit_mark(struct sctp_outq *q,
struct sctp_transport *transport, struct sctp_transport *transport,
__u8 fast_retransmit) __u8 reason)
{ {
struct list_head *lchunk, *ltemp; struct list_head *lchunk, *ltemp;
struct sctp_chunk *chunk; struct sctp_chunk *chunk;
...@@ -412,20 +412,20 @@ void sctp_retransmit_mark(struct sctp_outq *q, ...@@ -412,20 +412,20 @@ void sctp_retransmit_mark(struct sctp_outq *q,
continue; continue;
} }
/* If we are doing retransmission due to a fast retransmit, /* If we are doing retransmission due to a timeout or pmtu
* only the chunk's that are marked for fast retransmit * discovery, only the chunks that are not yet acked should
* should be added to the retransmit queue. If we are doing * be added to the retransmit queue.
* retransmission due to a timeout or pmtu discovery, only the
* chunks that are not yet acked should be added to the
* retransmit queue.
*/ */
if ((fast_retransmit && (chunk->fast_retransmit > 0)) || if ((reason == SCTP_RTXR_FAST_RTX &&
(!fast_retransmit && !chunk->tsn_gap_acked)) { (chunk->fast_retransmit > 0)) ||
(reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
/* If this chunk was sent less than 1 rto ago, do not /* If this chunk was sent less than 1 rto ago, do not
* retransmit this chunk, but give the peer time * retransmit this chunk, but give the peer time
* to acknowledge it. * to acknowledge it. Do this only when
* retransmitting due to T3 timeout.
*/ */
if ((jiffies - chunk->sent_at) < transport->rto) if (reason == SCTP_RTXR_T3_RTX &&
(jiffies - chunk->sent_at) < transport->last_rto)
continue; continue;
/* RFC 2960 6.2.1 Processing a Received SACK /* RFC 2960 6.2.1 Processing a Received SACK
...@@ -467,10 +467,10 @@ void sctp_retransmit_mark(struct sctp_outq *q, ...@@ -467,10 +467,10 @@ void sctp_retransmit_mark(struct sctp_outq *q,
} }
} }
SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, " SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
"cwnd: %d, ssthresh: %d, flight_size: %d, " "cwnd: %d, ssthresh: %d, flight_size: %d, "
"pba: %d\n", __FUNCTION__, "pba: %d\n", __FUNCTION__,
transport, fast_retransmit, transport, reason,
transport->cwnd, transport->ssthresh, transport->cwnd, transport->ssthresh,
transport->flight_size, transport->flight_size,
transport->partial_bytes_acked); transport->partial_bytes_acked);
...@@ -484,7 +484,6 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, ...@@ -484,7 +484,6 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
sctp_retransmit_reason_t reason) sctp_retransmit_reason_t reason)
{ {
int error = 0; int error = 0;
__u8 fast_retransmit = 0;
switch(reason) { switch(reason) {
case SCTP_RTXR_T3_RTX: case SCTP_RTXR_T3_RTX:
...@@ -499,16 +498,18 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, ...@@ -499,16 +498,18 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
case SCTP_RTXR_FAST_RTX: case SCTP_RTXR_FAST_RTX:
SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS); SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
fast_retransmit = 1;
break; break;
case SCTP_RTXR_PMTUD: case SCTP_RTXR_PMTUD:
SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS); SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
break; break;
case SCTP_RTXR_T1_RTX:
SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
break;
default: default:
BUG(); BUG();
} }
sctp_retransmit_mark(q, transport, fast_retransmit); sctp_retransmit_mark(q, transport, reason);
/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination, /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
* the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
...@@ -641,7 +642,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, ...@@ -641,7 +642,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
/* If we are here due to a retransmit timeout or a fast /* If we are here due to a retransmit timeout or a fast
* retransmit and if there are any chunks left in the retransmit * retransmit and if there are any chunks left in the retransmit
* queue that could not fit in the PMTU sized packet, they need * to be marked as ineligible for a subsequent fast retransmit. * queue that could not fit in the PMTU sized packet, they need
* to be marked as ineligible for a subsequent fast retransmit.
*/ */
if (rtx_timeout && !lchunk) { if (rtx_timeout && !lchunk) {
list_for_each(lchunk1, lqueue) { list_for_each(lchunk1, lqueue) {
...@@ -660,10 +662,9 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, ...@@ -660,10 +662,9 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
int sctp_outq_uncork(struct sctp_outq *q) int sctp_outq_uncork(struct sctp_outq *q)
{ {
int error = 0; int error = 0;
if (q->cork) { if (q->cork)
q->cork = 0; q->cork = 0;
error = sctp_outq_flush(q, 0); error = sctp_outq_flush(q, 0);
}
return error; return error;
} }
......
@@ -225,6 +225,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
 struct sctp_ep_common *epb;
 struct sctp_endpoint *ep;
 struct sock *sk;
+struct hlist_node *node;
 int hash = *(loff_t *)v;
 if (hash >= sctp_ep_hashsize)
@@ -233,7 +234,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
 head = &sctp_ep_hashtable[hash];
 sctp_local_bh_disable();
 read_lock(&head->lock);
-for (epb = head->chain; epb; epb = epb->next) {
+sctp_for_each_hentry(epb, node, &head->chain) {
 ep = sctp_ep(epb);
 sk = epb->sk;
 seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
@@ -328,6 +329,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 struct sctp_ep_common *epb;
 struct sctp_association *assoc;
 struct sock *sk;
+struct hlist_node *node;
 int hash = *(loff_t *)v;
 if (hash >= sctp_assoc_hashsize)
@@ -336,7 +338,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 head = &sctp_assoc_hashtable[hash];
 sctp_local_bh_disable();
 read_lock(&head->lock);
-for (epb = head->chain; epb; epb = epb->next) {
+sctp_for_each_hentry(epb, node, &head->chain) {
 assoc = sctp_assoc(epb);
 sk = epb->sk;
 seq_printf(seq,
......
@@ -1137,7 +1137,7 @@ SCTP_STATIC __init int sctp_init(void)
 }
 for (i = 0; i < sctp_assoc_hashsize; i++) {
 rwlock_init(&sctp_assoc_hashtable[i].lock);
-sctp_assoc_hashtable[i].chain = NULL;
+INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain);
 }
 /* Allocate and initialize the endpoint hash table. */
@@ -1151,7 +1151,7 @@ SCTP_STATIC __init int sctp_init(void)
 }
 for (i = 0; i < sctp_ep_hashsize; i++) {
 rwlock_init(&sctp_ep_hashtable[i].lock);
-sctp_ep_hashtable[i].chain = NULL;
+INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
 }
 /* Allocate and initialize the SCTP port hash table. */
@@ -1170,7 +1170,7 @@ SCTP_STATIC __init int sctp_init(void)
 }
 for (i = 0; i < sctp_port_hashsize; i++) {
 spin_lock_init(&sctp_port_hashtable[i].lock);
-sctp_port_hashtable[i].chain = NULL;
+INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
 }
 printk(KERN_INFO "SCTP: Hash tables configured "
@@ -1179,6 +1179,7 @@ SCTP_STATIC __init int sctp_init(void)
 /* Disable ADDIP by default. */
 sctp_addip_enable = 0;
+sctp_addip_noauth = 0;
 /* Enable PR-SCTP by default. */
 sctp_prsctp_enable = 1;
......
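The proc and init hunks above are part of converting the SCTP endpoint, association and port hash chains to the kernel's intrusive hlist type (INIT_HLIST_HEAD for the heads, sctp_for_each_hentry for traversal, hlist_add_head/__hlist_del later in socket.c). For readers who have not met the pattern, here is a minimal userspace sketch of an intrusive hlist; the helper names mirror the kernel API, but everything below is an illustration, not kernel code:

```c
#include <stdio.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

#define INIT_HLIST_HEAD(h) ((h)->first = NULL)

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void __hlist_del(struct hlist_node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

/* An entry embeds the node, the way the bind bucket in the diff embeds one. */
struct bucket {
	int port;
	struct hlist_node node;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct hlist_head head;
	struct bucket a = { 9899 }, b = { 9900 };
	struct hlist_node *pos;

	INIT_HLIST_HEAD(&head);
	hlist_add_head(&a.node, &head);
	hlist_add_head(&b.node, &head);

	for (pos = head.first; pos; pos = pos->next)
		printf("port %d\n", container_of(pos, struct bucket, node)->port);

	__hlist_del(&a.node);
	return 0;
}
```

The advantage over the hand-rolled next/pprev fields being removed in these patches is that one well-tested set of helpers handles insertion and unlinking for every chained structure.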
@@ -1788,9 +1788,14 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
 sizeof(sctp_paramhdr_t);
+/* This is a fatal error. Any accumulated non-fatal errors are
+ * not reported.
+ */
+if (*errp)
+sctp_chunk_free(*errp);
 /* Create an error chunk and fill it in with our payload. */
-if (!*errp)
-*errp = sctp_make_op_error_space(asoc, chunk, payload_len);
+*errp = sctp_make_op_error_space(asoc, chunk, payload_len);
 if (*errp) {
 sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
@@ -1813,9 +1818,15 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
 {
 __u16 len = ntohs(param.p->length);
-/* Make an ERROR chunk. */
-if (!*errp)
-*errp = sctp_make_op_error_space(asoc, chunk, len);
+/* Processing of the HOST_NAME parameter will generate an
+ * ABORT. If we've accumulated any non-fatal errors, they
+ * would be unrecognized parameters and we should not include
+ * them in the ABORT.
+ */
+if (*errp)
+sctp_chunk_free(*errp);
+*errp = sctp_make_op_error_space(asoc, chunk, len);
 if (*errp) {
 sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
@@ -1847,7 +1858,7 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
 break;
 case SCTP_CID_ASCONF:
 case SCTP_CID_ASCONF_ACK:
-asoc->peer.addip_capable = 1;
+asoc->peer.asconf_capable = 1;
 break;
 default:
 break;
@@ -1862,56 +1873,40 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
 * taken if the processing endpoint does not recognize the
 * Parameter Type.
 *
-* 00 - Stop processing this SCTP chunk and discard it,
-* do not process any further chunks within it.
+* 00 - Stop processing this parameter; do not process any further
+* parameters within this chunk
 *
-* 01 - Stop processing this SCTP chunk and discard it,
-* do not process any further chunks within it, and report
-* the unrecognized parameter in an 'Unrecognized
-* Parameter Type' (in either an ERROR or in the INIT ACK).
+* 01 - Stop processing this parameter, do not process any further
+* parameters within this chunk, and report the unrecognized
+* parameter in an 'Unrecognized Parameter' ERROR chunk.
 *
 * 10 - Skip this parameter and continue processing.
 *
 * 11 - Skip this parameter and continue processing but
 * report the unrecognized parameter in an
-* 'Unrecognized Parameter Type' (in either an ERROR or in
-* the INIT ACK).
+* 'Unrecognized Parameter' ERROR chunk.
 *
 * Return value:
-* 0 - discard the chunk
-* 1 - continue with the chunk
+* SCTP_IERROR_NO_ERROR - continue with the chunk
+* SCTP_IERROR_ERROR - stop and report an error.
+* SCTP_IERROR_NOMEME - out of memory.
 */
-static int sctp_process_unk_param(const struct sctp_association *asoc,
+static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
 union sctp_params param,
 struct sctp_chunk *chunk,
 struct sctp_chunk **errp)
 {
-int retval = 1;
+int retval = SCTP_IERROR_NO_ERROR;
 switch (param.p->type & SCTP_PARAM_ACTION_MASK) {
 case SCTP_PARAM_ACTION_DISCARD:
-retval = 0;
+retval = SCTP_IERROR_ERROR;
-break;
-case SCTP_PARAM_ACTION_DISCARD_ERR:
-retval = 0;
-/* Make an ERROR chunk, preparing enough room for
-* returning multiple unknown parameters.
-*/
-if (NULL == *errp)
-*errp = sctp_make_op_error_space(asoc, chunk,
-ntohs(chunk->chunk_hdr->length));
-if (*errp) {
-sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-WORD_ROUND(ntohs(param.p->length)));
-sctp_addto_chunk(*errp,
-WORD_ROUND(ntohs(param.p->length)),
-param.v);
-}
 break;
 case SCTP_PARAM_ACTION_SKIP:
 break;
+case SCTP_PARAM_ACTION_DISCARD_ERR:
+retval = SCTP_IERROR_ERROR;
+/* Fall through */
 case SCTP_PARAM_ACTION_SKIP_ERR:
 /* Make an ERROR chunk, preparing enough room for
 * returning multiple unknown parameters.
@@ -1932,9 +1927,8 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 * to the peer and the association won't be
 * established.
 */
-retval = 0;
+retval = SCTP_IERROR_NOMEM;
 }
 break;
 default:
 break;
@@ -1943,18 +1937,20 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 return retval;
 }
-/* Find unrecognized parameters in the chunk.
+/* Verify variable length parameters
 * Return values:
-* 0 - discard the chunk
-* 1 - continue with the chunk
+* SCTP_IERROR_ABORT - trigger an ABORT
+* SCTP_IERROR_NOMEM - out of memory (abort)
+* SCTP_IERROR_ERROR - stop processing, trigger an ERROR
+* SCTP_IERROR_NO_ERROR - continue with the chunk
 */
-static int sctp_verify_param(const struct sctp_association *asoc,
+static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
 union sctp_params param,
 sctp_cid_t cid,
 struct sctp_chunk *chunk,
 struct sctp_chunk **err_chunk)
 {
-int retval = 1;
+int retval = SCTP_IERROR_NO_ERROR;
 /* FIXME - This routine is not looking at each parameter per the
 * chunk type, i.e., unrecognized parameters should be further
@@ -1976,7 +1972,9 @@ static int sctp_verify_param(const struct sctp_association *asoc,
 case SCTP_PARAM_HOST_NAME_ADDRESS:
 /* Tell the peer, we won't support this param. */
-return sctp_process_hn_param(asoc, param, chunk, err_chunk);
+sctp_process_hn_param(asoc, param, chunk, err_chunk);
+retval = SCTP_IERROR_ABORT;
+break;
 case SCTP_PARAM_FWD_TSN_SUPPORT:
 if (sctp_prsctp_enable)
@@ -1993,9 +1991,11 @@ static int sctp_verify_param(const struct sctp_association *asoc,
 * cause 'Protocol Violation'.
 */
 if (SCTP_AUTH_RANDOM_LENGTH !=
-ntohs(param.p->length) - sizeof(sctp_paramhdr_t))
-return sctp_process_inv_paramlength(asoc, param.p,
+ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) {
+sctp_process_inv_paramlength(asoc, param.p,
 chunk, err_chunk);
+retval = SCTP_IERROR_ABORT;
+}
 break;
 case SCTP_PARAM_CHUNKS:
@@ -2007,9 +2007,11 @@ static int sctp_verify_param(const struct sctp_association *asoc,
 * INIT-ACK chunk if the sender wants to receive authenticated
 * chunks. Its maximum length is 260 bytes.
 */
-if (260 < ntohs(param.p->length))
-return sctp_process_inv_paramlength(asoc, param.p,
+if (260 < ntohs(param.p->length)) {
+sctp_process_inv_paramlength(asoc, param.p,
 chunk, err_chunk);
+retval = SCTP_IERROR_ABORT;
+}
 break;
 case SCTP_PARAM_HMAC_ALGO:
@@ -2020,8 +2022,7 @@ static int sctp_verify_param(const struct sctp_association *asoc,
 default:
 SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
 ntohs(param.p->type), cid);
-return sctp_process_unk_param(asoc, param, chunk, err_chunk);
+retval = sctp_process_unk_param(asoc, param, chunk, err_chunk);
 break;
 }
 return retval;
@@ -2036,6 +2037,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
 {
 union sctp_params param;
 int has_cookie = 0;
+int result;
 /* Verify stream values are non-zero. */
 if ((0 == peer_init->init_hdr.num_outbound_streams) ||
@@ -2043,8 +2045,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
 (0 == peer_init->init_hdr.init_tag) ||
 (SCTP_DEFAULT_MINWINDOW > ntohl(peer_init->init_hdr.a_rwnd))) {
-sctp_process_inv_mandatory(asoc, chunk, errp);
-return 0;
+return sctp_process_inv_mandatory(asoc, chunk, errp);
 }
 /* Check for missing mandatory parameters. */
@@ -2062,29 +2063,29 @@ int sctp_verify_init(const struct sctp_association *asoc,
 * VIOLATION error. We build the ERROR chunk here and let the normal
 * error handling code build and send the packet.
 */
-if (param.v != (void*)chunk->chunk_end) {
-sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
-return 0;
-}
+if (param.v != (void*)chunk->chunk_end)
+return sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
 /* The only missing mandatory param possible today is
 * the state cookie for an INIT-ACK chunk.
 */
-if ((SCTP_CID_INIT_ACK == cid) && !has_cookie) {
+if ((SCTP_CID_INIT_ACK == cid) && !has_cookie)
-sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
+return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
 chunk, errp);
-return 0;
-}
-/* Find unrecognized parameters. */
+/* Verify all the variable length parameters */
 sctp_walk_params(param, peer_init, init_hdr.params) {
-if (!sctp_verify_param(asoc, param, cid, chunk, errp)) {
+result = sctp_verify_param(asoc, param, cid, chunk, errp);
-if (SCTP_PARAM_HOST_NAME_ADDRESS == param.p->type)
+switch (result) {
+case SCTP_IERROR_ABORT:
+case SCTP_IERROR_NOMEM:
 return 0;
-else
+case SCTP_IERROR_ERROR:
 return 1;
+case SCTP_IERROR_NO_ERROR:
+default:
+break;
 }
 } /* for (loop through all parameters) */
@@ -2137,11 +2138,14 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
 /* If the peer claims support for ADD-IP without support
 * for AUTH, disable support for ADD-IP.
+* Do this only if backward compatible mode is turned off.
 */
-if (asoc->peer.addip_capable && !asoc->peer.auth_capable) {
+if (!sctp_addip_noauth &&
+(asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
 asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
 SCTP_PARAM_DEL_IP |
 SCTP_PARAM_SET_PRIMARY);
+asoc->peer.asconf_capable = 0;
 }
 /* Walk list of transports, removing transports in the UNKNOWN state. */
@@ -2848,10 +2852,11 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
 __be16 err_code;
 int length = 0;
-int chunk_len = asconf->skb->len;
+int chunk_len;
 __u32 serial;
 int all_param_pass = 1;
+chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
 hdr = (sctp_addiphdr_t *)asconf->skb->data;
 serial = ntohl(hdr->serial);
@@ -2952,13 +2957,17 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
 /* This is always done in BH context with a socket lock
 * held, so the list can not change.
 */
+local_bh_disable();
 list_for_each_entry(saddr, &bp->address_list, list) {
 if (sctp_cmp_addr_exact(&saddr->a, &addr))
 saddr->use_as_src = 1;
 }
+local_bh_enable();
 break;
 case SCTP_PARAM_DEL_IP:
-retval = sctp_del_bind_addr(bp, &addr, call_rcu_bh);
+local_bh_disable();
+retval = sctp_del_bind_addr(bp, &addr);
+local_bh_enable();
 list_for_each(pos, &asoc->peer.transport_addr_list) {
 transport = list_entry(pos, struct sctp_transport,
 transports);
@@ -2990,7 +2999,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
 sctp_addip_param_t *asconf_ack_param;
 sctp_errhdr_t *err_param;
 int length;
-int asconf_ack_len = asconf_ack->skb->len;
+int asconf_ack_len;
 __be16 err_code;
 if (no_err)
@@ -2998,6 +3007,9 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
 else
 err_code = SCTP_ERROR_REQ_REFUSED;
+asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) -
+sizeof(sctp_chunkhdr_t);
 /* Skip the addiphdr from the asconf_ack chunk and store a pointer to
 * the first asconf_ack parameter.
 */
......
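The parameter-validation hunks above dispatch on the SCTP_PARAM_ACTION_* values. In the SCTP specification the two high-order bits of an unrecognized parameter's type encode what the receiver should do (00 stop, 01 stop and report, 10 skip, 11 skip and report), which is exactly what the SCTP_PARAM_ACTION_MASK switch tests. A small standalone sketch of that bit test follows; it uses host byte order and made-up parameter values purely for illustration (on the wire, and in the kernel, the type is big-endian):

```c
#include <stdio.h>
#include <stdint.h>

#define PARAM_ACTION_MASK   0xc000u
#define ACTION_DISCARD      0x0000u  /* 00: stop processing, no report  */
#define ACTION_DISCARD_ERR  0x4000u  /* 01: stop processing, report     */
#define ACTION_SKIP         0x8000u  /* 10: skip silently               */
#define ACTION_SKIP_ERR     0xc000u  /* 11: skip, report in ERROR chunk */

static const char *action_name(uint16_t param_type)
{
	switch (param_type & PARAM_ACTION_MASK) {
	case ACTION_DISCARD:     return "discard";
	case ACTION_DISCARD_ERR: return "discard and report";
	case ACTION_SKIP:        return "skip";
	default:                 return "skip and report";
	}
}

int main(void)
{
	/* 0xc004 and 0x8005 are made-up types used only to exercise the mask. */
	printf("0xc004 -> %s\n", action_name(0xc004));
	printf("0x8005 -> %s\n", action_name(0x8005));
	return 0;
}
```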
@@ -453,6 +453,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
 * maximum value discussed in rule C7 above (RTO.max) may be
 * used to provide an upper bound to this doubling operation.
 */
+transport->last_rto = transport->rto;
 transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
 }
@@ -1267,6 +1268,12 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 sctp_ootb_pkt_free(packet);
 break;
+case SCTP_CMD_T1_RETRAN:
+/* Mark a transport for retransmission. */
+sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
+SCTP_RTXR_T1_RTX);
+break;
 case SCTP_CMD_RETRAN:
 /* Mark a transport for retransmission. */
 sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
@@ -1393,7 +1400,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 list_for_each(pos, &asoc->peer.transport_addr_list) {
 t = list_entry(pos, struct sctp_transport,
 transports);
-sctp_retransmit_mark(&asoc->outqueue, t, 0);
+sctp_retransmit_mark(&asoc->outqueue, t,
+SCTP_RTXR_T1_RTX);
 }
 sctp_add_cmd_sf(commands,
......
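The first hunk above records the previous RTO before applying the backoff rule described in the comment: double the RTO on each expiry, bounded by RTO.max. A trivial standalone sketch of that doubling-with-cap, with made-up variable names:

```c
#include <stdio.h>

static unsigned long backoff_rto(unsigned long rto, unsigned long rto_max)
{
	unsigned long doubled = rto * 2;

	return doubled < rto_max ? doubled : rto_max; /* min(rto * 2, rto_max) */
}

int main(void)
{
	unsigned long rto = 1000, rto_max = 60000; /* milliseconds */
	int expiry;

	for (expiry = 1; expiry <= 8; expiry++) {
		rto = backoff_rto(rto, rto_max);
		printf("expiry %d -> rto %lu ms\n", expiry, rto);
	}
	return 0;
}
```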
@@ -2305,7 +2305,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
 /* If we've sent any data bundled with COOKIE-ECHO we will need to
 * resend
 */
-sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN,
+sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN,
 SCTP_TRANSPORT(asoc->peer.primary_path));
 /* Cast away the const modifier, as we want to just
@@ -4064,11 +4064,6 @@ static sctp_disposition_t sctp_sf_abort_violation(
 struct sctp_chunk *chunk = arg;
 struct sctp_chunk *abort = NULL;
-/* Make the abort chunk. */
-abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
-if (!abort)
-goto nomem;
 /* SCTP-AUTH, Section 6.3:
 * It should be noted that if the receiver wants to tear
 * down an association in an authenticated way only, the
@@ -4083,6 +4078,11 @@ static sctp_disposition_t sctp_sf_abort_violation(
 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
 goto discard;
+/* Make the abort chunk. */
+abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
+if (!abort)
+goto nomem;
 if (asoc) {
 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
......
@@ -660,7 +660,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
 * socket routing and failover schemes. Refer to comments in
 * sctp_do_bind(). -daisy
 */
-retval = sctp_del_bind_addr(bp, sa_addr, call_rcu);
+retval = sctp_del_bind_addr(bp, sa_addr);
 addr_buf += af->sockaddr_len;
 err_bindx_rem:
@@ -5307,6 +5307,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 {
 struct sctp_bind_hashbucket *head; /* hash list */
 struct sctp_bind_bucket *pp; /* hash list port iterator */
+struct hlist_node *node;
 unsigned short snum;
 int ret;
@@ -5331,7 +5332,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 index = sctp_phashfn(rover);
 head = &sctp_port_hashtable[index];
 sctp_spin_lock(&head->lock);
-for (pp = head->chain; pp; pp = pp->next)
+sctp_for_each_hentry(pp, node, &head->chain)
 if (pp->port == rover)
 goto next;
 break;
@@ -5358,7 +5359,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 */
 head = &sctp_port_hashtable[sctp_phashfn(snum)];
 sctp_spin_lock(&head->lock);
-for (pp = head->chain; pp; pp = pp->next) {
+sctp_for_each_hentry(pp, node, &head->chain) {
 if (pp->port == snum)
 goto pp_found;
 }
@@ -5702,10 +5703,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
 pp->port = snum;
 pp->fastreuse = 0;
 INIT_HLIST_HEAD(&pp->owner);
-if ((pp->next = head->chain) != NULL)
-pp->next->pprev = &pp->next;
-head->chain = pp;
-pp->pprev = &head->chain;
+hlist_add_head(&pp->node, &head->chain);
 }
 return pp;
 }
@@ -5714,9 +5712,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
 {
 if (pp && hlist_empty(&pp->owner)) {
-if (pp->next)
-pp->next->pprev = pp->pprev;
-*(pp->pprev) = pp->next;
+__hlist_del(&pp->node);
 kmem_cache_free(sctp_bucket_cachep, pp);
 SCTP_DBG_OBJCNT_DEC(bind_bucket);
 }
......
@@ -263,6 +263,15 @@ static ctl_table sctp_table[] = {
 .proc_handler = &proc_dointvec,
 .strategy = &sysctl_intvec
 },
+{
+.ctl_name = CTL_UNNUMBERED,
+.procname = "addip_noauth_enable",
+.data = &sctp_addip_noauth,
+.maxlen = sizeof(int),
+.mode = 0644,
+.proc_handler = &proc_dointvec,
+.strategy = &sysctl_intvec
+},
 { .ctl_name = 0 }
 };
......
@@ -74,8 +74,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 * given destination transport address, set RTO to the protocol
 * parameter 'RTO.Initial'.
 */
+peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial);
 peer->rtt = 0;
-peer->rto = msecs_to_jiffies(sctp_rto_initial);
 peer->rttvar = 0;
 peer->srtt = 0;
 peer->rto_pending = 0;
@@ -385,6 +385,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
 tp->rto = tp->asoc->rto_max;
 tp->rtt = rtt;
+tp->last_rto = tp->rto;
 /* Reset rto_pending so that a new RTT measurement is started when a
 * new data chunk is sent.
@@ -578,7 +579,7 @@ void sctp_transport_reset(struct sctp_transport *t)
 */
 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
 t->ssthresh = asoc->peer.i.a_rwnd;
-t->rto = asoc->rto_initial;
+t->last_rto = t->rto = asoc->rto_initial;
 t->rtt = 0;
 t->srtt = 0;
 t->rttvar = 0;
......
@@ -862,7 +862,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 continue;
 /* see if this ssn has been marked by skipping */
-if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+if (!SSN_lte(cssn, sctp_ssn_peek(in, csid)))
 break;
 __skb_unlink(pos, &ulpq->lobby);
......
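The one-character change above turns an exclusive comparison into an inclusive one. Stream sequence numbers are 16-bit counters that wrap, so ordering is decided by serial arithmetic on the signed difference; with the exclusive form, a queued SSN exactly equal to the marked value would never be reaped. A standalone sketch (the macro bodies are written here from the usual signed-difference definition, as an illustration rather than a copy of the kernel header):

```c
#include <stdio.h>
#include <stdint.h>

#define SSN_lt(a, b)  ((int16_t)((uint16_t)((a) - (b))) < 0)
#define SSN_lte(a, b) ((int16_t)((uint16_t)((a) - (b))) <= 0)

int main(void)
{
	uint16_t cssn = 7, marked = 7;

	/* Equal SSNs: the exclusive test is false, the inclusive one is true. */
	printf("SSN_lt(7, 7)  = %d\n", SSN_lt(cssn, marked));
	printf("SSN_lte(7, 7) = %d\n", SSN_lte(cssn, marked));

	/* Wrap-around still orders correctly: 65535 precedes 2 modulo 2^16. */
	printf("SSN_lt(65535, 2) = %d\n", SSN_lt(65535, 2));
	return 0;
}
```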
@@ -2319,6 +2319,11 @@ int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
 return err;
 }
+int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
+{
+return sock->ops->shutdown(sock, how);
+}
 /* ABI emulation layers need these two */
 EXPORT_SYMBOL(move_addr_to_kernel);
 EXPORT_SYMBOL(move_addr_to_user);
@@ -2345,3 +2350,4 @@ EXPORT_SYMBOL(kernel_getsockopt);
 EXPORT_SYMBOL(kernel_setsockopt);
 EXPORT_SYMBOL(kernel_sendpage);
 EXPORT_SYMBOL(kernel_sock_ioctl);
+EXPORT_SYMBOL(kernel_sock_shutdown);
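The kernel_sock_shutdown() helper added above gives in-kernel socket users a wrapper around the protocol's shutdown operation, taking a SHUT_RD/SHUT_WR/SHUT_RDWR direction (modelled by enum sock_shutdown_cmd). For comparison, the userspace equivalent is the ordinary shutdown(2) call; a minimal, illustrative example:

```c
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Stop both directions of the (here unconnected) socket; on an
	 * unconnected TCP socket this fails with ENOTCONN, which is fine
	 * for the purpose of the illustration. */
	if (shutdown(fd, SHUT_RDWR) < 0)
		perror("shutdown");

	close(fd);
	return 0;
}
```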