Commit ed7dc1df authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (30 commits)
  X25: remove duplicated #include
  tcp: use correct net ns in cookie_v4_check()
  rps: tcp: fix rps_sock_flow_table table updates
  ppp_generic: fix multilink fragment sizes
  syncookies: remove Kconfig text line about disabled-by-default
  ixgbe: only check pfc bits in hang logic if pfc is enabled
  net: check for refcount if pop a stacked dst_entry
  ixgbe: return IXGBE_ERR_RAR_INDEX when out of range
  act_pedit: access skb->data safely
  sfc: Store port number in net_device::dev_id
  epic100: Test __BIG_ENDIAN instead of (non-existent) CONFIG_BIG_ENDIAN
  tehuti: return -EFAULT on copy_to_user errors
  isdn/kcapi: return -EFAULT on copy_from_user errors
  e1000e: change logical negate to bitwise
  sfc: Get port number from CS_PORT_NUM, not PCI function number
  cls_u32: use skb_header_pointer() to dereference data safely
  TCP: tcp_hybla: Fix integer overflow in slow start increment
  act_nat: fix the wrong checksum when addr isn't in old_addr/mask
  net/fec: fix pm to survive to suspend/resume
  korina: count RX DMA OVR as rx_fifo_error
  ...
@@ -1020,12 +1020,12 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
     if (cmd == AVMB1_ADDCARD) {
         if ((retval = copy_from_user(&cdef, data,
                         sizeof(avmb1_carddef))))
-            return retval;
+            return -EFAULT;
         cdef.cardtype = AVM_CARDTYPE_B1;
     } else {
         if ((retval = copy_from_user(&cdef, data,
                         sizeof(avmb1_extcarddef))))
-            return retval;
+            return -EFAULT;
     }
     cparams.port = cdef.port;
     cparams.irq = cdef.irq;
@@ -1218,7 +1218,7 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
         kcapi_carddef cdef;

         if ((retval = copy_from_user(&cdef, data, sizeof(cdef))))
-            return retval;
+            return -EFAULT;

         cparams.port = cdef.port;
         cparams.irq = cdef.irq;
...
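These kcapi hunks implement the "isdn/kcapi: return -EFAULT on copy_from_user errors" commit above: copy_from_user() returns the number of bytes it failed to copy, not an errno, so returning that value directly hands callers a positive byte count. A minimal userspace sketch of the same rule, with a hypothetical stand-in for copy_from_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in: like the kernel helper, it returns the number
 * of bytes it could NOT copy (0 on success), never a negative errno. */
static unsigned long copy_from_user_stub(void *dst, const void *src,
                                         unsigned long n, unsigned long faulty)
{
    unsigned long copied = n > faulty ? n - faulty : 0;
    memcpy(dst, src, copied);
    return n - copied;          /* bytes left uncopied */
}

struct carddef { int port, irq; };

static int add_card(const void *user_buf, unsigned long faulty)
{
    struct carddef cdef;

    /* Wrong: "return retval;" would leak a positive byte count.
     * Right: map any shortfall to -EFAULT, as the fix does. */
    if (copy_from_user_stub(&cdef, user_buf, sizeof(cdef), faulty))
        return -EFAULT;
    return 0;
}

int main(void)
{
    struct carddef src = { 2300, 10 };

    printf("full copy  -> %d\n", add_card(&src, 0));    /* 0 */
    printf("short copy -> %d\n", add_card(&src, 4));    /* -EFAULT (-14) */
    return 0;
}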
@@ -247,6 +247,7 @@ static const struct flash_spec flash_5709 = {
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

 static void bnx2_init_napi(struct bnx2 *bp);
+static void bnx2_del_napi(struct bnx2 *bp);

 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 {
@@ -6270,6 +6271,7 @@ bnx2_open(struct net_device *dev)
     bnx2_free_skbs(bp);
     bnx2_free_irq(bp);
     bnx2_free_mem(bp);
+    bnx2_del_napi(bp);
     return rc;
 }

@@ -6537,6 +6539,7 @@ bnx2_close(struct net_device *dev)
     bnx2_free_irq(bp);
     bnx2_free_skbs(bp);
     bnx2_free_mem(bp);
+    bnx2_del_napi(bp);
     bp->link_up = 0;
     netif_carrier_off(bp->dev);
     bnx2_set_power_state(bp, PCI_D3hot);
@@ -8227,7 +8230,16 @@ bnx2_bus_string(struct bnx2 *bp, char *str)
     return str;
 }

-static void __devinit
+static void
+bnx2_del_napi(struct bnx2 *bp)
+{
+    int i;
+
+    for (i = 0; i < bp->irq_nvecs; i++)
+        netif_napi_del(&bp->bnx2_napi[i].napi);
+}
+
+static void
 bnx2_init_napi(struct bnx2 *bp)
 {
     int i;
...
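The bnx2 hunks give every netif_napi_add() done in bnx2_init_napi() a matching bnx2_del_napi(), called both when bnx2_open() fails and in bnx2_close(). A generic, hedged userspace sketch of that "tear down everything you registered, including on the error path" pattern; all names below are illustrative, not from the driver:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
    int *ring;
    bool irq_requested;
    bool napi_added;            /* stands in for netif_napi_add() state */
};

static int dev_open(struct dev *d, bool fail_late)
{
    d->ring = malloc(4096);
    if (!d->ring)
        return -1;

    d->irq_requested = true;    /* pretend request_irq() succeeded */
    d->napi_added = true;       /* pretend netif_napi_add() ran */

    if (fail_late)              /* e.g. a later init step failed */
        goto err_out;
    return 0;

err_out:
    /* Undo every registration, including the NAPI one, which is the
     * step the bnx2 fix adds to the open error path and to close. */
    d->napi_added = false;      /* bnx2_del_napi() analogue */
    d->irq_requested = false;   /* free_irq() analogue */
    free(d->ring);
    d->ring = NULL;
    return -1;
}

int main(void)
{
    struct dev d = { 0 };
    printf("open (late failure) -> %d, napi_added=%d\n",
           dev_open(&d, true), d.napi_added);
    return 0;
}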
@@ -2554,7 +2554,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
             mdef = er32(MDEF(i));

             /* Ignore filters with anything other than IPMI ports */
-            if (mdef & !(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+            if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
                 continue;

             /* Enable this decision filter in MANC2H */
...
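The e1000e hunk replaces a logical NOT with a bitwise NOT: !(A | B) collapses the mask to 0, so the old test never matched, while ~(A | B) checks for any bit outside the two allowed IPMI port bits. A quick standalone demonstration (the mask values are made up, not the real MDEF bits):

#include <stdint.h>
#include <stdio.h>

#define PORT_623 0x01u          /* illustrative values only */
#define PORT_664 0x02u

int main(void)
{
    uint32_t mdef = 0x10;       /* a filter bit outside the allowed ports */

    /* Logical negate: !(0x01 | 0x02) == 0, so the AND is always 0. */
    printf("logical: %u\n", mdef & !(PORT_623 | PORT_664));

    /* Bitwise negate: keeps every bit except the allowed ports, so any
     * unexpected bit makes the test true and the filter is skipped. */
    printf("bitwise: 0x%x\n", mdef & ~(PORT_623 | PORT_664));
    return 0;
}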
@@ -74,7 +74,14 @@ struct enic_msix_entry {
     void *devid;
 };

+#define ENIC_SET_APPLIED   (1 << 0)
+#define ENIC_SET_REQUEST   (1 << 1)
+#define ENIC_SET_NAME      (1 << 2)
+#define ENIC_SET_INSTANCE  (1 << 3)
+#define ENIC_SET_HOST      (1 << 4)
+
 struct enic_port_profile {
+    u32 set;
     u8 request;
     char name[PORT_PROFILE_MAX];
     u8 instance_uuid[PORT_UUID_MAX];
...
@@ -1029,8 +1029,7 @@ static int enic_dev_init_done(struct enic *enic, int *done, int *error)
     return err;
 }

-static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac,
-    char *name, u8 *instance_uuid, u8 *host_uuid)
+static int enic_set_port_profile(struct enic *enic, u8 *mac)
 {
     struct vic_provinfo *vp;
     u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
@@ -1040,97 +1039,112 @@ static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac,
         "%02X%02X-%02X%02X%02X%02X%0X%02X";
     int err;

-    if (!name)
-        return -EINVAL;
-
-    if (!is_valid_ether_addr(mac))
-        return -EADDRNOTAVAIL;
-
-    vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_LINUX_TYPE);
-    if (!vp)
-        return -ENOMEM;
-
-    vic_provinfo_add_tlv(vp,
-        VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
-        strlen(name) + 1, name);
-
-    vic_provinfo_add_tlv(vp,
-        VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
-        ETH_ALEN, mac);
-
-    if (instance_uuid) {
-        uuid = instance_uuid;
-        sprintf(uuid_str, uuid_fmt,
-            uuid[0], uuid[1], uuid[2], uuid[3],
-            uuid[4], uuid[5], uuid[6], uuid[7],
-            uuid[8], uuid[9], uuid[10], uuid[11],
-            uuid[12], uuid[13], uuid[14], uuid[15]);
-        vic_provinfo_add_tlv(vp,
-            VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
-            sizeof(uuid_str), uuid_str);
-    }
-
-    if (host_uuid) {
-        uuid = host_uuid;
-        sprintf(uuid_str, uuid_fmt,
-            uuid[0], uuid[1], uuid[2], uuid[3],
-            uuid[4], uuid[5], uuid[6], uuid[7],
-            uuid[8], uuid[9], uuid[10], uuid[11],
-            uuid[12], uuid[13], uuid[14], uuid[15]);
-        vic_provinfo_add_tlv(vp,
-            VIC_LINUX_PROV_TLV_HOST_UUID_STR,
-            sizeof(uuid_str), uuid_str);
-    }
-
-    err = enic_vnic_dev_deinit(enic);
-    if (err)
-        goto err_out;
-
-    memset(&enic->pp, 0, sizeof(enic->pp));
-
-    err = enic_dev_init_prov(enic, vp);
-    if (err)
-        goto err_out;
-
-    enic->pp.request = request;
-    memcpy(enic->pp.name, name, PORT_PROFILE_MAX);
-    if (instance_uuid)
-        memcpy(enic->pp.instance_uuid,
-            instance_uuid, PORT_UUID_MAX);
-    if (host_uuid)
-        memcpy(enic->pp.host_uuid,
-            host_uuid, PORT_UUID_MAX);
-
-err_out:
+    err = enic_vnic_dev_deinit(enic);
+    if (err)
+        return err;
+
+    switch (enic->pp.request) {
+
+    case PORT_REQUEST_ASSOCIATE:
+
+        if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
+            return -EINVAL;
+
+        if (!is_valid_ether_addr(mac))
+            return -EADDRNOTAVAIL;
+
+        vp = vic_provinfo_alloc(GFP_KERNEL, oui,
+            VIC_PROVINFO_LINUX_TYPE);
+        if (!vp)
+            return -ENOMEM;
+
+        vic_provinfo_add_tlv(vp,
+            VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
+            strlen(enic->pp.name) + 1, enic->pp.name);
+
+        vic_provinfo_add_tlv(vp,
+            VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
+            ETH_ALEN, mac);
+
+        if (enic->pp.set & ENIC_SET_INSTANCE) {
+            uuid = enic->pp.instance_uuid;
+            sprintf(uuid_str, uuid_fmt,
+                uuid[0], uuid[1], uuid[2], uuid[3],
+                uuid[4], uuid[5], uuid[6], uuid[7],
+                uuid[8], uuid[9], uuid[10], uuid[11],
+                uuid[12], uuid[13], uuid[14], uuid[15]);
+            vic_provinfo_add_tlv(vp,
+                VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
+                sizeof(uuid_str), uuid_str);
+        }
+
+        if (enic->pp.set & ENIC_SET_HOST) {
+            uuid = enic->pp.host_uuid;
+            sprintf(uuid_str, uuid_fmt,
+                uuid[0], uuid[1], uuid[2], uuid[3],
+                uuid[4], uuid[5], uuid[6], uuid[7],
+                uuid[8], uuid[9], uuid[10], uuid[11],
+                uuid[12], uuid[13], uuid[14], uuid[15]);
+            vic_provinfo_add_tlv(vp,
+                VIC_LINUX_PROV_TLV_HOST_UUID_STR,
+                sizeof(uuid_str), uuid_str);
+        }
+
+        err = enic_dev_init_prov(enic, vp);
     vic_provinfo_free(vp);
-
-    return err;
-}
-
-static int enic_unset_port_profile(struct enic *enic)
-{
-    memset(&enic->pp, 0, sizeof(enic->pp));
-    return enic_vnic_dev_deinit(enic);
+        if (err)
+            return err;
+        break;
+
+    case PORT_REQUEST_DISASSOCIATE:
+        break;
+
+    default:
+        return -EINVAL;
+    }
+
+    enic->pp.set |= ENIC_SET_APPLIED;
+
+    return 0;
 }

 static int enic_set_vf_port(struct net_device *netdev, int vf,
     struct nlattr *port[])
 {
     struct enic *enic = netdev_priv(netdev);
-    char *name = NULL;
-    u8 *instance_uuid = NULL;
-    u8 *host_uuid = NULL;
-    u8 request = PORT_REQUEST_DISASSOCIATE;
+
+    memset(&enic->pp, 0, sizeof(enic->pp));
+
+    if (port[IFLA_PORT_REQUEST]) {
+        enic->pp.set |= ENIC_SET_REQUEST;
+        enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+    }
+
+    if (port[IFLA_PORT_PROFILE]) {
+        enic->pp.set |= ENIC_SET_NAME;
+        memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
+            PORT_PROFILE_MAX);
+    }
+
+    if (port[IFLA_PORT_INSTANCE_UUID]) {
+        enic->pp.set |= ENIC_SET_INSTANCE;
+        memcpy(enic->pp.instance_uuid,
+            nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
+    }
+
+    if (port[IFLA_PORT_HOST_UUID]) {
+        enic->pp.set |= ENIC_SET_HOST;
+        memcpy(enic->pp.host_uuid,
+            nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
+    }

     /* don't support VFs, yet */
     if (vf != PORT_SELF_VF)
         return -EOPNOTSUPP;

-    if (port[IFLA_PORT_REQUEST])
-        request = nla_get_u8(port[IFLA_PORT_REQUEST]);
-
-    switch (request) {
-    case PORT_REQUEST_ASSOCIATE:
+    if (!(enic->pp.set & ENIC_SET_REQUEST))
+        return -EOPNOTSUPP;
+
+    if (enic->pp.request == PORT_REQUEST_ASSOCIATE) {

         /* If the interface mac addr hasn't been assigned,
          * assign a random mac addr before setting port-
@@ -1139,30 +1153,9 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
         if (is_zero_ether_addr(netdev->dev_addr))
             random_ether_addr(netdev->dev_addr);

-        if (port[IFLA_PORT_PROFILE])
-            name = nla_data(port[IFLA_PORT_PROFILE]);
-
-        if (port[IFLA_PORT_INSTANCE_UUID])
-            instance_uuid =
-                nla_data(port[IFLA_PORT_INSTANCE_UUID]);
-
-        if (port[IFLA_PORT_HOST_UUID])
-            host_uuid = nla_data(port[IFLA_PORT_HOST_UUID]);
-
-        return enic_set_port_profile(enic, request,
-            netdev->dev_addr, name,
-            instance_uuid, host_uuid);
-
-    case PORT_REQUEST_DISASSOCIATE:
-        return enic_unset_port_profile(enic);
-
-    default:
-        break;
     }

-    return -EOPNOTSUPP;
+    return enic_set_port_profile(enic, netdev->dev_addr);
 }

 static int enic_get_vf_port(struct net_device *netdev, int vf,
@@ -1172,14 +1165,12 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
     int err, error, done;
     u16 response = PORT_PROFILE_RESPONSE_SUCCESS;

-    /* don't support VFs, yet */
-    if (vf != PORT_SELF_VF)
-        return -EOPNOTSUPP;
+    if (!(enic->pp.set & ENIC_SET_APPLIED))
+        return -ENODATA;

     err = enic_dev_init_done(enic, &done, &error);
     if (err)
-        return err;
+        error = err;

     switch (error) {
     case ERR_SUCCESS:
@@ -1202,12 +1193,15 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
     NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
     NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);

-    NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
-        enic->pp.name);
-    NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
-        enic->pp.instance_uuid);
-    NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
-        enic->pp.host_uuid);
+    if (enic->pp.set & ENIC_SET_NAME)
+        NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
+            enic->pp.name);
+    if (enic->pp.set & ENIC_SET_INSTANCE)
+        NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
+            enic->pp.instance_uuid);
+    if (enic->pp.set & ENIC_SET_HOST)
+        NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
+            enic->pp.host_uuid);

     return 0;
...
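The enic rework above records which netlink attributes arrived in a pp.set bitmask (ENIC_SET_NAME, ENIC_SET_INSTANCE, ...) and later only acts on, or reports, the fields that were actually supplied. A compact, hedged userspace sketch of that "presence bitmask" pattern; every name in it is illustrative:

#include <stdio.h>
#include <string.h>

#define SET_NAME     (1 << 0)
#define SET_INSTANCE (1 << 1)
#define SET_HOST     (1 << 2)

struct profile {
    unsigned int set;           /* which optional fields were supplied */
    char name[40];
    char instance[40];
    char host[40];
};

static void profile_fill(struct profile *pp, const char *name,
                         const char *instance)
{
    memset(pp, 0, sizeof(*pp));
    if (name) {
        pp->set |= SET_NAME;
        snprintf(pp->name, sizeof(pp->name), "%s", name);
    }
    if (instance) {
        pp->set |= SET_INSTANCE;
        snprintf(pp->instance, sizeof(pp->instance), "%s", instance);
    }
    /* host uuid intentionally left unset in this example */
}

static void profile_dump(const struct profile *pp)
{
    /* Only report fields that were actually provided, mirroring the
     * ENIC_SET_* checks around the NLA_PUT() calls above. */
    if (pp->set & SET_NAME)
        printf("name:     %s\n", pp->name);
    if (pp->set & SET_INSTANCE)
        printf("instance: %s\n", pp->instance);
    if (pp->set & SET_HOST)
        printf("host:     %s\n", pp->host);
}

int main(void)
{
    struct profile pp;
    profile_fill(&pp, "web-tier", "6f9619ff-8b86-d011-b42d-00cf4fc964ff");
    profile_dump(&pp);
    return 0;
}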
@@ -87,6 +87,7 @@ static int rx_copybreak;
 #include <linux/bitops.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm/byteorder.h>

 /* These identify the driver base version and may not be removed. */
 static char version[] __devinitdata =
@@ -230,7 +231,7 @@ static const u16 media2miictl[16] = {
  * The EPIC100 Rx and Tx buffer descriptors. Note that these
  * really ARE host-endian; it's not a misannotation. We tell
  * the card to byteswap them internally on big-endian hosts -
- * look for #ifdef CONFIG_BIG_ENDIAN in epic_open().
+ * look for #ifdef __BIG_ENDIAN in epic_open().
  */

 struct epic_tx_desc {
@@ -690,7 +691,7 @@ static int epic_open(struct net_device *dev)
     outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

     /* Tell the chip to byteswap descriptors on big-endian hosts */
-#ifdef CONFIG_BIG_ENDIAN
+#ifdef __BIG_ENDIAN
     outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
     inl(ioaddr + GENCTL);
     outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
@@ -806,7 +807,7 @@ static void epic_restart(struct net_device *dev)
     for (i = 16; i > 0; i--)
         outl(0x0008, ioaddr + TEST1);

-#ifdef CONFIG_BIG_ENDIAN
+#ifdef __BIG_ENDIAN
     outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
 #else
     outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
...
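Per the commit subject, CONFIG_BIG_ENDIAN is not a real config symbol, so the old #ifdef blocks were never compiled in; __BIG_ENDIAN from <asm/byteorder.h> is what actually reflects the host byte order. A small userspace analogue using the compiler's own byte-order macros (GCC/Clang) makes the distinction visible:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* GCC/Clang expose the target byte order at compile time; this plays
     * the role <asm/byteorder.h> plays for the epic100 #ifdefs. */
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    puts("compiled for a big-endian host: tell the NIC to byteswap");
#else
    puts("compiled for a little-endian host: no descriptor byteswap");
#endif

    /* Run-time cross-check of the same fact. */
    uint16_t probe = 0x0102;
    printf("first byte in memory: 0x%02x\n", *(uint8_t *)&probe);
    return 0;
}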
@@ -1373,10 +1373,9 @@ fec_suspend(struct platform_device *dev, pm_message_t state)

     if (ndev) {
         fep = netdev_priv(ndev);
-        if (netif_running(ndev)) {
-            netif_device_detach(ndev);
-            fec_stop(ndev);
-        }
+        if (netif_running(ndev))
+            fec_enet_close(ndev);
+        clk_disable(fep->clk);
     }
     return 0;
 }
@@ -1385,12 +1384,13 @@ static int
 fec_resume(struct platform_device *dev)
 {
     struct net_device *ndev = platform_get_drvdata(dev);
+    struct fec_enet_private *fep;

     if (ndev) {
-        if (netif_running(ndev)) {
-            fec_enet_init(ndev, 0);
-            netif_device_attach(ndev);
-        }
+        fep = netdev_priv(ndev);
+        clk_enable(fep->clk);
+        if (netif_running(ndev))
+            fec_enet_open(ndev);
     }
     return 0;
 }
...
@@ -1188,6 +1188,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
         IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
     } else {
         hw_dbg(hw, "RAR index %d is out of range.\n", index);
+        return IXGBE_ERR_RAR_INDEX;
     }

     return 0;
@@ -1219,6 +1220,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
         IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
     } else {
         hw_dbg(hw, "RAR index %d is out of range.\n", index);
+        return IXGBE_ERR_RAR_INDEX;
     }

     /* clear VMDq pool/queue selection for this RAR */
...
@@ -642,7 +642,7 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
     u32 txoff = IXGBE_TFCS_TXOFF;

 #ifdef CONFIG_IXGBE_DCB
-    if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+    if (adapter->dcb_cfg.pfc_mode_enable) {
         int tc;
         int reg_idx = tx_ring->reg_idx;
         int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
...
@@ -2609,6 +2609,7 @@ struct ixgbe_info {
 #define IXGBE_ERR_EEPROM_VERSION                -24
 #define IXGBE_ERR_NO_SPACE                      -25
 #define IXGBE_ERR_OVERTEMP                      -26
+#define IXGBE_ERR_RAR_INDEX                     -27
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF

 #endif /* _IXGBE_TYPE_H_ */
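The ixgbe hunks stop logging an out-of-range receive-address-register index and then returning 0 as if the write had succeeded; they return the new IXGBE_ERR_RAR_INDEX code instead. A minimal sketch of the same pattern, with an illustrative table size and the same numeric error value:

#include <stdio.h>

#define ERR_RAR_INDEX  (-27)    /* mirrors the new IXGBE_ERR_RAR_INDEX define */
#define RAR_ENTRIES    16       /* illustrative table size */

static unsigned int rar_table[RAR_ENTRIES];

static int set_rar(unsigned int index, unsigned int value)
{
    if (index >= RAR_ENTRIES) {
        fprintf(stderr, "RAR index %u is out of range.\n", index);
        return ERR_RAR_INDEX;   /* previously: log and fall through to 0 */
    }
    rar_table[index] = value;
    return 0;
}

int main(void)
{
    printf("set_rar(3)  -> %d\n", set_rar(3, 0xdeadbeef));
    printf("set_rar(99) -> %d\n", set_rar(99, 0));
    return 0;
}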
@@ -135,6 +135,7 @@ struct korina_private {
     struct napi_struct napi;
     struct timer_list media_check_timer;
     struct mii_if_info mii_if;
+    struct work_struct restart_task;
     struct net_device *dev;
     int phy_addr;
 };
@@ -375,7 +376,7 @@ static int korina_rx(struct net_device *dev, int limit)
             if (devcs & ETH_RX_LE)
                 dev->stats.rx_length_errors++;
             if (devcs & ETH_RX_OVR)
-                dev->stats.rx_over_errors++;
+                dev->stats.rx_fifo_errors++;
             if (devcs & ETH_RX_CV)
                 dev->stats.rx_frame_errors++;
             if (devcs & ETH_RX_CES)
@@ -764,10 +765,9 @@ static int korina_alloc_ring(struct net_device *dev)

     /* Initialize the receive descriptors */
     for (i = 0; i < KORINA_NUM_RDS; i++) {
-        skb = dev_alloc_skb(KORINA_RBSIZE + 2);
+        skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
         if (!skb)
             return -ENOMEM;
-        skb_reserve(skb, 2);
         lp->rx_skb[i] = skb;
         lp->rd_ring[i].control = DMA_DESC_IOD |
                 DMA_COUNT(KORINA_RBSIZE);
@@ -890,12 +890,12 @@ static int korina_init(struct net_device *dev)

 /*
  * Restart the RC32434 ethernet controller.
- * FIXME: check the return status where we call it
  */
-static int korina_restart(struct net_device *dev)
+static void korina_restart_task(struct work_struct *work)
 {
-    struct korina_private *lp = netdev_priv(dev);
-    int ret;
+    struct korina_private *lp = container_of(work,
+            struct korina_private, restart_task);
+    struct net_device *dev = lp->dev;

     /*
      * Disable interrupts
@@ -916,10 +916,9 @@ static int korina_restart(struct net_device *dev)

     napi_disable(&lp->napi);

-    ret = korina_init(dev);
-    if (ret < 0) {
+    if (korina_init(dev) < 0) {
         printk(KERN_ERR "%s: cannot restart device\n", dev->name);
-        return ret;
+        return;
     }
     korina_multicast_list(dev);
@@ -927,8 +926,6 @@ static int korina_restart(struct net_device *dev)
     enable_irq(lp->ovr_irq);
     enable_irq(lp->tx_irq);
     enable_irq(lp->rx_irq);
-
-    return ret;
 }

 static void korina_clear_and_restart(struct net_device *dev, u32 value)
@@ -937,7 +934,7 @@ static void korina_clear_and_restart(struct net_device *dev, u32 value)

     netif_stop_queue(dev);
     writel(value, &lp->eth_regs->ethintfc);
-    korina_restart(dev);
+    schedule_work(&lp->restart_task);
 }

 /* Ethernet Tx Underflow interrupt */
@@ -962,11 +959,8 @@ static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
 static void korina_tx_timeout(struct net_device *dev)
 {
     struct korina_private *lp = netdev_priv(dev);
-    unsigned long flags;

-    spin_lock_irqsave(&lp->lock, flags);
-    korina_restart(dev);
-    spin_unlock_irqrestore(&lp->lock, flags);
+    schedule_work(&lp->restart_task);
 }

 /* Ethernet Rx Overflow interrupt */
@@ -1086,6 +1080,8 @@ static int korina_close(struct net_device *dev)

     napi_disable(&lp->napi);

+    cancel_work_sync(&lp->restart_task);
+
     free_irq(lp->rx_irq, dev);
     free_irq(lp->tx_irq, dev);
     free_irq(lp->ovr_irq, dev);
@@ -1198,6 +1194,8 @@ static int korina_probe(struct platform_device *pdev)
     }
     setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);

+    INIT_WORK(&lp->restart_task, korina_restart_task);
+
     printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
             dev->name);
 out:
...
@@ -1422,7 +1422,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
         flen = len;
         if (nfree > 0) {
             if (pch->speed == 0) {
-                flen = totlen/nfree;
+                flen = len/nfree;
                 if (nbigger > 0) {
                     flen++;
                     nbigger--;
...
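The ppp_generic fix divides the bytes still to be sent (len) rather than the original total (totlen) when sizing multilink fragments for equal-speed channels, with the first few fragments absorbing the remainder one byte at a time. A hedged standalone illustration of that split; channel counts and lengths are made up, and the remainder is computed here simply as len % nfree:

#include <stdio.h>

/* Split 'len' bytes across 'nfree' equal-speed channels: base size
 * len/nfree, and the first len%nfree fragments get one extra byte,
 * mirroring the flen++/nbigger-- logic visible in the hunk above. */
static void split(int len, int nfree)
{
    int nbigger = len % nfree;
    int total = 0;

    for (int i = 0; i < nfree; i++) {
        int flen = len / nfree;
        if (nbigger > 0) {
            flen++;
            nbigger--;
        }
        printf("fragment %d: %d bytes\n", i, flen);
        total += flen;
    }
    printf("total: %d (expected %d)\n", total, len);
}

int main(void)
{
    split(1500, 4);     /* 375 * 4 */
    split(1502, 4);     /* 376, 376, 375, 375 */
    return 0;
}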
@@ -830,7 +830,7 @@ static inline const char *efx_dev_name(struct efx_nic *efx)

 static inline unsigned int efx_port_num(struct efx_nic *efx)
 {
-    return PCI_FUNC(efx->pci_dev->devfn);
+    return efx->net_dev->dev_id;
 }

 /**
...
@@ -206,6 +206,7 @@ static int siena_probe_nic(struct efx_nic *efx)
 {
     struct siena_nic_data *nic_data;
     bool already_attached = 0;
+    efx_oword_t reg;
     int rc;

     /* Allocate storage for hardware specific data */
@@ -220,6 +221,9 @@ static int siena_probe_nic(struct efx_nic *efx)
         goto fail1;
     }

+    efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
+    efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
+
     efx_mcdi_init(efx);

     /* Recover from a failed assertion before probing */
...
@@ -646,7 +646,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
         error = copy_from_user(data, ifr->ifr_data, sizeof(data));
         if (error) {
             pr_err("cant copy from user\n");
-            RET(error);
+            RET(-EFAULT);
         }
         DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
     }
@@ -665,7 +665,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
             data[2]);
         error = copy_to_user(ifr->ifr_data, data, sizeof(data));
         if (error)
-            RET(error);
+            RET(-EFAULT);
         break;
     case BDX_OP_WRITE:
...
@@ -37,8 +37,6 @@
 #include <net/x25device.h>
 #include "x25_asy.h"

-#include <net/x25device.h>
-
 static struct net_device **x25_asy_devs;
 static int x25_asy_maxdev = SL_NRUNIT;
...
@@ -222,7 +222,6 @@ static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
 static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
            struct ath5k_txq *txq);
 static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan);
-static int ath5k_reset_wake(struct ath5k_softc *sc);
 static int ath5k_start(struct ieee80211_hw *hw);
 static void ath5k_stop(struct ieee80211_hw *hw);
 static int ath5k_add_interface(struct ieee80211_hw *hw,
@@ -2770,7 +2769,7 @@ ath5k_tasklet_reset(unsigned long data)
 {
     struct ath5k_softc *sc = (void *)data;

-    ath5k_reset_wake(sc);
+    ath5k_reset(sc, sc->curchan);
 }

 /*
@@ -2941,23 +2940,13 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
     ath5k_beacon_config(sc);
     /* intrs are enabled by ath5k_beacon_config */

+    ieee80211_wake_queues(sc->hw);
+
     return 0;
 err:
     return ret;
 }

-static int
-ath5k_reset_wake(struct ath5k_softc *sc)
-{
-    int ret;
-
-    ret = ath5k_reset(sc, sc->curchan);
-    if (!ret)
-        ieee80211_wake_queues(sc->hw);
-
-    return ret;
-}
-
 static int ath5k_start(struct ieee80211_hw *hw)
 {
     return ath5k_init(hw->priv);
...
@@ -250,11 +250,11 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
  * Linux networking. Thus, destinations are stackable.
  */

-static inline struct dst_entry *dst_pop(struct dst_entry *dst)
+static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
 {
-    struct dst_entry *child = dst_clone(dst->child);
+    struct dst_entry *child = skb_dst(skb)->child;

-    dst_release(dst);
+    skb_dst_drop(skb);
     return child;
 }
...
@@ -708,7 +708,8 @@ static int vlan_dev_init(struct net_device *dev)
     netif_carrier_off(dev);

     /* IFF_BROADCAST|IFF_MULTICAST; ??? */
-    dev->flags  = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
+    dev->flags  = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+                      IFF_MASTER | IFF_SLAVE);
     dev->iflink = real_dev->ifindex;
     dev->state  = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
                       (1<<__LINK_STATE_DORMANT))) |
...
@@ -2795,7 +2795,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
     struct net_device *orig_dev;
     struct net_device *master;
     struct net_device *null_or_orig;
-    struct net_device *null_or_bond;
+    struct net_device *orig_or_bond;
     int ret = NET_RX_DROP;
     __be16 type;
@@ -2868,10 +2868,10 @@ static int __netif_receive_skb(struct sk_buff *skb)
      * device that may have registered for a specific ptype. The
      * handler may have to adjust skb->dev and orig_dev.
      */
-    null_or_bond = NULL;
+    orig_or_bond = orig_dev;
     if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
         (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
-        null_or_bond = vlan_dev_real_dev(skb->dev);
+        orig_or_bond = vlan_dev_real_dev(skb->dev);
     }

     type = skb->protocol;
@@ -2879,7 +2879,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
             &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
         if (ptype->type == type && (ptype->dev == null_or_orig ||
              ptype->dev == skb->dev || ptype->dev == orig_dev ||
-             ptype->dev == null_or_bond)) {
+             ptype->dev == orig_or_bond)) {
             if (pt_prev)
                 ret = deliver_skb(skb, pt_prev, orig_dev);
             pt_prev = ptype;
...
@@ -303,7 +303,7 @@ config ARPD
       If unsure, say N.

 config SYN_COOKIES
-    bool "IP: TCP syncookie support (disabled per default)"
+    bool "IP: TCP syncookie support"
     ---help---
       Normal TCP/IP networking is open to an attack known as "SYN
       flooding". This denial-of-service attack prevents legitimate remote
@@ -328,13 +328,13 @@ config SYN_COOKIES
       server is really overloaded. If this happens frequently better turn
       them off.

-      If you say Y here, note that SYN cookies aren't enabled by default;
-      you can enable them by saying Y to "/proc file system support" and
+      If you say Y here, you can disable SYN cookies at run time by
+      saying Y to "/proc file system support" and
       "Sysctl support" below and executing the command

-      echo 1 >/proc/sys/net/ipv4/tcp_syncookies
+      echo 0 > /proc/sys/net/ipv4/tcp_syncookies

-      at boot time after the /proc file system has been mounted.
+      after the /proc file system has been mounted.

       If unsure, say N.
...
@@ -347,7 +347,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
               { .sport = th->dest,
                 .dport = th->source } } };
     security_req_classify_flow(req, &fl);
-    if (ip_route_output_key(&init_net, &rt, &fl)) {
+    if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
         reqsk_free(req);
         goto out;
     }
...
@@ -126,8 +126,8 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
          * calculate 2^fract in a <<7 value.
          */
         is_slowstart = 1;
-        increment = ((1 << ca->rho) * hybla_fraction(rho_fractions))
-            - 128;
+        increment = ((1 << min(ca->rho, 16U)) *
+            hybla_fraction(rho_fractions)) - 128;
     } else {
         /*
          * congestion avoidance
...
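tcp_hybla's slow-start increment shifts 1 left by the rho ratio; if rho grows to the width of the type, the shift overflows (shifting a 32-bit value by 32 or more is undefined behaviour in C), which is why the fix clamps it with min(ca->rho, 16U). A small standalone illustration of the clamped form:

#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
    uint32_t rho = 34;  /* an extreme RTT ratio, chosen for illustration */

    /* Unclamped: 1u << rho with rho >= 32 is undefined behaviour.
     * uint32_t bad = 1u << rho;            <- don't do this */

    /* Clamped, as in the fix: the shift count can never exceed 16. */
    uint32_t good = 1u << min_u32(rho, 16u);
    printf("clamped increment base: %u\n", good);   /* 65536 */
    return 0;
}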
@@ -1555,6 +1555,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 #endif

     if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+        sock_rps_save_rxhash(sk, skb->rxhash);
         TCP_CHECK_TIMER(sk);
         if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
             rsk = sk;
@@ -1579,7 +1580,9 @@
             }
             return 0;
         }
-    }
+    } else
+        sock_rps_save_rxhash(sk, skb->rxhash);
+

     TCP_CHECK_TIMER(sk);
     if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
@@ -1672,8 +1675,6 @@ int tcp_v4_rcv(struct sk_buff *skb)

     skb->dev = NULL;

-    sock_rps_save_rxhash(sk, skb->rxhash);
-
     bh_lock_sock_nested(sk);
     ret = 0;
     if (!sock_owned_by_user(sk)) {
...
@@ -332,14 +332,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
                     IEEE80211_QUEUE_STOP_REASON_AGGREGATION);

     spin_unlock(&local->ampdu_lock);

-    spin_unlock_bh(&sta->lock);
-
-    /* send an addBA request */
+    /* prepare tid data */
     sta->ampdu_mlme.dialog_token_allocator++;
     sta->ampdu_mlme.tid_tx[tid]->dialog_token =
             sta->ampdu_mlme.dialog_token_allocator;
     sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;

+    spin_unlock_bh(&sta->lock);
+
+    /* send AddBA request */
     ieee80211_send_addba_request(sdata, pubsta->addr, tid,
              sta->ampdu_mlme.tid_tx[tid]->dialog_token,
              sta->ampdu_mlme.tid_tx[tid]->ssn,
...
@@ -1818,17 +1818,26 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
         return RX_CONTINUE;

     if (ieee80211_is_back_req(bar->frame_control)) {
+        struct {
+            __le16 control, start_seq_num;
+        } __packed bar_data;
+
         if (!rx->sta)
             return RX_DROP_MONITOR;
+
+        if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
+                  &bar_data, sizeof(bar_data)))
+            return RX_DROP_MONITOR;
+
         spin_lock(&rx->sta->lock);
-        tid = le16_to_cpu(bar->control) >> 12;
+        tid = le16_to_cpu(bar_data.control) >> 12;
         if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
             spin_unlock(&rx->sta->lock);
             return RX_DROP_MONITOR;
         }
         tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];

-        start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
+        start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;

         /* reset session timer */
         if (tid_agg_rx->timeout)
...
@@ -159,6 +159,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
             iph->daddr = new_addr;

         csum_replace4(&iph->check, addr, new_addr);
+    } else if ((iph->frag_off & htons(IP_OFFSET)) ||
+           iph->protocol != IPPROTO_ICMP) {
+        goto out;
     }

     ihl = iph->ihl * 4;
@@ -247,6 +250,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
         break;
     }

+out:
     return action;

 drop:
...
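The act_nat context above patches the IP header checksum with csum_replace4() whenever an address is rewritten. The sketch below is a hedged userspace rendition of that RFC 1624-style incremental update, the arithmetic only, not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into 16 bits with end-around carry. */
static uint16_t csum_fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

/* Incremental ones'-complement checksum update when a 32-bit field
 * changes from 'from' to 'to' (RFC 1624: HC' = ~(~HC + ~m + m')),
 * roughly the job csum_replace4() does for the IP header checksum. */
static uint16_t checksum_replace4(uint16_t check, uint32_t from, uint32_t to)
{
    uint32_t sum = (uint16_t)~check;

    sum += (uint16_t)~(from >> 16) + (uint16_t)~(from & 0xffff);
    sum += (to >> 16) + (to & 0xffff);
    return (uint16_t)~csum_fold(sum);
}

int main(void)
{
    uint16_t check = 0xFFFE;    /* checksum over a single word 0x0001 */
    uint16_t updated = checksum_replace4(check, 0x00000001, 0x00000002);

    printf("old 0x%04x -> new 0x%04x (expect 0xfffd)\n", check, updated);
    return 0;
}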
@@ -125,7 +125,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 {
     struct tcf_pedit *p = a->priv;
     int i, munged = 0;
-    u8 *pptr;
+    unsigned int off;

     if (!(skb->tc_verd & TC_OK2MUNGE)) {
         /* should we set skb->cloned? */
@@ -134,7 +134,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
         }
     }

-    pptr = skb_network_header(skb);
+    off = skb_network_offset(skb);

     spin_lock(&p->tcf_lock);
@@ -144,17 +144,17 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
         struct tc_pedit_key *tkey = p->tcfp_keys;

         for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
-            u32 *ptr;
+            u32 *ptr, _data;
             int offset = tkey->off;

             if (tkey->offmask) {
-                if (skb->len > tkey->at) {
-                    char *j = pptr + tkey->at;
-                    offset += ((*j & tkey->offmask) >>
-                           tkey->shift);
-                } else {
-                    goto bad;
-                }
+                char *d, _d;
+
+                d = skb_header_pointer(skb, off + tkey->at, 1,
+                               &_d);
+                if (!d)
+                    goto bad;
+                offset += (*d & tkey->offmask) >> tkey->shift;
             }

             if (offset % 4) {
@@ -169,9 +169,13 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
                 goto bad;
             }

-            ptr = (u32 *)(pptr+offset);
+            ptr = skb_header_pointer(skb, off + offset, 4, &_data);
+            if (!ptr)
+                goto bad;
+
             /* just do it, baby */
             *ptr = ((*ptr & tkey->mask) ^ tkey->val);
+            if (ptr == &_data)
+                skb_store_bits(skb, off + offset, ptr, 4);

             munged++;
         }
...
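Both the act_pedit fix above and the cls_u32 fix that follows replace raw pointer arithmetic on packet data with skb_header_pointer(), which returns a pointer into the packet when the requested bytes are contiguous, copies them into a caller-supplied buffer when they are not, and returns NULL when the packet is too short. A hedged userspace analogue over a two-segment toy packet shows the idea:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A toy packet made of two non-contiguous segments, standing in for a
 * non-linear skb (linear head plus a paged fragment). */
struct pkt {
    const uint8_t *seg[2];
    size_t seg_len[2];
};

/* Analogue of skb_header_pointer(): return a pointer to 'len' bytes at
 * 'offset', copying into 'buf' when the range is not fully inside the
 * first segment.  Returns NULL if the packet is too short. */
static const void *pkt_header_pointer(const struct pkt *p, size_t offset,
                                      size_t len, void *buf)
{
    size_t total = p->seg_len[0] + p->seg_len[1];

    if (offset + len > total)
        return NULL;
    if (offset + len <= p->seg_len[0])
        return p->seg[0] + offset;      /* fully in segment 0: no copy */

    /* Crosses the boundary (or lies in segment 1): copy byte by byte. */
    for (size_t i = 0; i < len; i++) {
        size_t pos = offset + i;
        ((uint8_t *)buf)[i] = pos < p->seg_len[0]
            ? p->seg[0][pos]
            : p->seg[1][pos - p->seg_len[0]];
    }
    return buf;
}

int main(void)
{
    const uint8_t a[] = { 0x45, 0x00, 0x00, 0x54 };
    const uint8_t b[] = { 0x11, 0x22, 0x33, 0x44 };
    struct pkt p = { { a, b }, { sizeof(a), sizeof(b) } };
    uint32_t tmp;
    const void *ptr;

    ptr = pkt_header_pointer(&p, 2, sizeof(tmp), &tmp); /* spans a|b */
    printf("spanning read ok: %d (copied: %d)\n", ptr != NULL, ptr == &tmp);

    ptr = pkt_header_pointer(&p, 6, sizeof(tmp), &tmp); /* too short */
    printf("out-of-range read: %s\n", ptr ? "pointer" : "NULL");
    return 0;
}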
@@ -98,11 +98,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
 {
     struct {
         struct tc_u_knode *knode;
-        u8          *ptr;
+        unsigned int      off;
     } stack[TC_U32_MAXDEPTH];

     struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
-    u8 *ptr = skb_network_header(skb);
+    unsigned int off = skb_network_offset(skb);
     struct tc_u_knode *n;
     int sdepth = 0;
     int off2 = 0;
@@ -134,8 +134,14 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
 #endif

         for (i = n->sel.nkeys; i>0; i--, key++) {
+            unsigned int toff;
+            __be32 *data, _data;

-            if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
+            toff = off + key->off + (off2 & key->offmask);
+            data = skb_header_pointer(skb, toff, 4, &_data);
+            if (!data)
+                goto out;
+            if ((*data ^ key->val) & key->mask) {
                 n = n->next;
                 goto next_knode;
             }
@@ -174,29 +180,45 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
             if (sdepth >= TC_U32_MAXDEPTH)
                 goto deadloop;
             stack[sdepth].knode = n;
-            stack[sdepth].ptr = ptr;
+            stack[sdepth].off = off;
             sdepth++;

             ht = n->ht_down;
             sel = 0;
-            if (ht->divisor)
-                sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
+            if (ht->divisor) {
+                __be32 *data, _data;
+
+                data = skb_header_pointer(skb, off + n->sel.hoff, 4,
+                              &_data);
+                if (!data)
+                    goto out;
+                sel = ht->divisor & u32_hash_fold(*data, &n->sel,
+                                  n->fshift);
+            }

             if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
                 goto next_ht;

             if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
                 off2 = n->sel.off + 3;
-                if (n->sel.flags&TC_U32_VAROFFSET)
-                    off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
+                if (n->sel.flags & TC_U32_VAROFFSET) {
+                    __be16 *data, _data;
+
+                    data = skb_header_pointer(skb,
+                                  off + n->sel.offoff,
+                                  2, &_data);
+                    if (!data)
+                        goto out;
+                    off2 += ntohs(n->sel.offmask & *data) >>
+                        n->sel.offshift;
+                }
                 off2 &= ~3;
             }

             if (n->sel.flags&TC_U32_EAT) {
-                ptr += off2;
+                off += off2;
                 off2 = 0;
             }

-            if (ptr < skb_tail_pointer(skb))
+            if (off < skb->len)
                 goto next_ht;
         }
@@ -204,9 +226,10 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
     if (sdepth--) {
         n = stack[sdepth].knode;
         ht = n->ht_up;
-        ptr = stack[sdepth].ptr;
+        off = stack[sdepth].off;
         goto check_terminal;
     }
+out:
     return -1;

 deadloop:
...
@@ -95,13 +95,13 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
             goto error_nolock;
         }

-        dst = dst_pop(dst);
+        dst = skb_dst_pop(skb);
         if (!dst) {
             XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
             err = -EHOSTUNREACH;
             goto error_nolock;
         }
-        skb_dst_set(skb, dst);
+        skb_dst_set_noref(skb, dst);
         x = dst->xfrm;
     } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
...
@@ -2153,6 +2153,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
         return 0;
     }

+    skb_dst_force(skb);
     dst = skb_dst(skb);

     res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;
...