Commit 228fdc08 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "Famouse last words: "final pull request" :-)

  I'm sending this because Jason Wang's fixes are pretty important

   1) Add missing per-cpu stats initialization to ip6_vti.  Otherwise
      lockdep spits out a call trace.  From Li RongQing.

   2) Fix NULL oops in wireless hwsim, from Javier Lopez

   3) TIPC deferred packet queue unlink must NULL out skb->next to avoid
      crashes.  From Erik Hugne

   4) Fix access to uninitialized buffer in nf_nat netfilter code, from
      Daniel Borkmann

   5) Fix lifetime of ipv6 loopback and SIT tunnel addresses, otherwise
      they basically timeout immediately.  From Hannes Frederic Sowa

   6) Fix DMA unmapping of TSO packets in bnx2x driver, from Michal
      Schmidt

   7) Do not allow L2 forwarding offload via macvtap device, the way
      things are now it will not end up being forwarded at all.  From
      Jason Wang

   8) Fix transmit queue selection via ndo_dfwd_start_xmit(), fixing
      things like applying NETIF_F_LLTX to the wrong device (!!) and
      eliding the proper transmit watchdog handling

   9) qlcnic driver was not updating tx statistics at all, from Manish
      Chopra"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  qlcnic: Fix ethtool statistics length calculation
  qlcnic: Fix bug in TX statistics
  net: core: explicitly select a txq before doing l2 forwarding
  macvlan: forbid L2 fowarding offload for macvtap
  bnx2x: fix DMA unmapping of TSO split BDs
  ipv6: add link-local, sit and loopback address with INFINITY_LIFE_TIME
  bnx2x: prevent WARN during driver unload
  tipc: correctly unlink packets from deferred packet queue
  ipv6: pcpu_tstats.syncp should be initialised in ip6_vti.c
  netfilter: only warn once on wrong seqadj usage
  netfilter: nf_nat: fix access to uninitialized buffer in IRC NAT helper
  NFC: Fix target mode p2p link establishment
  iwlwifi: add new devices for 7265 series
  mac80211: move "bufferable MMPDU" check to fix AP mode scan
  mac80211_hwsim: Fix NULL pointer dereference
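
Most of the driver churn in the diffs below comes from one interface change: ndo_select_queue() now receives the accel_priv handle that ndo_dfwd_add_station() returned, so queue selection can account for L2-forwarding slave devices. A minimal sketch of a driver callback under the new signature (foo_select_queue and its queue math are illustrative, not part of this merge):

/* Hypothetical callback showing the new third parameter.  accel_priv is
 * non-NULL only when the skb entered via dev_queue_xmit_accel() on
 * behalf of an upper device using L2 forwarding offload.
 */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv)
{
	if (accel_priv)		/* e.g. first queue of the slave's range */
		return 0;

	/* drivers without offload support simply ignore accel_priv */
	return __netdev_pick_tx(dev, skb);
}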
@@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond,
 }
 
-static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
+			     void *accel_priv)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
......
@@ -520,10 +520,12 @@ struct bnx2x_fastpath {
 #define BNX2X_FP_STATE_IDLE		      0
 #define BNX2X_FP_STATE_NAPI		(1 << 0)    /* NAPI owns this FP */
 #define BNX2X_FP_STATE_POLL		(1 << 1)    /* poll owns this FP */
-#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 2)    /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD	(1 << 3)    /* poll yielded this FP */
+#define BNX2X_FP_STATE_DISABLED		(1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 3)    /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD	(1 << 4)    /* poll yielded this FP */
+#define BNX2X_FP_OWNED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
 #define BNX2X_FP_YIELD	(BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_LOCKED	(BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
 #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
 	/* protect state */
 	spinlock_t lock;
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 {
 	bool rc = true;
 
-	spin_lock(&fp->lock);
+	spin_lock_bh(&fp->lock);
 	if (fp->state & BNX2X_FP_LOCKED) {
 		WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
 		fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 		/* we don't care if someone yielded */
 		fp->state = BNX2X_FP_STATE_NAPI;
 	}
-	spin_unlock(&fp->lock);
+	spin_unlock_bh(&fp->lock);
 	return rc;
 }
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
 	bool rc = false;
 
-	spin_lock(&fp->lock);
+	spin_lock_bh(&fp->lock);
 	WARN_ON(fp->state &
 		(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
 
 	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
 		rc = true;
 
-	fp->state = BNX2X_FP_STATE_IDLE;
-	spin_unlock(&fp->lock);
+	/* state ==> idle, unless currently disabled */
+	fp->state &= BNX2X_FP_STATE_DISABLED;
+	spin_unlock_bh(&fp->lock);
 	return rc;
 }
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
 		rc = true;
 
-	fp->state = BNX2X_FP_STATE_IDLE;
+	/* state ==> idle, unless currently disabled */
+	fp->state &= BNX2X_FP_STATE_DISABLED;
 	spin_unlock_bh(&fp->lock);
 	return rc;
 }
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
-	WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+	WARN_ON(!(fp->state & BNX2X_FP_OWNED));
 	return fp->state & BNX2X_FP_USER_PEND;
 }
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+	int rc = true;
+
+	spin_lock_bh(&fp->lock);
+	if (fp->state & BNX2X_FP_OWNED)
+		rc = false;
+	fp->state |= BNX2X_FP_STATE_DISABLED;
+	spin_unlock_bh(&fp->lock);
+
+	return rc;
+}
 #else
 static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
 {
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
 	return false;
 }
+
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+	return true;
+}
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 /* Use 2500 as a mini-jumbo MTU for FCoE */
......
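
The key to the bnx2x hunks above is that the unlock paths now mask with BNX2X_FP_STATE_DISABLED instead of storing BNX2X_FP_STATE_IDLE outright; a plain store would erase a disable request that raced with the current owner. Spelling out the invariant with the bit values defined above (the annotation is editorial, the final line is from the patch):

/* state transitions on unlock, using the defines above:
 *   NAPI (1 << 0)           ->  IDLE (0)
 *   NAPI | DISABLED (0x5)   ->  DISABLED (1 << 2)
 * The DISABLED bit is sticky, and since BNX2X_FP_LOCKED now includes
 * it, bnx2x_fp_lock_napi() refuses new owners while the unload path
 * sleeps in usleep_range() until bnx2x_fp_ll_disable() succeeds.
 */
fp->state &= BNX2X_FP_STATE_DISABLED;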
@@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 	struct sk_buff *skb = tx_buf->skb;
 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 	int nbd;
+	u16 split_bd_len = 0;
 
 	/* prefetch skb end pointer to speedup dev_kfree_skb() */
 	prefetch(&skb->end);
@@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
 	   txdata->txq_index, idx, tx_buf, skb);
 
-	/* unmap first bd */
 	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
-	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
-			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 #ifdef BNX2X_STOP_ON_ERROR
@@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 	--nbd;
 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 
-	/* ...and the TSO split header bd since they have no mapping */
+	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
 		--nbd;
 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 	}
 
+	/* unmap first bd */
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+			 DMA_TO_DEVICE);
+
 	/* now free frags */
 	while (nbd > 0) {
@@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
 {
 	int i;
 
-	local_bh_disable();
 	for_each_rx_queue_cnic(bp, i) {
 		napi_disable(&bnx2x_fp(bp, i, napi));
-		while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-			mdelay(1);
+		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+			usleep_range(1000, 2000);
 	}
-	local_bh_enable();
 }
 
 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
-	local_bh_disable();
 	for_each_eth_queue(bp, i) {
 		napi_disable(&bnx2x_fp(bp, i, napi));
-		while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-			mdelay(1);
+		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+			usleep_range(1000, 2000);
 	}
-	local_bh_enable();
 }
 
 void bnx2x_netif_start(struct bnx2x *bp)
@@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 		bnx2x_napi_disable_cnic(bp);
 }
 
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv)
 {
 	struct bnx2x *bp = netdev_priv(dev);
......
@@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
 int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
 
 /* select_queue callback */
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 					struct bnx2x_fastpath *fp,
......
@@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 	return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
 
-#ifdef IXGBE_FCOE
-static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+			      void *accel_priv)
 {
+	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
+#ifdef IXGBE_FCOE
 	struct ixgbe_adapter *adapter;
 	struct ixgbe_ring_feature *f;
 	int txq;
+#endif
+
+	if (fwd_adapter)
+		return skb->queue_mapping + fwd_adapter->tx_base_queue;
+
+#ifdef IXGBE_FCOE
 
 	/*
 	 * only execute the code below if protocol is FCoE
@@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 
 	txq -= f->indices;
 	return txq + f->offset;
+#else
+	return __netdev_pick_tx(dev, skb);
+#endif
 }
-#endif
 
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
@@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 	kfree(fwd_adapter);
 }
 
-static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
-				  struct net_device *dev,
-				  void *priv)
-{
-	struct ixgbe_fwd_adapter *fwd_adapter = priv;
-	unsigned int queue;
-	struct ixgbe_ring *tx_ring;
-
-	queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
-	tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
-
-	return __ixgbe_xmit_frame(skb, dev, tx_ring);
-}
-
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
 	.ndo_start_xmit		= ixgbe_xmit_frame,
-#ifdef IXGBE_FCOE
 	.ndo_select_queue	= ixgbe_select_queue,
-#endif
 	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= ixgbe_set_mac,
@@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
 	.ndo_dfwd_add_station	= ixgbe_fwd_add,
 	.ndo_dfwd_del_station	= ixgbe_fwd_del,
-	.ndo_dfwd_start_xmit	= ixgbe_fwd_xmit,
 };
 
 /**
......
@@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev)
 }
 
 static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
+ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
+		      void *accel_priv)
 {
 	/* we are currently only using the first queue */
 	return 0;
......
@@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
 	}
 }
 
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+			 void *accel_priv)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	u16 rings_p_up = priv->num_tx_rings_p_up;
......
@@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+			 void *accel_priv);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
......
@@ -1711,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
 void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
 void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
 void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+void qlcnic_update_stats(struct qlcnic_adapter *);
 
 /* Adapter hardware abstraction */
 struct qlcnic_hardware_ops {
......
@@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define QLCNIC_TEST_LEN	ARRAY_SIZE(qlcnic_gstrings_test)
 
-static inline int qlcnic_82xx_statistics(void)
+static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
 {
-	return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
-	       ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+	return ARRAY_SIZE(qlcnic_gstrings_stats) +
+	       ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+	       QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
 }
 
-static inline int qlcnic_83xx_statistics(void)
+static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
 {
-	return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+	return ARRAY_SIZE(qlcnic_gstrings_stats) +
+	       ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
 	       ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
-	       ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+	       ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+	       QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
 }
 
 static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
 {
-	if (qlcnic_82xx_check(adapter))
-		return qlcnic_82xx_statistics();
-	else if (qlcnic_83xx_check(adapter))
-		return qlcnic_83xx_statistics();
-	else
-		return -1;
+	int len = -1;
+
+	if (qlcnic_82xx_check(adapter)) {
+		len = qlcnic_82xx_statistics(adapter);
+		if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+			len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+	} else if (qlcnic_83xx_check(adapter)) {
+		len = qlcnic_83xx_statistics(adapter);
+	}
+
+	return len;
 }
 
 #define QLCNIC_TX_INTR_NOT_CONFIGURED	0X78563412
@@ -920,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev)
 
 static int qlcnic_get_sset_count(struct net_device *dev, int sset)
 {
-	int len;
-
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
+
 	switch (sset) {
 	case ETH_SS_TEST:
 		return QLCNIC_TEST_LEN;
 	case ETH_SS_STATS:
-		len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
-		if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
-		    qlcnic_83xx_check(adapter))
-			return len;
-		return qlcnic_82xx_statistics();
+		return qlcnic_dev_statistics_len(adapter);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -1267,7 +1270,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
 	return data;
 }
 
-static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+void qlcnic_update_stats(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_host_tx_ring *tx_ring;
 	int ring;
......
@@ -2780,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct net_device_stats *stats = &netdev->stats;
 
+	if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+		qlcnic_update_stats(adapter);
+
 	stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
 	stats->tx_packets = adapter->stats.xmitfinished;
 	stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
......
@@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 }
 
 /* Return subqueue id on this core (one per core). */
-static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
+				 void *accel_priv)
 {
 	return smp_processor_id();
 }
......
@@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 
 	if (vlan->fwd_priv) {
 		skb->dev = vlan->lowerdev;
-		ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+		ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
 	} else {
 		ret = macvlan_queue_xmit(skb, dev);
 	}
@@ -338,6 +338,8 @@ static const struct header_ops macvlan_hard_header_ops = {
 	.cache_update	= eth_header_cache_update,
 };
 
+static struct rtnl_link_ops macvlan_link_ops;
+
 static int macvlan_open(struct net_device *dev)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
@@ -353,7 +355,8 @@ static int macvlan_open(struct net_device *dev)
 		goto hash_add;
 	}
 
-	if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+	if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
+	    dev->rtnl_link_ops == &macvlan_link_ops) {
 		vlan->fwd_priv =
 		      lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
 
@@ -362,10 +365,8 @@ static int macvlan_open(struct net_device *dev)
 		 */
 		if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
 			vlan->fwd_priv = NULL;
-		} else {
-			dev->features &= ~NETIF_F_LLTX;
+		} else
 			return 0;
-		}
 	}
 
 	err = -EBUSY;
@@ -699,8 +700,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
 	features = netdev_increment_features(vlan->lowerdev->features,
 					     features,
 					     mask);
-	if (!vlan->fwd_priv)
-		features |= NETIF_F_LLTX;
+	features |= NETIF_F_LLTX;
 
 	return features;
 }
......
@@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
+			     void *accel_priv)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
......
@@ -348,7 +348,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
  * different rxq no. here. If we could not get rxhash, then we would
  * hope the rxq no. may help here.
  */
-static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+			    void *accel_priv)
 {
 	struct tun_struct *tun = netdev_priv(dev);
 	struct tun_flow_entry *e;
......
@@ -357,21 +357,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 	{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
......
@@ -2011,7 +2011,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
 	    (hwsim_flags & HWSIM_TX_STAT_ACK)) {
 		if (skb->len >= 16) {
 			hdr = (struct ieee80211_hdr *) skb->data;
-			mac80211_hwsim_monitor_ack(txi->rate_driver_data[0],
+			mac80211_hwsim_monitor_ack(data2->channel,
 						   hdr->addr2);
 		}
 		txi->flags |= IEEE80211_TX_STAT_ACK;
......
@@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
 }
 
 static u16
-mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
+mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
+				void *accel_priv)
 {
 	skb->priority = cfg80211_classify8021d(skb);
 	return mwifiex_1d_to_wmm_queue[skb->priority];
......
@@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev)
 	return 0;
 }
 
-static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
+			    void *accel_priv)
 {
 	return ClassifyPacket(netdev_priv(dev), skb);
 }
......
@@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb)
+static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
+				void *accel_priv)
 {
 	return (u16)smp_processor_id();
 }
......
@@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 	return dscp >> 5;
 }
 
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+			    void *accel_priv)
 {
 	struct adapter *padapter = rtw_netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
......
@@ -769,7 +769,8 @@ struct netdev_phys_port_id {
  *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *	Required can not be NULL.
  *
- * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
+ * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
+ *                         void *accel_priv);
  *	Called to decide which queue to when device supports multiple
  *	transmit queues.
  *
@@ -990,7 +991,8 @@ struct net_device_ops {
 	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
 						   struct net_device *dev);
 	u16			(*ndo_select_queue)(struct net_device *dev,
-						    struct sk_buff *skb);
+						    struct sk_buff *skb,
+						    void *accel_priv);
 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 						       int flags);
 	void			(*ndo_set_rx_mode)(struct net_device *dev);
@@ -1529,7 +1531,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 }
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb);
+				    struct sk_buff *skb,
+				    void *accel_priv);
 u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
@@ -1819,6 +1822,7 @@ int dev_close(struct net_device *dev);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct sk_buff *newskb);
 int dev_queue_xmit(struct sk_buff *skb);
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 void unregister_netdevice_many(struct list_head *head);
@@ -2426,7 +2430,7 @@ int dev_change_carrier(struct net_device *, bool new_carrier);
 int dev_get_phys_port_id(struct net_device *dev,
 			 struct netdev_phys_port_id *ppid);
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq, void *accel_priv);
+			struct netdev_queue *txq);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 
 extern int netdev_budget;
......
@@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq, void *accel_priv)
+			struct netdev_queue *txq)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int rc = NETDEV_TX_OK;
@@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			dev_queue_xmit_nit(skb, dev);
 
 		skb_len = skb->len;
-		if (accel_priv)
-			rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
-		else
-			rc = ops->ndo_start_xmit(skb, dev);
+		rc = ops->ndo_start_xmit(skb, dev);
 
 		trace_net_dev_xmit(skb, rc, dev, skb_len);
-		if (rc == NETDEV_TX_OK && txq)
+		if (rc == NETDEV_TX_OK)
 			txq_trans_update(txq);
 		return rc;
 	}
@@ -2627,10 +2624,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		dev_queue_xmit_nit(nskb, dev);
 
 		skb_len = nskb->len;
-		if (accel_priv)
-			rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
-		else
-			rc = ops->ndo_start_xmit(nskb, dev);
+		rc = ops->ndo_start_xmit(nskb, dev);
 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
 		if (unlikely(rc != NETDEV_TX_OK)) {
 			if (rc & ~NETDEV_TX_MASK)
@@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
  *	the BH enable code must have IRQs enabled so that it will not deadlock.
  *	--BLG
  */
-int dev_queue_xmit(struct sk_buff *skb)
+int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq;
@@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 	skb_update_prio(skb);
 
-	txq = netdev_pick_tx(dev, skb);
+	txq = netdev_pick_tx(dev, skb, accel_priv);
 	q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 			if (!netif_xmit_stopped(txq)) {
 				__this_cpu_inc(xmit_recursion);
-				rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+				rc = dev_hard_start_xmit(skb, dev, txq);
 				__this_cpu_dec(xmit_recursion);
 				if (dev_xmit_complete(rc)) {
 					HARD_TX_UNLOCK(dev, txq);
@@ -2892,8 +2886,19 @@ int dev_queue_xmit(struct sk_buff *skb)
 	rcu_read_unlock_bh();
 	return rc;
 }
+
+int dev_queue_xmit(struct sk_buff *skb)
+{
+	return __dev_queue_xmit(skb, NULL);
+}
 EXPORT_SYMBOL(dev_queue_xmit);
 
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+{
+	return __dev_queue_xmit(skb, accel_priv);
+}
+EXPORT_SYMBOL(dev_queue_xmit_accel);
+
 /*=======================================================================
 			Receiver routines
......
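
The split of dev_queue_xmit() into __dev_queue_xmit() plus two thin entry points is what lets an upper device hand its forwarding handle down to the lower device's queue selection. A sketch of the intended calling sequence for an offload consumer (upper_xmit is illustrative; macvlan's real version is in its hunk earlier on this page):

static int upper_xmit(struct sk_buff *skb, struct net_device *lowerdev,
		      void *fwd_priv)
{
	skb->dev = lowerdev;
	if (fwd_priv)
		/* fwd_priv came from ndo_dfwd_add_station() at open time;
		 * the lower driver's ndo_select_queue() sees it as
		 * accel_priv and maps to the slave's queue range.
		 */
		return dev_queue_xmit_accel(skb, fwd_priv);
	return dev_queue_xmit(skb);
}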
@@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 EXPORT_SYMBOL(__netdev_pick_tx);
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb)
+				    struct sk_buff *skb,
+				    void *accel_priv)
 {
 	int queue_index = 0;
 
 	if (dev->real_num_tx_queues != 1) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb);
+			queue_index = ops->ndo_select_queue(dev, skb,
+							    accel_priv);
 		else
 			queue_index = __netdev_pick_tx(dev, skb);
-		queue_index = dev_cap_txqueue(dev, queue_index);
+
+		if (!accel_priv)
+			queue_index = dev_cap_txqueue(dev, queue_index);
 	}
 
 	skb_set_queue_mapping(skb, queue_index);
......
@@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;
 
-		txq = netdev_pick_tx(dev, skb);
+		txq = netdev_pick_tx(dev, skb, NULL);
 
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
......
@@ -2509,7 +2509,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
 	struct inet6_ifaddr *ifp;
 
 	ifp = ipv6_add_addr(idev, addr, NULL, plen,
-			    scope, IFA_F_PERMANENT, 0, 0);
+			    scope, IFA_F_PERMANENT,
+			    INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
 	if (!IS_ERR(ifp)) {
 		spin_lock_bh(&ifp->lock);
 		ifp->flags &= ~IFA_F_TENTATIVE;
@@ -2637,7 +2638,8 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
 #endif
 
-	ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
+	ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
+			    INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
 	if (!IS_ERR(ifp)) {
 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
 		addrconf_dad_start(ifp);
......
@@ -732,12 +732,18 @@ static void vti6_dev_setup(struct net_device *dev)
 static inline int vti6_dev_init_gen(struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
+	int i;
 
 	t->dev = dev;
 	t->net = dev_net(dev);
 	dev->tstats = alloc_percpu(struct pcpu_tstats);
 	if (!dev->tstats)
 		return -ENOMEM;
+	for_each_possible_cpu(i) {
+		struct pcpu_tstats *stats;
+		stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&stats->syncp);
+	}
 	return 0;
 }
......
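
The u64_stats_init() loop matters because every reader of these counters enters the per-cpu syncp seqcount through u64_stats_fetch_begin_bh(); on configurations where that seqcount carries lockdep state, using it uninitialized is what produced the reported call trace. A sketch of the reader side the initialization pairs with (fold_tstats is illustrative consumer code, not part of the patch):

static void fold_tstats(struct net_device *dev, struct pcpu_tstats *sum)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *s = per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes;
		unsigned int start;

		do {	/* retry if a writer updated s during the reads */
			start = u64_stats_fetch_begin_bh(&s->syncp);
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&s->syncp, start));

		sum->rx_packets += rx_packets;
		sum->rx_bytes += rx_bytes;
	}
}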
@@ -1061,7 +1061,8 @@ static void ieee80211_uninit(struct net_device *dev)
 }
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
-					 struct sk_buff *skb)
+					 struct sk_buff *skb,
+					 void *accel_priv)
 {
 	return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
@@ -1078,7 +1079,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 };
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
-					  struct sk_buff *skb)
+					  struct sk_buff *skb,
+					  void *accel_priv)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
......
@@ -463,7 +463,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 {
 	struct sta_info *sta = tx->sta;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 	struct ieee80211_local *local = tx->local;
 
 	if (unlikely(!sta))
@@ -474,15 +473,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 		     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
 		int ac = skb_get_queue_mapping(tx->skb);
 
-		/* only deauth, disassoc and action are bufferable MMPDUs */
-		if (ieee80211_is_mgmt(hdr->frame_control) &&
-		    !ieee80211_is_deauth(hdr->frame_control) &&
-		    !ieee80211_is_disassoc(hdr->frame_control) &&
-		    !ieee80211_is_action(hdr->frame_control)) {
-			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
-			return TX_CONTINUE;
-		}
-
 		ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
 		       sta->sta.addr, sta->sta.aid, ac);
 		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -525,9 +515,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 {
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
+
 	if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
 		return TX_CONTINUE;
 
+	/* only deauth, disassoc and action are bufferable MMPDUs */
+	if (ieee80211_is_mgmt(hdr->frame_control) &&
+	    !ieee80211_is_deauth(hdr->frame_control) &&
+	    !ieee80211_is_disassoc(hdr->frame_control) &&
+	    !ieee80211_is_action(hdr->frame_control)) {
+		if (tx->flags & IEEE80211_TX_UNICAST)
+			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+		return TX_CONTINUE;
+	}
+
 	if (tx->flags & IEEE80211_TX_UNICAST)
 		return ieee80211_tx_h_unicast_ps_buf(tx);
 	else
......
@@ -37,7 +37,7 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
 		return 0;
 
 	if (unlikely(!seqadj)) {
-		WARN(1, "Wrong seqadj usage, missing nfct_seqadj_ext_add()\n");
+		WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n");
 		return 0;
 	}
......
@@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb,
 			 struct nf_conntrack_expect *exp)
 {
 	char buffer[sizeof("4294967296 65635")];
+	struct nf_conn *ct = exp->master;
+	union nf_inet_addr newaddr;
 	u_int16_t port;
 	unsigned int ret;
 
 	/* Reply comes from server. */
+	newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3;
+
 	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
 	exp->dir = IP_CT_DIR_REPLY;
 	exp->expectfn = nf_nat_follow_master;
@@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb,
 	}
 
 	if (port == 0) {
-		nf_ct_helper_log(skb, exp->master, "all ports in use");
+		nf_ct_helper_log(skb, ct, "all ports in use");
 		return NF_DROP;
 	}
 
-	ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
-				       protoff, matchoff, matchlen, buffer,
-				       strlen(buffer));
+	/* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
+	 * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
+	 * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26
+	 * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26
+	 * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27
+	 *
+	 * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits,
+	 *                        255.255.255.255==4294967296, 10 digits)
+	 * P:         bound port (min 1 d, max 5d (65635))
+	 * F:         filename   (min 1 d )
+	 * S:         size       (min 1 d )
+	 * 0x01, \n:  terminators
+	 */
+	/* AAA = "us", ie. where server normally talks to. */
+	snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
+	pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
+		 buffer, &newaddr.ip, port);
+
+	ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
+				       matchlen, buffer, strlen(buffer));
 	if (ret != NF_ACCEPT) {
-		nf_ct_helper_log(skb, exp->master, "cannot mangle packet");
+		nf_ct_helper_log(skb, ct, "cannot mangle packet");
 		nf_ct_unexpect_related(exp);
 	}
+
 	return ret;
 }
......
@@ -384,7 +384,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
 {
 	dev->dep_link_up = true;
 
-	if (!dev->active_target) {
+	if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) {
 		struct nfc_target *target;
 
 		target = nfc_find_target(dev, target_idx);
@@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_xmit_frozen_or_stopped(txq))
-		ret = dev_hard_start_xmit(skb, dev, txq, NULL);
+		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
......
@@ -1498,6 +1498,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
 		int type;
 
 		head = head->next;
+		buf->next = NULL;
 
 		/* Ensure bearer is still enabled */
 		if (unlikely(!b_ptr->active))
......
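
The one-line TIPC fix works because the deferred queue is a hand-chained singly linked list: popping the head without clearing its next pointer lets a stale link escape into whatever list the buffer joins next. A minimal restatement of the unlink rule (hypothetical buf type, not TIPC code):

struct buf {
	struct buf *next;
	/* payload elided */
};

static struct buf *pop_head(struct buf **head)
{
	struct buf *b = *head;

	if (b) {
		*head = b->next;
		b->next = NULL;	/* the step this patch adds */
	}
	return b;
}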