Commit b0e7031a authored by Linus Torvalds

Merge git://github.com/davem330/net

* git://github.com/davem330/net: (62 commits)
  ipv6: don't use inetpeer to store metrics for routes.
  can: ti_hecc: include linux/io.h
  IRDA: Fix global type conflicts in net/irda/irsysctl.c v2
  net: Handle different key sizes between address families in flow cache
  net: Align AF-specific flowi structs to long
  ipv4: Fix fib_info->fib_metrics leak
  caif: fix a potential NULL dereference
  sctp: deal with multiple COOKIE_ECHO chunks
  ibmveth: Fix checksum offload failure handling
  ibmveth: Checksum offload is always disabled
  ibmveth: Fix issue with DMA mapping failure
  ibmveth: Fix DMA unmap error
  pch_gbe: support ML7831 IOH
  pch_gbe: added the process of FIFO over run error
  pch_gbe: fixed the issue which receives an unnecessary packet.
  sfc: Use 64-bit writes for TX push where possible
  Revert "sfc: Use write-combining to reduce TX latency" and follow-ups
  bnx2x: Fix ethtool advertisement
  bnx2x: Fix 578xx link LED
  bnx2x: Fix XMAC loopback test
  ...
+Note: This driver doesn't have a maintainer.
+
 Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver for Linux.

 This program is free software; you can redistribute it and/or
@@ -55,7 +57,6 @@ Test and make sure PCI latency is now correct for all cases.

 Authors:
 Sten Wang <sten_wang@davicom.com.tw > : Original Author
-Tobias Ringstrom <tori@unhappy.mine.nu> : Current Maintainer

 Contributors:
......
@@ -1278,7 +1278,6 @@ F: drivers/input/misc/ati_remote2.c
 ATLX ETHERNET DRIVERS
 M: Jay Cliburn <jcliburn@gmail.com>
 M: Chris Snook <chris.snook@gmail.com>
-M: Jie Yang <jie.yang@atheros.com>
 L: netdev@vger.kernel.org
 W: http://sourceforge.net/projects/atl1
 W: http://atl1.sourceforge.net
@@ -1574,7 +1573,6 @@ F: drivers/scsi/bfa/
 BROCADE BNA 10 GIGABIT ETHERNET DRIVER
 M: Rasesh Mody <rmody@brocade.com>
-M: Debashis Dutt <ddutt@brocade.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/bna/
@@ -1758,7 +1756,6 @@ F: Documentation/zh_CN/
 CISCO VIC ETHERNET NIC DRIVER
 M: Christian Benvenuti <benve@cisco.com>
-M: Vasanthy Kolluri <vkolluri@cisco.com>
 M: Roopa Prabhu <roprabhu@cisco.com>
 M: David Wang <dwang2@cisco.com>
 S: Supported
@@ -4415,7 +4412,8 @@ L: netfilter@vger.kernel.org
 L: coreteam@netfilter.org
 W: http://www.netfilter.org/
 W: http://www.iptables.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
 S: Supported
 F: include/linux/netfilter*
 F: include/linux/netfilter/
......
@@ -2535,7 +2535,7 @@ config S6GMAC
 source "drivers/net/stmmac/Kconfig"

 config PCH_GBE
-	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
+	tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
 	depends on PCI
 	select MII
 	---help---
@@ -2548,10 +2548,11 @@ config PCH_GBE
 	  This driver enables Gigabit Ethernet function.

 	  This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-	  Output Hub), ML7223.
-	  ML7223 IOH is for MP(Media Phone) use.
-	  ML7223 is companion chip for Intel Atom E6xx series.
-	  ML7223 is completely compatible for Intel EG20T PCH.
+	  Output Hub), ML7223/ML7831.
+	  ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
+	  purpose use.
+	  ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+	  ML7223/ML7831 is completely compatible for Intel EG20T PCH.

 config FTGMAC100
 	tristate "Faraday FTGMAC100 Gigabit Ethernet support"
......
@@ -315,6 +315,14 @@ union db_prod {
 	u32 raw;
 };

+/* dropless fc FW/HW related params */
+#define BRB_SIZE(bp)		(CHIP_IS_E3(bp) ? 1024 : 512)
+#define MAX_AGG_QS(bp)		(CHIP_IS_E1(bp) ? \
+					ETH_MAX_AGGREGATION_QUEUES_E1 :\
+					ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
+#define FW_DROP_LEVEL(bp)	(3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
+#define FW_PREFETCH_CNT		16
+#define DROPLESS_FC_HEADROOM	100
+
 /* MC hsi */
 #define BCM_PAGE_SHIFT		12
@@ -331,15 +339,35 @@ union db_prod {
 /* SGE ring related macros */
 #define NUM_RX_SGE_PAGES	2
 #define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
-#define MAX_RX_SGE_CNT		(RX_SGE_CNT - 2)
+#define NEXT_PAGE_SGE_DESC_CNT	2
+#define MAX_RX_SGE_CNT		(RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
 /* RX_SGE_CNT is promised to be a power of 2 */
 #define RX_SGE_MASK		(RX_SGE_CNT - 1)
 #define NUM_RX_SGE		(RX_SGE_CNT * NUM_RX_SGE_PAGES)
 #define MAX_RX_SGE		(NUM_RX_SGE - 1)
 #define NEXT_SGE_IDX(x)		((((x) & RX_SGE_MASK) == \
-				  (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
+				  (MAX_RX_SGE_CNT - 1)) ? \
+					(x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
+					(x) + 1)
 #define RX_SGE(x)		((x) & MAX_RX_SGE)
+
+/*
+ * Number of required SGEs is the sum of two:
+ * 1. Number of possible opened aggregations (next packet for
+ *    these aggregations will probably consume SGE immidiatelly)
+ * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
+ *    after placement on BD for new TPA aggregation)
+ *
+ * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
+ */
+#define NUM_SGE_REQ		(MAX_AGG_QS(bp) + \
+					(BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
+#define NUM_SGE_PG_REQ		((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
+					MAX_RX_SGE_CNT)
+#define SGE_TH_LO(bp)		(NUM_SGE_REQ + \
+				 NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
+#define SGE_TH_HI(bp)		(SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
 /* Manipulate a bit vector defined as an array of u64 */

 /* Number of bits in one sge_mask array element */
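To get a feel for the thresholds these macros produce, here is a small standalone sketch of the same arithmetic. The constants are assumptions for illustration only (an E2-class device with a 512-block BRB, 64 aggregation queues, 8-byte SGEs on 4 KiB pages); none of them are stated in this diff, so treat the numbers as indicative:

	/* sge_threshold_sketch.c - hypothetical worked example, not driver code */
	#include <stdio.h>

	int main(void)
	{
		const int brb_size = 512;    /* assumed BRB_SIZE() for a non-E3 part */
		const int max_agg_qs = 64;   /* assumed ETH_MAX_AGGREGATION_QUEUES_E1H_E2 */
		const int rx_sge_cnt = 4096 / 8;           /* page / sizeof(eth_rx_sge) */
		const int max_rx_sge_cnt = rx_sge_cnt - 2; /* minus NEXT_PAGE_SGE_DESC_CNT */
		const int headroom = 100;                  /* DROPLESS_FC_HEADROOM */

		int num_sge_req = max_agg_qs + (brb_size - max_agg_qs) / 2;  /* 288 */
		int num_sge_pg_req =
			(num_sge_req + max_rx_sge_cnt - 1) / max_rx_sge_cnt; /* 1 */
		int sge_th_lo = num_sge_req + num_sge_pg_req * 2;            /* 290 */
		int sge_th_hi = sge_th_lo + headroom;                        /* 390 */

		printf("SGE_TH_LO=%d SGE_TH_HI=%d\n", sge_th_lo, sge_th_hi);
		return 0;
	}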
@@ -551,24 +579,43 @@ struct bnx2x_fastpath {
 #define NUM_TX_RINGS		16
 #define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
-#define MAX_TX_DESC_CNT		(TX_DESC_CNT - 1)
+#define NEXT_PAGE_TX_DESC_CNT	1
+#define MAX_TX_DESC_CNT		(TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
 #define NUM_TX_BD		(TX_DESC_CNT * NUM_TX_RINGS)
 #define MAX_TX_BD		(NUM_TX_BD - 1)
 #define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
 #define NEXT_TX_IDX(x)		((((x) & MAX_TX_DESC_CNT) == \
-				  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+				  (MAX_TX_DESC_CNT - 1)) ? \
+					(x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
+					(x) + 1)
 #define TX_BD(x)		((x) & MAX_TX_BD)
 #define TX_BD_POFF(x)		((x) & MAX_TX_DESC_CNT)
 /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
 #define NUM_RX_RINGS		8
 #define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
-#define MAX_RX_DESC_CNT		(RX_DESC_CNT - 2)
+#define NEXT_PAGE_RX_DESC_CNT	2
+#define MAX_RX_DESC_CNT		(RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
 #define RX_DESC_MASK		(RX_DESC_CNT - 1)
 #define NUM_RX_BD		(RX_DESC_CNT * NUM_RX_RINGS)
 #define MAX_RX_BD		(NUM_RX_BD - 1)
 #define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define MIN_RX_AVAIL		128
+
+/* dropless fc calculations for BDs
+ *
+ * Number of BDs should as number of buffers in BRB:
+ * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_BD_REQ		BRB_SIZE(bp)
+#define NUM_BD_PG_REQ		((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
+					MAX_RX_DESC_CNT)
+#define BD_TH_LO(bp)		(NUM_BD_REQ + \
+				 NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
+				 FW_DROP_LEVEL(bp))
+#define BD_TH_HI(bp)		(BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+#define MIN_RX_AVAIL		((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
 #define MIN_RX_SIZE_TPA_HW	(CHIP_IS_E1(bp) ? \
 					ETH_MIN_RX_CQES_WITH_TPA_E1 : \
@@ -579,7 +626,9 @@ struct bnx2x_fastpath {
 						MIN_RX_AVAIL))
 #define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \
-				  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
+				  (MAX_RX_DESC_CNT - 1)) ? \
+					(x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
+					(x) + 1)
 #define RX_BD(x)		((x) & MAX_RX_BD)
 /*
@@ -589,14 +638,31 @@ struct bnx2x_fastpath {
 #define CQE_BD_REL	(sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
 #define NUM_RCQ_RINGS		(NUM_RX_RINGS * CQE_BD_REL)
 #define RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - 1)
+#define NEXT_PAGE_RCQ_DESC_CNT	1
+#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
 #define NUM_RCQ_BD		(RCQ_DESC_CNT * NUM_RCQ_RINGS)
 #define MAX_RCQ_BD		(NUM_RCQ_BD - 1)
 #define MAX_RCQ_AVAIL		(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
 #define NEXT_RCQ_IDX(x)		((((x) & MAX_RCQ_DESC_CNT) == \
-				  (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+				  (MAX_RCQ_DESC_CNT - 1)) ? \
+					(x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
+					(x) + 1)
 #define RCQ_BD(x)		((x) & MAX_RCQ_BD)
+
+/* dropless fc calculations for RCQs
+ *
+ * Number of RCQs should be as number of buffers in BRB:
+ * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_RCQ_REQ		BRB_SIZE(bp)
+#define NUM_RCQ_PG_REQ		((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
+					MAX_RCQ_DESC_CNT)
+#define RCQ_TH_LO(bp)		(NUM_RCQ_REQ + \
+				 NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
+				 FW_DROP_LEVEL(bp))
+#define RCQ_TH_HI(bp)		(RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
 /* This is needed for determining of last_max */
 #define SUB_S16(a, b)		(s16)((s16)(a) - (s16)(b))
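The BD and RCQ thresholds follow the same pattern as the SGE ones, with FW_DROP_LEVEL(bp) added on top. One aside: NUM_RCQ_PG_REQ divides NUM_BD_REQ rather than NUM_RCQ_REQ, but both expand to BRB_SIZE(bp), so the result is unaffected. Under the same illustrative assumptions as the sketch above, plus two further assumptions not visible in this diff (MAX_SPQ_PENDING of 8 and a 32-byte eth_rx_cqe), the arithmetic comes out roughly as:

	/* bd_rcq_threshold_sketch.c - hypothetical numbers, not driver code */
	#include <stdio.h>

	int main(void)
	{
		const int brb_size = 512, max_agg_qs = 64;   /* assumed, as above */
		const int max_spq_pending = 8;               /* assumed; not in this diff */
		const int fw_drop_level = 3 + max_spq_pending + max_agg_qs;  /* 75 */
		const int max_rx_desc_cnt = 4096 / 8 - 2;                    /* 510 */
		const int max_rcq_desc_cnt = 4096 / 32 - 1;  /* assumes 32-byte CQE: 127 */

		int bd_pg = (brb_size + max_rx_desc_cnt - 1) / max_rx_desc_cnt;    /* 2 */
		int bd_th_lo = brb_size + bd_pg * 2 + fw_drop_level;               /* 591 */
		int rcq_pg = (brb_size + max_rcq_desc_cnt - 1) / max_rcq_desc_cnt; /* 5 */
		int rcq_th_lo = brb_size + rcq_pg * 1 + fw_drop_level;             /* 592 */

		printf("BD_TH_LO=%d BD_TH_HI=%d\n", bd_th_lo, bd_th_lo + 100);
		printf("RCQ_TH_LO=%d RCQ_TH_HI=%d\n", rcq_th_lo, rcq_th_lo + 100);
		return 0;
	}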
@@ -685,24 +751,17 @@ struct bnx2x_fastpath {
 #define FP_CSB_FUNC_OFF \
 	offsetof(struct cstorm_status_block_c, func)

-#define HC_INDEX_TOE_RX_CQ_CONS		0 /* Formerly Ustorm TOE CQ index */
-					  /* (HC_INDEX_U_TOE_RX_CQ_CONS) */
-#define HC_INDEX_ETH_RX_CQ_CONS		1 /* Formerly Ustorm ETH CQ index */
-					  /* (HC_INDEX_U_ETH_RX_CQ_CONS) */
-#define HC_INDEX_ETH_RX_BD_CONS		2 /* Formerly Ustorm ETH BD index */
-					  /* (HC_INDEX_U_ETH_RX_BD_CONS) */
-#define HC_INDEX_TOE_TX_CQ_CONS		4 /* Formerly Cstorm TOE CQ index */
-					  /* (HC_INDEX_C_TOE_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS0	5 /* Formerly Cstorm ETH CQ index */
-					  /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS1	6 /* Formerly Cstorm ETH CQ index */
-					  /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS2	7 /* Formerly Cstorm ETH CQ index */
-					  /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
-#define HC_INDEX_ETH_FIRST_TX_CQ_CONS	HC_INDEX_ETH_TX_CQ_CONS_COS0
+#define HC_INDEX_ETH_RX_CQ_CONS		1
+
+#define HC_INDEX_OOO_TX_CQ_CONS		4
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0	5
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1	6
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2	7
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS	HC_INDEX_ETH_TX_CQ_CONS_COS0

 #define BNX2X_RX_SB_INDEX \
 	(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
@@ -1100,11 +1159,12 @@ struct bnx2x {
 #define BP_PORT(bp)		(bp->pfid & 1)
 #define BP_FUNC(bp)		(bp->pfid)
 #define BP_ABS_FUNC(bp)		(bp->pf_num)
-#define BP_E1HVN(bp)		(bp->pfid >> 1)
-#define BP_VN(bp)		(BP_E1HVN(bp)) /*remove when approved*/
-#define BP_L_ID(bp)		(BP_E1HVN(bp) << 2)
-#define BP_FW_MB_IDX(bp)	(BP_PORT(bp) +\
-	  BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+#define BP_VN(bp)		((bp)->pfid >> 1)
+#define BP_MAX_VN_NUM(bp)	(CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
+#define BP_L_ID(bp)		(BP_VN(bp) << 2)
+#define BP_FW_MB_IDX_VN(bp, vn)	(BP_PORT(bp) +\
+	  (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+#define BP_FW_MB_IDX(bp)	BP_FW_MB_IDX_VN(bp, BP_VN(bp))

 	struct net_device	*dev;
 	struct pci_dev		*pdev;
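As a quick sanity check of the new BP_FW_MB_IDX_VN() helper: when CHIP_IS_E1x() or 4-port mode holds, two functions interleave per port (stride 2 per VN); otherwise each VN on a port advances the mailbox index by 1. A hedged, standalone rendering of the same expression:

	/* fw_mb_idx_sketch.c - illustrative only; mirrors BP_FW_MB_IDX_VN() */
	#include <stdio.h>

	static int fw_mb_idx_vn(int port, int vn, int is_e1x_or_4port)
	{
		return port + vn * (is_e1x_or_4port ? 2 : 1);
	}

	int main(void)
	{
		/* E1H-style layout, port 1, VN 2 -> 1 + 2*2 = 5 */
		printf("%d\n", fw_mb_idx_vn(1, 2, 1));
		/* 2-port E2-style layout, port 1, VN 2 -> 1 + 2*1 = 3 */
		printf("%d\n", fw_mb_idx_vn(1, 2, 0));
		return 0;
	}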
@@ -1767,7 +1827,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define MAX_DMAE_C_PER_PORT	8
 #define INIT_DMAE_C(bp)		(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
-					BP_E1HVN(bp))
+					BP_VN(bp))
 #define PMF_DMAE_C(bp)		(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
 					E1HVN_MAX)
@@ -1793,7 +1853,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 /* must be used on a CID before placing it on a HW ring */
 #define HW_CID(bp, x)		((BP_PORT(bp) << 23) | \
-				 (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
+				 (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
 				 (x))

 #define SP_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_spe))
......
@@ -987,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
 void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
-	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-					      ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
 	u16 ring_prod;
 	int i, j;
@@ -1001,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 		if (!fp->disable_tpa) {
 			/* Fill the per-aggregtion pool */
-			for (i = 0; i < max_agg_queues; i++) {
+			for (i = 0; i < MAX_AGG_QS(bp); i++) {
 				struct bnx2x_agg_info *tpa_info =
 					&fp->tpa_info[i];
 				struct sw_rx_bd *first_buf =
@@ -1041,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 					bnx2x_free_rx_sge_range(bp, fp,
 								ring_prod);
 					bnx2x_free_tpa_pool(bp, fp,
-							    max_agg_queues);
+							    MAX_AGG_QS(bp));
 					fp->disable_tpa = 1;
 					ring_prod = 0;
 					break;
@@ -1137,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 		bnx2x_free_rx_bds(fp);

 		if (!fp->disable_tpa)
-			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
-					    ETH_MAX_AGGREGATION_QUEUES_E1 :
-					    ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
 	}
 }
@@ -3095,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 	struct bnx2x_fastpath *fp = &bp->fp[index];
 	int ring_size = 0;
 	u8 cos;
+	int rx_ring_size = 0;

 	/* if rx_ring_size specified - use it */
-	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
-			   MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+	if (!bp->rx_ring_size) {

-	/* allocate at least number of buffers required by FW */
-	rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
-						    MIN_RX_SIZE_TPA,
-				  rx_ring_size);
+		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+		/* allocate at least number of buffers required by FW */
+		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+				     MIN_RX_SIZE_TPA, rx_ring_size);
+
+		bp->rx_ring_size = rx_ring_size;
+	} else
+		rx_ring_size = bp->rx_ring_size;

 	/* Common */
 	sb = &bnx2x_fp(bp, index, status_blk);
......
@@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		}

 		/* advertise the requested speed and duplex if supported */
-		cmd->advertising &= bp->port.supported[cfg_idx];
+		if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+			DP(NETIF_MSG_LINK, "Advertisement parameters "
+					   "are not supported\n");
+			return -EINVAL;
+		}

 		bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
-		bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
-		bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
-					   cmd->advertising);
+		bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+		bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
+					 cmd->advertising);
+		if (cmd->advertising) {
+
+			bp->link_params.speed_cap_mask[cfg_idx] = 0;
+			if (cmd->advertising & ADVERTISED_10baseT_Half) {
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
+			}
+			if (cmd->advertising & ADVERTISED_10baseT_Full)
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
+
+			if (cmd->advertising & ADVERTISED_100baseT_Full)
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
+
+			if (cmd->advertising & ADVERTISED_100baseT_Half) {
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+				     PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
+			}
+			if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+			}
+			if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+						ADVERTISED_1000baseKX_Full))
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+
+			if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+						ADVERTISED_10000baseKX4_Full |
+						ADVERTISED_10000baseKR_Full))
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+		}
 	} else { /* forced speed */
 		/* advertise the requested speed and duplex if supported */
 		switch (speed) {
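The behaviour change here is worth spelling out: unsupported advertisement bits used to be silently masked off, whereas now the whole request fails with -EINVAL, and the accepted bits are translated into the link_params speed capability mask (this is the "Fix ethtool advertisement" item from the merge list). From user space the path is exercised with, e.g., `ethtool -s eth0 advertise 0x020 autoneg on` (0x020 is the standard mask bit for 1000baseT/Full; the interface name is hypothetical). The validation itself distils to a one-line predicate:

	/* distilled form of the new check, illustrative only */
	static int advertising_supported(unsigned int requested,
					 unsigned int supported)
	{
		/* reject instead of silently masking: any requested bit
		 * outside the supported set fails the request with -EINVAL
		 */
		return (requested & ~supported) == 0;
	}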
@@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
 	if (bp->rx_ring_size)
 		ering->rx_pending = bp->rx_ring_size;
 	else
-		if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
-			ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
-		else
-			ering->rx_pending = MAX_RX_AVAIL;
+		ering->rx_pending = MAX_RX_AVAIL;

 	ering->rx_mini_pending = 0;
 	ering->rx_jumbo_pending = 0;
......
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
 {
 	u32 nig_reg_adress_crd_weight = 0;
 	u32 pbf_reg_adress_crd_weight = 0;
-	/* Calculate and set BW for this COS*/
-	const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
-	const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+	/* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+	const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+	const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;

 	switch (cos_entry) {
 	case 0:
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw(
 	/* Calculate total BW requested */
 	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
 		if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
-
-			if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
-				DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
-						   "was set to 0\n");
-				return -EINVAL;
-			}
 			*total_bw +=
 				ets_params->cos[cos_idx].params.bw_params.bw;
 		}
 	}

-	/*Check taotl BW is valid */
+	/* Check total BW is valid */
 	if ((100 != *total_bw) || (0 == *total_bw)) {
 		if (0 == *total_bw) {
 			DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW"
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params,

 	/* Check loopback mode */
 	if (lb)
-		val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK;
+		val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
 	REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
 	bnx2x_set_xumac_nig(params,
 			    ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);

+	/* Advertised and set FEC (Forward Error Correction) */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+			 (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+			  MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
+
 	/* Enable CL37 BAM */
 	if (REG_RD(bp, params->shmem_base +
 		   offsetof(struct shmem_region, dev_info.
@@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params,
 				(tmp | EMAC_LED_OVERRIDE));
 		/*
 		 * return here without enabling traffic
-		 * LED blink andsetting rate in ON mode.
+		 * LED blink and setting rate in ON mode.
 		 * In oper mode, enabling LED blink
 		 * and setting rate is needed.
 		 */
@@ -5936,7 +5936,11 @@ int bnx2x_set_led(struct link_params *params,
 		 * This is a work-around for HW issue found when link
 		 * is up in CL73
 		 */
-		REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+		if ((!CHIP_IS_E3(bp)) ||
+		    (CHIP_IS_E3(bp) &&
+		     mode == LED_MODE_ON))
+			REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
 		if (CHIP_IS_E1x(bp) ||
 		    CHIP_IS_E2(bp) ||
 		    (mode == LED_MODE_ON))
@@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= (FLAGS_HW_LOCK_REQUIRED |
-			   FLAGS_TX_ERROR_CHECK),
+	.flags		= FLAGS_HW_LOCK_REQUIRED,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= (FLAGS_INIT_XGXS_FIRST |
-			   FLAGS_TX_ERROR_CHECK),
+	.flags		= FLAGS_INIT_XGXS_FIRST,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = {
 	.addr		= 0xff,
 	.def_md_devad	= 0,
 	.flags		= (FLAGS_HW_LOCK_REQUIRED |
-			   FLAGS_INIT_XGXS_FIRST |
-			   FLAGS_TX_ERROR_CHECK),
+			   FLAGS_INIT_XGXS_FIRST),
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= (FLAGS_FAN_FAILURE_DET_REQ |
-			   FLAGS_TX_ERROR_CHECK),
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ,
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
......
@@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 		opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

 	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
-	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
-		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
 	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

 #ifdef __BIG_ENDIAN
@@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
 	if (!CHIP_IS_E1(bp)) {
 		/* init leading/trailing edge */
 		if (IS_MF(bp)) {
-			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
 			if (bp->port.pmf)
 				/* enable nig and gpio3 attention */
 				val |= 0x1100;
@@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)

 	/* init leading/trailing edge */
 	if (IS_MF(bp)) {
-		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
 		if (bp->port.pmf)
 			/* enable nig and gpio3 attention */
 			val |= 0x1100;
@@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
 	int vn;

 	bp->vn_weight_sum = 0;
-	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
 		u32 vn_cfg = bp->mf_config[vn];
 		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
 				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
 					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }

+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+	return 2 * vn + BP_PORT(bp);
+}
+
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
 {
 	struct rate_shaping_vars_per_vn m_rs_vn;
 	struct fairness_vars_per_vn m_fair_vn;
 	u32 vn_cfg = bp->mf_config[vn];
-	int func = 2*vn + BP_PORT(bp);
+	int func = func_by_vn(bp, vn);
 	u16 vn_min_rate, vn_max_rate;
 	int i;
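The func_by_vn() helper just centralizes the existing "two functions per port" layout: absolute function = 2 * vn + port, so VN 2 on port 1 maps to function 5. The same arithmetic previously appeared inline as ((vn << 1) | port) in bnx2x_link_sync_notify() (replaced in a hunk below); the two forms are equivalent because port is always 0 or 1, which this small check confirms:

	/* illustrative check: 2 * vn + port == (vn << 1) | port for port in {0, 1} */
	#include <assert.h>

	int main(void)
	{
		for (int vn = 0; vn < 4; vn++)
			for (int port = 0; port < 2; port++)
				assert(2 * vn + port == ((vn << 1) | port));
		return 0;
	}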
@@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
 	 *
 	 *      and there are 2 functions per port
 	 */
-	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
 		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

 		if (func >= E1H_FUNC_MAX)
@@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)

 		/* calculate and set min-max rate for each vn */
 		if (bp->port.pmf)
-			for (vn = VN_0; vn < E1HVN_MAX; vn++)
+			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
 				bnx2x_init_vn_minmax(bp, vn);

 		/* always enable rate shaping and fairness */
@@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)

 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
 {
-	int port = BP_PORT(bp);
 	int func;
 	int vn;

 	/* Set the attention towards other drivers on the same port */
-	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-		if (vn == BP_E1HVN(bp))
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+		if (vn == BP_VN(bp))
 			continue;

-		func = ((vn << 1) | port);
+		func = func_by_vn(bp, vn);
 		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
 		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
 	}
@@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
 	bnx2x_dcbx_pmf_update(bp);

 	/* enable nig attention */
-	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+	val = (0xff0f | (1 << (BP_VN(bp) + 4)));
 	if (bp->common.int_block == INT_BLOCK_HC) {
 		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
 		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
@@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 	u16 tpa_agg_size = 0;

 	if (!fp->disable_tpa) {
-		pause->sge_th_hi = 250;
-		pause->sge_th_lo = 150;
+		pause->sge_th_lo = SGE_TH_LO(bp);
+		pause->sge_th_hi = SGE_TH_HI(bp);
+
+		/* validate SGE ring has enough to cross high threshold */
+		WARN_ON(bp->dropless_fc &&
+				pause->sge_th_hi + FW_PREFETCH_CNT >
+				MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
 		tpa_agg_size = min_t(u32,
 			(min_t(u32, 8, MAX_SKB_FRAGS) *
 			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
@@ -2771,10 +2782,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,

 	/* pause - not for e1 */
 	if (!CHIP_IS_E1(bp)) {
-		pause->bd_th_hi = 350;
-		pause->bd_th_lo = 250;
-		pause->rcq_th_hi = 350;
-		pause->rcq_th_lo = 250;
+		pause->bd_th_lo = BD_TH_LO(bp);
+		pause->bd_th_hi = BD_TH_HI(bp);
+
+		pause->rcq_th_lo = RCQ_TH_LO(bp);
+		pause->rcq_th_hi = RCQ_TH_HI(bp);
+		/*
+		 * validate that rings have enough entries to cross
+		 * high thresholds
+		 */
+		WARN_ON(bp->dropless_fc &&
+				pause->bd_th_hi + FW_PREFETCH_CNT >
+				bp->rx_ring_size);
+		WARN_ON(bp->dropless_fc &&
+				pause->rcq_th_hi + FW_PREFETCH_CNT >
+				NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);

 		pause->pri_map = 1;
 	}
@@ -2802,9 +2824,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 	 * For PF Clients it should be the maximum avaliable number.
 	 * VF driver(s) may want to define it to a smaller value.
 	 */
-	rxq_init->max_tpa_queues =
-		(CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-		ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);

 	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
 	rxq_init->fw_sb_id = fp->fw_sb_id;
@@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
 	hc_sm->time_to_expire = 0xFFFFFFFF;
 }

+/* allocates state machine ids. */
+static inline
+void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+	/* zero out state machine indices */
+	/* rx indices */
+	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+	/* tx indices */
+	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+	/* map indices */
+	/* rx indices */
+	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+	/* tx indices */
+	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
 			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
 {
@@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
 		hc_sm_p = sb_data_e2.common.state_machine;
 		sb_data_p = (u32 *)&sb_data_e2;
 		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+		bnx2x_map_sb_state_machines(sb_data_e2.index_data);
 	} else {
 		memset(&sb_data_e1x, 0,
 		       sizeof(struct hc_status_block_data_e1x));
@@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
 		hc_sm_p = sb_data_e1x.common.state_machine;
 		sb_data_p = (u32 *)&sb_data_e1x;
 		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
 	}

 	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
@@ -5802,7 +5855,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 	 * take the UNDI lock to protect undi_unload flow from accessing
 	 * registers while we're resetting the chip
 	 */
-	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

 	bnx2x_reset_common(bp);
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
@@ -5814,7 +5867,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 	}
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);

-	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

 	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
@@ -6671,12 +6724,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 			if (CHIP_MODE_IS_4_PORT(bp))
 				dsb_idx = BP_FUNC(bp);
 			else
-				dsb_idx = BP_E1HVN(bp);
+				dsb_idx = BP_VN(bp);

 			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
 				       IGU_BC_BASE_DSB_PROD + dsb_idx :
 				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

+			/*
+			 * igu prods come in chunks of E1HVN_MAX (4) -
+			 * does not matters what is the current chip mode
+			 */
 			for (i = 0; i < (num_segs * E1HVN_MAX);
 			     i += E1HVN_MAX) {
 				addr = IGU_REG_PROD_CONS_MEMORY +
@@ -7570,7 +7627,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
 		u32 val;
 		/* The mac address is written to entries 1-4 to
 		   preserve entry 0 which is used by the PMF */
-		u8 entry = (BP_E1HVN(bp) + 1)*8;
+		u8 entry = (BP_VN(bp) + 1)*8;

 		val = (mac_addr[0] << 8) | mac_addr[1];
 		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
@@ -8546,10 +8603,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 	/* Check if there is any driver already loaded */
 	val = REG_RD(bp, MISC_REG_UNPREPARED);
 	if (val == 0x1) {
-		/* Check if it is the UNDI driver
+
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+		/*
+		 * Check if it is the UNDI driver
 		 * UNDI driver initializes CID offset for normal bell to 0x7
 		 */
-		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
 		if (val == 0x7) {
 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -8587,9 +8646,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 				bnx2x_fw_command(bp, reset_code, 0);
 			}

-			/* now it's safe to release the lock */
-			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-
 			bnx2x_undi_int_disable(bp);
 			port = BP_PORT(bp);
@@ -8639,8 +8695,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 			bp->fw_seq =
 			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
 			       DRV_MSG_SEQ_NUMBER_MASK);
-		} else
-			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+		}
+
+		/* now it's safe to release the lock */
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 	}
 }
@@ -8777,13 +8835,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
 {
 	int pfid = BP_FUNC(bp);
-	int vn = BP_E1HVN(bp);
 	int igu_sb_id;
 	u32 val;
 	u8 fid, igu_sb_cnt = 0;

 	bp->igu_base_sb = 0xff;
 	if (CHIP_INT_MODE_IS_BC(bp)) {
+		int vn = BP_VN(bp);
 		igu_sb_cnt = bp->igu_sb_cnt;
 		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
 			FP_SB_MAX_E1x;
@@ -9416,6 +9474,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 		bp->igu_base_sb = 0;
 	} else {
 		bp->common.int_block = INT_BLOCK_IGU;
+
+		/* do not allow device reset during IGU info preocessing */
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
 		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);

 		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
@@ -9447,6 +9509,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)

 		bnx2x_get_igu_cam_info(bp);

+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 	}

 	/*
@@ -9473,7 +9536,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 	bp->mf_ov = 0;
 	bp->mf_mode = 0;
-	vn = BP_E1HVN(bp);
+	vn = BP_VN(bp);

 	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
 		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -9593,13 +9656,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 	/* port info */
 	bnx2x_get_port_hwinfo(bp);

-	if (!BP_NOMCP(bp)) {
-		bp->fw_seq =
-		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
-		     DRV_MSG_SEQ_NUMBER_MASK);
-		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-	}
-
 	/* Get MAC addresses */
 	bnx2x_get_mac_hwinfo(bp);
@@ -9765,6 +9821,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 	if (!BP_NOMCP(bp))
 		bnx2x_undi_unload(bp);

+	/* init fw_seq after undi_unload! */
+	if (!BP_NOMCP(bp)) {
+		bp->fw_seq =
+		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+		     DRV_MSG_SEQ_NUMBER_MASK);
+		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+	}
+
 	if (CHIP_REV_IS_FPGA(bp))
 		dev_err(&bp->pdev->dev, "FPGA detected\n");
@@ -10259,17 +10323,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 	/* clean indirect addresses */
 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
 			       PCICFG_VENDOR_ID_OFFSET);
-	/* Clean the following indirect addresses for all functions since it
+
+	/*
+	 * Clean the following indirect addresses for all functions since it
 	 * is not used by the driver.
 	 */
 	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
 	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
 	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
 	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
-	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
-	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
-	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
-	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+
+	if (CHIP_IS_E1x(bp)) {
+		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+	}

 	/*
 	 * Enable internal target-read (in case we are probed after PF FLR).
......
@@ -5320,7 +5320,7 @@
 #define XCM_REG_XX_OVFL_EVNT_ID				 0x20058
 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS	 (0x1<<0)
 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS	 (0x1<<1)
-#define XMAC_CTRL_REG_CORE_LOCAL_LPBK			 (0x1<<3)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK			 (0x1<<2)
 #define XMAC_CTRL_REG_RX_EN				 (0x1<<1)
 #define XMAC_CTRL_REG_SOFT_RESET			 (0x1<<6)
 #define XMAC_CTRL_REG_TX_EN				 (0x1<<0)
@@ -5766,7 +5766,7 @@
 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0		 8
 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1		 9
 #define HW_LOCK_RESOURCE_SPIO				 2
-#define HW_LOCK_RESOURCE_UNDI				 5
+#define HW_LOCK_RESOURCE_RESET				 5
 #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT		 (0x1<<4)
 #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR		 (0x1<<5)
 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR		 (0x1<<18)
@@ -6853,6 +6853,9 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP			0x7
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0	0x10
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1	0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2	0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY	0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ		0x8000
 #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150	0x96
 #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL		0x8000
 #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1		0x800e
......
@@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
 		break;

 	case MAC_TYPE_NONE: /* unreached */
-		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+		DP(BNX2X_MSG_STATS,
+		   "stats updated by DMAE but no MAC active\n");
 		return -1;

 	default: /* unreached */
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)

 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
 {
-	int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
+	int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
 	u32 func_stx;

 	/* sanity */
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
 	func_stx = bp->func_stx;

 	for (vn = VN_0; vn < vn_max; vn++) {
-		int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
+		int mb_idx = BP_FW_MB_IDX_VN(bp, vn);

 		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
 		bnx2x_func_stats_init(bp);
......
@@ -46,6 +46,7 @@
 #include <linux/skbuff.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/io.h>

 #include <linux/can/dev.h>
 #include <linux/can/error.h>
......
@@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
 		checksum += eeprom_data;
 	}

+#ifdef CONFIG_PARISC
+	/* This is a signature and not a checksum on HP c8000 */
+	if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
+		return E1000_SUCCESS;
+#endif
+
 	if (checksum == (u16) EEPROM_SUM)
 		return E1000_SUCCESS;
 	else {
......
@@ -757,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
 	unsigned long set_attr, clr_attr, ret_attr;
 	unsigned long set_attr6, clr_attr6;
-	long ret, ret6;
+	long ret, ret4, ret6;
 	int rc1 = 0, rc2 = 0;
 	int restart = 0;
@@ -770,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)

 	set_attr = 0;
 	clr_attr = 0;
+	set_attr6 = 0;
+	clr_attr6 = 0;

 	if (data) {
 		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
@@ -784,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
 	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
 	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
 	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
-		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
-					 set_attr, &ret_attr);
+		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+					 set_attr, &ret_attr);

-		if (ret != H_SUCCESS) {
+		if (ret4 != H_SUCCESS) {
 			netdev_err(dev, "unable to change IPv4 checksum "
 					"offload settings. %d rc=%ld\n",
-					data, ret);
+					data, ret4);
+
+			h_illan_attributes(adapter->vdev->unit_address,
+					   set_attr, clr_attr, &ret_attr);
+
+			if (data == 1)
+				dev->features &= ~NETIF_F_IP_CSUM;

-			ret = h_illan_attributes(adapter->vdev->unit_address,
-						 set_attr, clr_attr, &ret_attr);
 		} else {
 			adapter->fw_ipv4_csum_support = data;
 		}
@@ -804,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
 		if (ret6 != H_SUCCESS) {
 			netdev_err(dev, "unable to change IPv6 checksum "
 					"offload settings. %d rc=%ld\n",
-					data, ret);
+					data, ret6);
+
+			h_illan_attributes(adapter->vdev->unit_address,
+					   set_attr6, clr_attr6, &ret_attr);
+
+			if (data == 1)
+				dev->features &= ~NETIF_F_IPV6_CSUM;

-			ret = h_illan_attributes(adapter->vdev->unit_address,
-						 set_attr6, clr_attr6,
-						 &ret_attr);
 		} else
 			adapter->fw_ipv6_csum_support = data;

-	if (ret != H_SUCCESS || ret6 != H_SUCCESS)
+	if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
 		adapter->rx_csum = data;
 	else
 		rc1 = -EIO;
@@ -930,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
 	union ibmveth_buf_desc descs[6];
 	int last, i;
 	int force_bounce = 0;
+	dma_addr_t dma_addr;

 	/*
 	 * veth handles a maximum of 6 segments including the header, so
} }
/* Map the header */ /* Map the header */
descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
skb_headlen(skb), skb_headlen(skb), DMA_TO_DEVICE);
DMA_TO_DEVICE); if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
goto map_failed; goto map_failed;
descs[0].fields.flags_len = desc_flags | skb_headlen(skb); descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
descs[0].fields.address = dma_addr;
/* Map the frags */ /* Map the frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
unsigned long dma_addr;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr = dma_map_page(&adapter->vdev->dev, frag->page, dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
...@@ -1026,7 +1035,12 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, ...@@ -1026,7 +1035,12 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
netdev->stats.tx_bytes += skb->len; netdev->stats.tx_bytes += skb->len;
} }
for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++) dma_unmap_single(&adapter->vdev->dev,
descs[0].fields.address,
descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
DMA_TO_DEVICE);
for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
DMA_TO_DEVICE); DMA_TO_DEVICE);
......
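The ibmveth hunks above split the single hypervisor return code into per-family ret4/ret6 values, roll a failed h_illan_attributes() call back by swapping its set/clear masks, and keep rx_csum enabled if either family succeeded. A minimal userspace sketch of that rollback pattern, with a hypothetical toggle_attr() standing in for the hypervisor call:

#include <stdio.h>

#define H_SUCCESS 0

/* Hypothetical stand-in for h_illan_attributes(): clear clr, then set set. */
static long toggle_attr(unsigned long clr, unsigned long set)
{
	(void)clr; (void)set;
	return H_SUCCESS;	/* pretend the hypervisor accepted the change */
}

static int set_csum_offload(unsigned long set4, unsigned long clr4,
			    unsigned long set6, unsigned long clr6)
{
	long ret4, ret6;

	ret4 = toggle_attr(clr4, set4);
	if (ret4 != H_SUCCESS)
		toggle_attr(set4, clr4);	/* roll IPv4 back on failure */

	ret6 = toggle_attr(clr6, set6);
	if (ret6 != H_SUCCESS)
		toggle_attr(set6, clr6);	/* roll IPv6 back on failure */

	/* Keep checksum offload if at least one family now works. */
	return (ret4 == H_SUCCESS || ret6 == H_SUCCESS) ? 0 : -5 /* -EIO */;
}

int main(void)
{
	printf("rc=%d\n", set_csum_offload(1, 0, 1, 0));
	return 0;
}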
...@@ -127,8 +127,8 @@ struct pch_gbe_regs { ...@@ -127,8 +127,8 @@ struct pch_gbe_regs {
/* Reset */ /* Reset */
#define PCH_GBE_ALL_RST 0x80000000 /* All reset */ #define PCH_GBE_ALL_RST 0x80000000 /* All reset */
#define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */ #define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */
#define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */ #define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */
/* TCP/IP Accelerator Control */ /* TCP/IP Accelerator Control */
#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */ #define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */
...@@ -276,6 +276,9 @@ struct pch_gbe_regs { ...@@ -276,6 +276,9 @@ struct pch_gbe_regs {
#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */ #define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */ #define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
/* RX DMA STATUS */
#define PCH_GBE_IDLE_CHECK 0xFFFFFFFE
/* Wake On LAN Status */ /* Wake On LAN Status */
#define PCH_GBE_WLS_BR 0x00000008 /* Broadcast Address */ #define PCH_GBE_WLS_BR 0x00000008 /* Broadcast Address */
#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */ #define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
...@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc { ...@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {
struct pch_gbe_buffer { struct pch_gbe_buffer {
struct sk_buff *skb; struct sk_buff *skb;
dma_addr_t dma; dma_addr_t dma;
unsigned char *rx_buffer;
unsigned long time_stamp; unsigned long time_stamp;
u16 length; u16 length;
bool mapped; bool mapped;
...@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring { ...@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {
struct pch_gbe_rx_ring { struct pch_gbe_rx_ring {
struct pch_gbe_rx_desc *desc; struct pch_gbe_rx_desc *desc;
dma_addr_t dma; dma_addr_t dma;
unsigned char *rx_buff_pool;
dma_addr_t rx_buff_pool_logic;
unsigned int rx_buff_pool_size;
unsigned int size; unsigned int size;
unsigned int count; unsigned int count;
unsigned int next_to_use; unsigned int next_to_use;
...@@ -622,6 +629,7 @@ struct pch_gbe_adapter { ...@@ -622,6 +629,7 @@ struct pch_gbe_adapter {
unsigned long rx_buffer_len; unsigned long rx_buffer_len;
unsigned long tx_queue_len; unsigned long tx_queue_len;
bool have_msi; bool have_msi;
bool rx_stop_flag;
}; };
extern const char pch_driver_version[]; extern const char pch_driver_version[];
......
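The new rx_buff_pool fields above let the driver back every descriptor's rx_buffer with a slice of one coherent DMA allocation instead of per-skb data. A hedged sketch of that carving with simplified types (the real driver, in pch_gbe_alloc_rx_buffers_pool() below, sizes the pool as count * bufsz plus a 2MB reserve and allocates it with dma_alloc_coherent()):

#include <stdlib.h>

struct rx_buf {
	unsigned char *rx_buffer;
	unsigned int length;
};

/* Carve one large pool into fixed-size per-descriptor receive buffers. */
static int alloc_rx_pool(struct rx_buf *ring, unsigned int count,
			 unsigned int bufsz, unsigned char **pool_out)
{
	unsigned char *pool = calloc(count, bufsz);	/* dma_alloc_coherent() stand-in */
	unsigned int i;

	if (!pool)
		return -1;
	for (i = 0; i < count; i++) {
		ring[i].rx_buffer = pool + (size_t)bufsz * i;
		ring[i].length = bufsz;
	}
	*pool_out = pool;
	return 0;
}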
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include "pch_gbe.h" #include "pch_gbe.h"
#include "pch_gbe_api.h" #include "pch_gbe_api.h"
#include <linux/prefetch.h>
#define DRV_VERSION "1.00" #define DRV_VERSION "1.00"
const char pch_driver_version[] = DRV_VERSION; const char pch_driver_version[] = DRV_VERSION;
...@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION; ...@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1 #define PCH_GBE_PCI_BAR 1
#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
/* Macros for ML7223 */ /* Macros for ML7223 */
#define PCI_VENDOR_ID_ROHM 0x10db #define PCI_VENDOR_ID_ROHM 0x10db
#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
/* Macros for ML7831 */
#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
#define PCH_GBE_TX_WEIGHT 64 #define PCH_GBE_TX_WEIGHT 64
#define PCH_GBE_RX_WEIGHT 64 #define PCH_GBE_RX_WEIGHT 64
#define PCH_GBE_RX_BUFFER_WRITE 16 #define PCH_GBE_RX_BUFFER_WRITE 16
...@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION; ...@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION;
) )
/* Ethertype field values */ /* Ethertype field values */
#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
#define PCH_GBE_FRAME_SIZE_2048 2048 #define PCH_GBE_FRAME_SIZE_2048 2048
#define PCH_GBE_FRAME_SIZE_4096 4096 #define PCH_GBE_FRAME_SIZE_4096 4096
...@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION; ...@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_INT_ENABLE_MASK ( \ #define PCH_GBE_INT_ENABLE_MASK ( \
PCH_GBE_INT_RX_DMA_CMPLT | \ PCH_GBE_INT_RX_DMA_CMPLT | \
PCH_GBE_INT_RX_DSC_EMP | \ PCH_GBE_INT_RX_DSC_EMP | \
PCH_GBE_INT_RX_FIFO_ERR | \
PCH_GBE_INT_WOL_DET | \ PCH_GBE_INT_WOL_DET | \
PCH_GBE_INT_TX_CMPLT \ PCH_GBE_INT_TX_CMPLT \
) )
#define PCH_GBE_INT_DISABLE_ALL 0
static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
...@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit) ...@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
if (!tmp) if (!tmp)
pr_err("Error: busy bit is not cleared\n"); pr_err("Error: busy bit is not cleared\n");
} }
/**
* pch_gbe_wait_clr_bit_irq - Wait for a bit to clear, for use in interrupt context
* @reg: Pointer to the register
* @bit: Busy bit to wait on
* Returns: 0 once the bit clears, -1 if it is still set after the poll limit
*/
static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
{
u32 tmp;
int ret = -1;
/* wait busy */
tmp = 20;
while ((ioread32(reg) & bit) && --tmp)
udelay(5);
if (!tmp)
pr_err("Error: busy bit is not cleared\n");
else
ret = 0;
return ret;
}
/** /**
* pch_gbe_mac_mar_set - Set MAC address register * pch_gbe_mac_mar_set - Set MAC address register
* @hw: Pointer to the HW structure * @hw: Pointer to the HW structure
...@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) ...@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
return; return;
} }
static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
{
/* Read the MAC address and store it in the private data */
pch_gbe_mac_read_mac_addr(hw);
iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
/* Setup the MAC address */
pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
return;
}
/** /**
* pch_gbe_mac_init_rx_addrs - Initialize receive addresses * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
* @hw: Pointer to the HW structure * @hw: Pointer to the HW structure
...@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter) ...@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
tcpip = ioread32(&hw->reg->TCPIP_ACC); tcpip = ioread32(&hw->reg->TCPIP_ACC);
if (netdev->features & NETIF_F_RXCSUM) { tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF; tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
tcpip |= PCH_GBE_RX_TCPIPACC_EN;
} else {
tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
}
iowrite32(tcpip, &hw->reg->TCPIP_ACC); iowrite32(tcpip, &hw->reg->TCPIP_ACC);
return; return;
} }
...@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) ...@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
iowrite32(rdba, &hw->reg->RX_DSC_BASE); iowrite32(rdba, &hw->reg->RX_DSC_BASE);
iowrite32(rdlen, &hw->reg->RX_DSC_SIZE); iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
/* Enables Receive DMA */
rxdma = ioread32(&hw->reg->DMA_CTRL);
rxdma |= PCH_GBE_RX_DMA_EN;
iowrite32(rxdma, &hw->reg->DMA_CTRL);
/* Enables Receive */
iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
} }
/** /**
...@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter) ...@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
spin_unlock_irqrestore(&adapter->stats_lock, flags); spin_unlock_irqrestore(&adapter->stats_lock, flags);
} }
static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
{
struct pch_gbe_hw *hw = &adapter->hw;
u32 rxdma;
u16 value;
int ret;
/* Disable Receive DMA */
rxdma = ioread32(&hw->reg->DMA_CTRL);
rxdma &= ~PCH_GBE_RX_DMA_EN;
iowrite32(rxdma, &hw->reg->DMA_CTRL);
/* Wait until the Rx DMA bus is idle */
ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
if (ret) {
/* Disable Bus master */
pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
value &= ~PCI_COMMAND_MASTER;
pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
/* Stop Receive */
pch_gbe_mac_reset_rx(hw);
/* Enable Bus master */
value |= PCI_COMMAND_MASTER;
pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
} else {
/* Stop Receive */
pch_gbe_mac_reset_rx(hw);
}
}
static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
{
u32 rxdma;
/* Enables Receive DMA */
rxdma = ioread32(&hw->reg->DMA_CTRL);
rxdma |= PCH_GBE_RX_DMA_EN;
iowrite32(rxdma, &hw->reg->DMA_CTRL);
/* Enables Receive */
iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
return;
}
/** /**
* pch_gbe_intr - Interrupt Handler * pch_gbe_intr - Interrupt Handler
* @irq: Interrupt number * @irq: Interrupt number
...@@ -1123,7 +1191,15 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) ...@@ -1123,7 +1191,15 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
if (int_st & PCH_GBE_INT_RX_FRAME_ERR) if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
adapter->stats.intr_rx_frame_err_count++; adapter->stats.intr_rx_frame_err_count++;
if (int_st & PCH_GBE_INT_RX_FIFO_ERR) if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
adapter->stats.intr_rx_fifo_err_count++; if (!adapter->rx_stop_flag) {
adapter->stats.intr_rx_fifo_err_count++;
pr_debug("Rx fifo over run\n");
adapter->rx_stop_flag = true;
int_en = ioread32(&hw->reg->INT_EN);
iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
&hw->reg->INT_EN);
pch_gbe_stop_receive(adapter);
}
if (int_st & PCH_GBE_INT_RX_DMA_ERR) if (int_st & PCH_GBE_INT_RX_DMA_ERR)
adapter->stats.intr_rx_dma_err_count++; adapter->stats.intr_rx_dma_err_count++;
if (int_st & PCH_GBE_INT_TX_FIFO_ERR) if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
...@@ -1135,7 +1211,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) ...@@ -1135,7 +1211,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
/* When Rx descriptor is empty */ /* When Rx descriptor is empty */
if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
adapter->stats.intr_rx_dsc_empty_count++; adapter->stats.intr_rx_dsc_empty_count++;
pr_err("Rx descriptor is empty\n"); pr_debug("Rx descriptor is empty\n");
int_en = ioread32(&hw->reg->INT_EN); int_en = ioread32(&hw->reg->INT_EN);
iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
if (hw->mac.tx_fc_enable) { if (hw->mac.tx_fc_enable) {
...@@ -1185,29 +1261,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter, ...@@ -1185,29 +1261,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
unsigned int i; unsigned int i;
unsigned int bufsz; unsigned int bufsz;
bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN; bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
i = rx_ring->next_to_use; i = rx_ring->next_to_use;
while ((cleaned_count--)) { while ((cleaned_count--)) {
buffer_info = &rx_ring->buffer_info[i]; buffer_info = &rx_ring->buffer_info[i];
skb = buffer_info->skb; skb = netdev_alloc_skb(netdev, bufsz);
if (skb) { if (unlikely(!skb)) {
skb_trim(skb, 0); /* Better luck next round */
} else { adapter->stats.rx_alloc_buff_failed++;
skb = netdev_alloc_skb(netdev, bufsz); break;
if (unlikely(!skb)) {
/* Better luck next round */
adapter->stats.rx_alloc_buff_failed++;
break;
}
/* 64byte align */
skb_reserve(skb, PCH_GBE_DMA_ALIGN);
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
} }
/* align */
skb_reserve(skb, NET_IP_ALIGN);
buffer_info->skb = skb;
buffer_info->dma = dma_map_single(&pdev->dev, buffer_info->dma = dma_map_single(&pdev->dev,
skb->data, buffer_info->rx_buffer,
buffer_info->length, buffer_info->length,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
...@@ -1240,6 +1310,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter, ...@@ -1240,6 +1310,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
return; return;
} }
static int
pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
{
struct pci_dev *pdev = adapter->pdev;
struct pch_gbe_buffer *buffer_info;
unsigned int i;
unsigned int bufsz;
unsigned int size;
bufsz = adapter->rx_buffer_len;
size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
&rx_ring->rx_buff_pool_logic,
GFP_KERNEL);
if (!rx_ring->rx_buff_pool) {
pr_err("Unable to allocate memory for the receive poll buffer\n");
return -ENOMEM;
}
memset(rx_ring->rx_buff_pool, 0, size);
rx_ring->rx_buff_pool_size = size;
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
buffer_info->length = bufsz;
}
return 0;
}
/** /**
* pch_gbe_alloc_tx_buffers - Allocate transmit buffers * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
* @adapter: Board private structure * @adapter: Board private structure
...@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, ...@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
unsigned int i; unsigned int i;
unsigned int cleaned_count = 0; unsigned int cleaned_count = 0;
bool cleaned = false; bool cleaned = false;
struct sk_buff *skb, *new_skb; struct sk_buff *skb;
u8 dma_status; u8 dma_status;
u16 gbec_status; u16 gbec_status;
u32 tcp_ip_status; u32 tcp_ip_status;
...@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, ...@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
rx_desc->gbec_status = DSC_INIT16; rx_desc->gbec_status = DSC_INIT16;
buffer_info = &rx_ring->buffer_info[i]; buffer_info = &rx_ring->buffer_info[i];
skb = buffer_info->skb; skb = buffer_info->skb;
buffer_info->skb = NULL;
/* unmap dma */ /* unmap dma */
dma_unmap_single(&pdev->dev, buffer_info->dma, dma_unmap_single(&pdev->dev, buffer_info->dma,
buffer_info->length, DMA_FROM_DEVICE); buffer_info->length, DMA_FROM_DEVICE);
buffer_info->mapped = false; buffer_info->mapped = false;
/* Prefetch the packet */
prefetch(skb->data);
pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
"TCP:0x%08x] BufInf = 0x%p\n", "TCP:0x%08x] BufInf = 0x%p\n",
...@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, ...@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
pr_err("Receive CRC Error\n"); pr_err("Receive CRC Error\n");
} else { } else {
/* get receive length */ /* get receive length */
/* length convert[-3] */ /* length convert[-3], length includes FCS length */
length = (rx_desc->rx_words_eob) - 3; length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
if (rx_desc->rx_words_eob & 0x02)
/* Decide the data conversion method */ length = length - 4;
if (!(netdev->features & NETIF_F_RXCSUM)) { /*
/* [Header:14][payload] */ * buffer_info->rx_buffer: [Header:14][payload]
if (NET_IP_ALIGN) { * skb->data: [Reserve:2][Header:14][payload]
/* Because alignment differs, */
* the new_skb is newly allocated, memcpy(skb->data, buffer_info->rx_buffer, length);
* and data is copied to new_skb.*/
new_skb = netdev_alloc_skb(netdev,
length + NET_IP_ALIGN);
if (!new_skb) {
/* dorrop error */
pr_err("New skb allocation "
"Error\n");
goto dorrop;
}
skb_reserve(new_skb, NET_IP_ALIGN);
memcpy(new_skb->data, skb->data,
length);
skb = new_skb;
} else {
/* DMA buffer is used as SKB as it is.*/
buffer_info->skb = NULL;
}
} else {
/* [Header:14][padding:2][payload] */
/* The length includes padding length */
length = length - PCH_GBE_DMA_PADDING;
if ((length < copybreak) ||
(NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
/* Because alignment differs,
* the new_skb is newly allocated,
* and data is copied to new_skb.
* Padding data is deleted
* at the time of a copy.*/
new_skb = netdev_alloc_skb(netdev,
length + NET_IP_ALIGN);
if (!new_skb) {
/* dorrop error */
pr_err("New skb allocation "
"Error\n");
goto dorrop;
}
skb_reserve(new_skb, NET_IP_ALIGN);
memcpy(new_skb->data, skb->data,
ETH_HLEN);
memcpy(&new_skb->data[ETH_HLEN],
&skb->data[ETH_HLEN +
PCH_GBE_DMA_PADDING],
length - ETH_HLEN);
skb = new_skb;
} else {
/* Padding data is deleted
* by moving header data.*/
memmove(&skb->data[PCH_GBE_DMA_PADDING],
&skb->data[0], ETH_HLEN);
skb_reserve(skb, NET_IP_ALIGN);
buffer_info->skb = NULL;
}
}
/* The length includes FCS length */
length = length - ETH_FCS_LEN;
/* update status of driver */ /* update status of driver */
adapter->stats.rx_bytes += length; adapter->stats.rx_bytes += length;
adapter->stats.rx_packets++; adapter->stats.rx_packets++;
...@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, ...@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
pr_debug("Receive skb->ip_summed: %d length: %d\n", pr_debug("Receive skb->ip_summed: %d length: %d\n",
skb->ip_summed, length); skb->ip_summed, length);
} }
dorrop:
/* return some buffers to hardware, one at a time is too slow */ /* return some buffers to hardware, one at a time is too slow */
if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
pch_gbe_alloc_rx_buffers(adapter, rx_ring, pch_gbe_alloc_rx_buffers(adapter, rx_ring,
...@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) ...@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
pr_err("Error: can't bring device up\n"); pr_err("Error: can't bring device up\n");
return err; return err;
} }
err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
if (err) {
pr_err("Error: can't bring device up\n");
return err;
}
pch_gbe_alloc_tx_buffers(adapter, tx_ring); pch_gbe_alloc_tx_buffers(adapter, tx_ring);
pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
adapter->tx_queue_len = netdev->tx_queue_len; adapter->tx_queue_len = netdev->tx_queue_len;
pch_gbe_start_receive(&adapter->hw);
mod_timer(&adapter->watchdog_timer, jiffies); mod_timer(&adapter->watchdog_timer, jiffies);
...@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) ...@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
void pch_gbe_down(struct pch_gbe_adapter *adapter) void pch_gbe_down(struct pch_gbe_adapter *adapter)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
/* signal that we're down so the interrupt handler does not /* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer */ * reschedule our watchdog timer */
...@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter) ...@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
pch_gbe_reset(adapter); pch_gbe_reset(adapter);
pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
rx_ring->rx_buff_pool_logic = 0;
rx_ring->rx_buff_pool_size = 0;
rx_ring->rx_buff_pool = NULL;
} }
/** /**
...@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{ {
struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_adapter *adapter = netdev_priv(netdev);
int max_frame; int max_frame;
unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
int err;
max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
...@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
else if (max_frame <= PCH_GBE_FRAME_SIZE_8192) else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192; adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
else else
adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE; adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
netdev->mtu = new_mtu;
adapter->hw.mac.max_frame_size = max_frame;
if (netif_running(netdev)) if (netif_running(netdev)) {
pch_gbe_reinit_locked(adapter); pch_gbe_down(adapter);
else err = pch_gbe_up(adapter);
if (err) {
adapter->rx_buffer_len = old_rx_buffer_len;
pch_gbe_up(adapter);
return -ENOMEM;
} else {
netdev->mtu = new_mtu;
adapter->hw.mac.max_frame_size = max_frame;
}
} else {
pch_gbe_reset(adapter); pch_gbe_reset(adapter);
netdev->mtu = new_mtu;
adapter->hw.mac.max_frame_size = max_frame;
}
pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
...@@ -2103,6 +2172,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) ...@@ -2103,6 +2172,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
int work_done = 0; int work_done = 0;
bool poll_end_flag = false; bool poll_end_flag = false;
bool cleaned = false; bool cleaned = false;
u32 int_en;
pr_debug("budget : %d\n", budget); pr_debug("budget : %d\n", budget);
...@@ -2110,8 +2180,15 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) ...@@ -2110,8 +2180,15 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
if (!netif_carrier_ok(netdev)) { if (!netif_carrier_ok(netdev)) {
poll_end_flag = true; poll_end_flag = true;
} else { } else {
cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
if (adapter->rx_stop_flag) {
adapter->rx_stop_flag = false;
pch_gbe_start_receive(&adapter->hw);
int_en = ioread32(&adapter->hw.reg->INT_EN);
iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
&adapter->hw.reg->INT_EN);
}
cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
if (cleaned) if (cleaned)
work_done = budget; work_done = budget;
...@@ -2452,6 +2529,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { ...@@ -2452,6 +2529,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
.class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
.class_mask = (0xFFFF00) .class_mask = (0xFFFF00)
}, },
{.vendor = PCI_VENDOR_ID_ROHM,
.device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_NETWORK_ETHERNET << 8),
.class_mask = (0xFFFF00)
},
/* required last entry */ /* required last entry */
{0} {0}
}; };
......
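Taken together, the pch_gbe_main.c hunks above add a stop/restart dance for Rx FIFO overruns: the interrupt handler masks PCH_GBE_INT_RX_FIFO_ERR, stops Rx DMA (resetting the Rx MAC if the DMA bus never goes idle), and sets rx_stop_flag; the NAPI poll later drains the ring, restarts reception, and unmasks the interrupt. A compressed, hypothetical outline of that control flow:

#include <stdbool.h>
#include <stdio.h>

static bool rx_stop_flag;

static void irq_rx_fifo_error(void)
{
	if (!rx_stop_flag) {
		rx_stop_flag = true;
		puts("mask RX_FIFO_ERR interrupt");
		puts("disable Rx DMA, wait for bus idle, reset Rx MAC");
	}
}

static void napi_poll(void)
{
	puts("clean Rx ring");
	if (rx_stop_flag) {
		rx_stop_flag = false;
		puts("re-enable Rx DMA and MAC receive");
		puts("unmask RX_FIFO_ERR interrupt");
	}
	puts("clean Tx ring");
}

int main(void)
{
	irq_rx_fifo_error();
	napi_poll();
	return 0;
}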
...@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx) ...@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx)
{ {
struct pci_dev *pci_dev = efx->pci_dev; struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask; dma_addr_t dma_mask = efx->type->max_dma_mask;
bool use_wc;
int rc; int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
...@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx) ...@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx)
rc = -EIO; rc = -EIO;
goto fail3; goto fail3;
} }
efx->membase = ioremap_nocache(efx->membase_phys,
/* bug22643: If SR-IOV is enabled then tx push over a write combined efx->type->mem_map_size);
* mapping is unsafe. We need to disable write combining in this case.
* MSI is unsupported when SR-IOV is enabled, and the firmware will
* have removed the MSI capability. So write combining is safe if
* there is an MSI capability.
*/
use_wc = (!EFX_WORKAROUND_22643(efx) ||
pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
if (use_wc)
efx->membase = ioremap_wc(efx->membase_phys,
efx->type->mem_map_size);
else
efx->membase = ioremap_nocache(efx->membase_phys,
efx->type->mem_map_size);
if (!efx->membase) { if (!efx->membase) {
netif_err(efx, probe, efx->net_dev, netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n", "could not map memory BAR at %llx+%x\n",
......
...@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, ...@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
_efx_writed(efx, value->u32[2], reg + 8); _efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12); _efx_writed(efx, value->u32[3], reg + 12);
#endif #endif
wmb();
mmiowb(); mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags); spin_unlock_irqrestore(&efx->biu_lock, flags);
} }
...@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, ...@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
__raw_writel((__force u32)value->u32[0], membase + addr); __raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4); __raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif #endif
wmb();
mmiowb(); mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags); spin_unlock_irqrestore(&efx->biu_lock, flags);
} }
...@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, ...@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
/* No lock required */ /* No lock required */
_efx_writed(efx, value->u32[0], reg); _efx_writed(efx, value->u32[0], reg);
wmb();
} }
/* Read a 128-bit CSR, locking as appropriate. */ /* Read a 128-bit CSR, locking as appropriate. */
...@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, ...@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
spin_lock_irqsave(&efx->biu_lock, flags); spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0); value->u32[0] = _efx_readd(efx, reg + 0);
rmb();
value->u32[1] = _efx_readd(efx, reg + 4); value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8); value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12); value->u32[3] = _efx_readd(efx, reg + 12);
...@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, ...@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
value->u64[0] = (__force __le64)__raw_readq(membase + addr); value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else #else
value->u32[0] = (__force __le32)__raw_readl(membase + addr); value->u32[0] = (__force __le32)__raw_readl(membase + addr);
rmb();
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif #endif
spin_unlock_irqrestore(&efx->biu_lock, flags); spin_unlock_irqrestore(&efx->biu_lock, flags);
...@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, ...@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
_efx_writed(efx, value->u32[2], reg + 8); _efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12); _efx_writed(efx, value->u32[3], reg + 12);
#endif #endif
wmb();
} }
#define efx_writeo_page(efx, value, reg, page) \ #define efx_writeo_page(efx, value, reg, page) \
_efx_writeo_page(efx, value, \ _efx_writeo_page(efx, value, \
......
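Note on the barrier removals above: the efx_init_io() hunk earlier reverted the BAR back to a plain ioremap_nocache() mapping, and these explicit wmb()/rmb() calls had been paired with the write-combining mapping; with uncacheable MMIO restored they are presumably redundant, which is what lets this hunk drop them.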
...@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) ...@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
return &nic_data->mcdi; return &nic_data->mcdi;
} }
static inline void
efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
{
struct siena_nic_data *nic_data = efx->nic_data;
value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
}
static inline void
efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
{
struct siena_nic_data *nic_data = efx->nic_data;
__raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
}
void efx_mcdi_init(struct efx_nic *efx) void efx_mcdi_init(struct efx_nic *efx)
{ {
struct efx_mcdi_iface *mcdi; struct efx_mcdi_iface *mcdi;
...@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, ...@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
const u8 *inbuf, size_t inlen) const u8 *inbuf, size_t inlen)
{ {
struct efx_mcdi_iface *mcdi = efx_mcdi(efx); struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned pdu = MCDI_PDU(efx); unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
unsigned doorbell = MCDI_DOORBELL(efx); unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
unsigned int i; unsigned int i;
efx_dword_t hdr; efx_dword_t hdr;
u32 xflags, seqno; u32 xflags, seqno;
...@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, ...@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
MCDI_HEADER_SEQ, seqno, MCDI_HEADER_SEQ, seqno,
MCDI_HEADER_XFLAGS, xflags); MCDI_HEADER_XFLAGS, xflags);
efx_mcdi_writed(efx, &hdr, pdu); efx_writed(efx, &hdr, pdu);
for (i = 0; i < inlen; i += 4) for (i = 0; i < inlen; i += 4)
efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
pdu + 4 + i);
/* Ensure the payload is written out before the header */
wmb();
/* ring the doorbell with a distinctive value */ /* ring the doorbell with a distinctive value */
EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
efx_mcdi_writed(efx, &hdr, doorbell);
} }
static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
{ {
struct efx_mcdi_iface *mcdi = efx_mcdi(efx); struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int pdu = MCDI_PDU(efx); unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
int i; int i;
BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
BUG_ON(outlen & 3 || outlen >= 0x100); BUG_ON(outlen & 3 || outlen >= 0x100);
for (i = 0; i < outlen; i += 4) for (i = 0; i < outlen; i += 4)
efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
} }
static int efx_mcdi_poll(struct efx_nic *efx) static int efx_mcdi_poll(struct efx_nic *efx)
...@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) ...@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
struct efx_mcdi_iface *mcdi = efx_mcdi(efx); struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int time, finish; unsigned int time, finish;
unsigned int respseq, respcmd, error; unsigned int respseq, respcmd, error;
unsigned int pdu = MCDI_PDU(efx); unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
unsigned int rc, spins; unsigned int rc, spins;
efx_dword_t reg; efx_dword_t reg;
...@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx) ...@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)
time = get_seconds(); time = get_seconds();
efx_mcdi_readd(efx, &reg, pdu); rmb();
efx_readd(efx, &reg, pdu);
/* All 1's indicates that shared memory is in reset (and is /* All 1's indicates that shared memory is in reset (and is
* not a valid header). Wait for it to come out reset before * not a valid header). Wait for it to come out reset before
...@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) ...@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
respseq, mcdi->seqno); respseq, mcdi->seqno);
rc = EIO; rc = EIO;
} else if (error) { } else if (error) {
efx_mcdi_readd(efx, &reg, pdu + 4); efx_readd(efx, &reg, pdu + 4);
switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
#define TRANSLATE_ERROR(name) \ #define TRANSLATE_ERROR(name) \
case MC_CMD_ERR_ ## name: \ case MC_CMD_ERR_ ## name: \
...@@ -222,21 +210,21 @@ static int efx_mcdi_poll(struct efx_nic *efx) ...@@ -222,21 +210,21 @@ static int efx_mcdi_poll(struct efx_nic *efx)
/* Test and clear MC-rebooted flag for this port/function */ /* Test and clear MC-rebooted flag for this port/function */
int efx_mcdi_poll_reboot(struct efx_nic *efx) int efx_mcdi_poll_reboot(struct efx_nic *efx)
{ {
unsigned int addr = MCDI_REBOOT_FLAG(efx); unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
efx_dword_t reg; efx_dword_t reg;
uint32_t value; uint32_t value;
if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
return false; return false;
efx_mcdi_readd(efx, &reg, addr); efx_readd(efx, &reg, addr);
value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
if (value == 0) if (value == 0)
return 0; return 0;
EFX_ZERO_DWORD(reg); EFX_ZERO_DWORD(reg);
efx_mcdi_writed(efx, &reg, addr); efx_writed(efx, &reg, addr);
if (value == MC_STATUS_DWORD_ASSERT) if (value == MC_STATUS_DWORD_ASSERT)
return -EINTR; return -EINTR;
......
...@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) ...@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
size = min_t(size_t, table->step, 16); size = min_t(size_t, table->step, 16);
if (table->offset >= efx->type->mem_map_size) {
/* No longer mapped; return dummy data */
memcpy(buf, "\xde\xc0\xad\xde", 4);
buf += table->rows * size;
continue;
}
for (i = 0; i < table->rows; i++) { for (i = 0; i < table->rows; i++) {
switch (table->step) { switch (table->step) {
case 4: /* 32-bit register or SRAM */ case 4: /* 32-bit register or SRAM */
......
...@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) ...@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
/** /**
* struct siena_nic_data - Siena NIC state * struct siena_nic_data - Siena NIC state
* @mcdi: Management-Controller-to-Driver Interface * @mcdi: Management-Controller-to-Driver Interface
* @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
* @wol_filter_id: Wake-on-LAN packet filter id * @wol_filter_id: Wake-on-LAN packet filter id
*/ */
struct siena_nic_data { struct siena_nic_data {
struct efx_mcdi_iface mcdi; struct efx_mcdi_iface mcdi;
void __iomem *mcdi_smem;
int wol_filter_id; int wol_filter_id;
}; };
......
...@@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx) ...@@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx)
efx_reado(efx, &reg, FR_AZ_CS_DEBUG); efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
/* Initialise MCDI */
nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
FR_CZ_MC_TREG_SMEM,
FR_CZ_MC_TREG_SMEM_STEP *
FR_CZ_MC_TREG_SMEM_ROWS);
if (!nic_data->mcdi_smem) {
netif_err(efx, probe, efx->net_dev,
"could not map MCDI at %llx+%x\n",
(unsigned long long)efx->membase_phys +
FR_CZ_MC_TREG_SMEM,
FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
rc = -ENOMEM;
goto fail1;
}
efx_mcdi_init(efx); efx_mcdi_init(efx);
/* Recover from a failed assertion before probing */ /* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx); rc = efx_mcdi_handle_assertion(efx);
if (rc) if (rc)
goto fail2; goto fail1;
/* Let the BMC know that the driver is now in charge of link and /* Let the BMC know that the driver is now in charge of link and
* filter settings. We must do this before we reset the NIC */ * filter settings. We must do this before we reset the NIC */
...@@ -324,7 +310,6 @@ static int siena_probe_nic(struct efx_nic *efx) ...@@ -324,7 +310,6 @@ static int siena_probe_nic(struct efx_nic *efx)
fail3: fail3:
efx_mcdi_drv_attach(efx, false, NULL); efx_mcdi_drv_attach(efx, false, NULL);
fail2: fail2:
iounmap(nic_data->mcdi_smem);
fail1: fail1:
kfree(efx->nic_data); kfree(efx->nic_data);
return rc; return rc;
...@@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx) ...@@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx)
static void siena_remove_nic(struct efx_nic *efx) static void siena_remove_nic(struct efx_nic *efx)
{ {
struct siena_nic_data *nic_data = efx->nic_data;
efx_nic_free_buffer(efx, &efx->irq_status); efx_nic_free_buffer(efx, &efx->irq_status);
siena_reset_hw(efx, RESET_TYPE_ALL); siena_reset_hw(efx, RESET_TYPE_ALL);
...@@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx) ...@@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx)
efx_mcdi_drv_attach(efx, false, NULL); efx_mcdi_drv_attach(efx, false, NULL);
/* Tear down the private nic state */ /* Tear down the private nic state */
iounmap(nic_data->mcdi_smem); kfree(efx->nic_data);
kfree(nic_data);
efx->nic_data = NULL; efx->nic_data = NULL;
} }
...@@ -656,7 +638,8 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -656,7 +638,8 @@ const struct efx_nic_type siena_a0_nic_type = {
.default_mac_ops = &efx_mcdi_mac_operations, .default_mac_ops = &efx_mcdi_mac_operations,
.revision = EFX_REV_SIENA_A0, .revision = EFX_REV_SIENA_A0,
.mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ .mem_map_size = (FR_CZ_MC_TREG_SMEM +
FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL, .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
......
...@@ -38,8 +38,6 @@ ...@@ -38,8 +38,6 @@
#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */ /* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
/* Write combining and sriov=enabled are incompatible */
#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
/* Spurious parity errors in TSORT buffers */ /* Spurious parity errors in TSORT buffers */
#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
......
...@@ -59,6 +59,7 @@ ...@@ -59,6 +59,7 @@
#define USB_PRODUCT_IPHONE_3G 0x1292 #define USB_PRODUCT_IPHONE_3G 0x1292
#define USB_PRODUCT_IPHONE_3GS 0x1294 #define USB_PRODUCT_IPHONE_3GS 0x1294
#define USB_PRODUCT_IPHONE_4 0x1297 #define USB_PRODUCT_IPHONE_4 0x1297
#define USB_PRODUCT_IPHONE_4_VZW 0x129c
#define IPHETH_USBINTF_CLASS 255 #define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253 #define IPHETH_USBINTF_SUBCLASS 253
...@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = { ...@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = {
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) }, IPHETH_USBINTF_PROTO) },
{ USB_DEVICE_AND_INTERFACE_INFO(
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
{ } { }
}; };
MODULE_DEVICE_TABLE(usb, ipheth_table); MODULE_DEVICE_TABLE(usb, ipheth_table);
......
...@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah, ...@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
case ADC_DC_CAL: case ADC_DC_CAL:
/* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
if (!IS_CHAN_B(chan) && if (!IS_CHAN_B(chan) &&
!(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
IS_CHAN_HT20(chan)))
supported = true; supported = true;
break; break;
} }
......
...@@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, ...@@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
REG_WRITE_ARRAY(&ah->iniModesAdditional, REG_WRITE_ARRAY(&ah->iniModesAdditional,
modesIndex, regWrites); modesIndex, regWrites);
if (AR_SREV_9300(ah)) if (AR_SREV_9330(ah))
REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites); REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
if (AR_SREV_9340(ah) && !ah->is_clk_25mhz) if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
......
...@@ -2303,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) ...@@ -2303,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
mutex_lock(&sc->mutex); mutex_lock(&sc->mutex);
cancel_delayed_work_sync(&sc->tx_complete_work); cancel_delayed_work_sync(&sc->tx_complete_work);
if (ah->ah_flags & AH_UNPLUGGED) {
ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
mutex_unlock(&sc->mutex);
return;
}
if (sc->sc_flags & SC_OP_INVALID) { if (sc->sc_flags & SC_OP_INVALID) {
ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
mutex_unlock(&sc->mutex); mutex_unlock(&sc->mutex);
......
...@@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, ...@@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
out: out:
rs_sta->last_txrate_idx = index; if (sband->band == IEEE80211_BAND_5GHZ) {
if (sband->band == IEEE80211_BAND_5GHZ) if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
info->control.rates[0].idx = rs_sta->last_txrate_idx - index = IWL_FIRST_OFDM_RATE;
IWL_FIRST_OFDM_RATE; rs_sta->last_txrate_idx = index;
else info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
} else {
rs_sta->last_txrate_idx = index;
info->control.rates[0].idx = rs_sta->last_txrate_idx; info->control.rates[0].idx = rs_sta->last_txrate_idx;
}
IWL_DEBUG_RATE(priv, "leave: %d\n", index); IWL_DEBUG_RATE(priv, "leave: %d\n", index);
} }
......
...@@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv) ...@@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
memset(&cmd, 0, sizeof(cmd)); memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
if (!(cmd.radio_sensor_offset)) if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
......
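The one-character iwlagn fix above swaps sizeof(offset_calib), the size of a pointer, for sizeof(*offset_calib), the size of the object it points to. A standalone illustration of the difference:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned short src = 0x1234, dst = 0;
	unsigned short *offset_calib = &src;

	printf("sizeof(offset_calib)  = %zu (pointer size)\n", sizeof(offset_calib));
	printf("sizeof(*offset_calib) = %zu (object size)\n", sizeof(*offset_calib));

	/* Copying sizeof(offset_calib) bytes would read and write past the
	 * two-byte buffers; the fixed form copies exactly the object. */
	memcpy(&dst, offset_calib, sizeof(*offset_calib));
	printf("dst = 0x%x\n", dst);
	return 0;
}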
...@@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) ...@@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
cmd = txq->cmd[cmd_index]; cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index]; meta = &txq->meta[cmd_index];
txq->time_stamp = jiffies;
iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */ /* Input error checking is done when commands are added to queue. */
......
...@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, ...@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->link_state = MAC80211_NOLINK; mac->link_state = MAC80211_NOLINK;
memset(mac->bssid, 0, 6); memset(mac->bssid, 0, 6);
/* reset sec info */
rtl_cam_reset_sec_info(hw);
rtl_cam_reset_all_entry(hw);
mac->vendor = PEER_UNKNOWN; mac->vendor = PEER_UNKNOWN;
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
...@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ...@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
*or clear all entry here. *or clear all entry here.
*/ */
rtl_cam_delete_one_entry(hw, mac_addr, key_idx); rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
rtl_cam_reset_sec_info(hw);
break; break;
default: default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
......
...@@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, ...@@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
(tcb_desc->rts_use_shortpreamble ? 1 : 0) (tcb_desc->rts_use_shortpreamble ? 1 : 0)
: (tcb_desc->rts_use_shortgi ? 1 : 0))); : (tcb_desc->rts_use_shortgi ? 1 : 0)));
if (mac->bw_40) { if (mac->bw_40) {
if (tcb_desc->packet_bw) { if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
SET_TX_DESC_DATA_BW(txdesc, 1); SET_TX_DESC_DATA_BW(txdesc, 1);
SET_TX_DESC_DATA_SC(txdesc, 3); SET_TX_DESC_DATA_SC(txdesc, 3);
} else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){
SET_TX_DESC_DATA_BW(txdesc, 1);
SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);
} else { } else {
SET_TX_DESC_DATA_BW(txdesc, 0); SET_TX_DESC_DATA_BW(txdesc, 0);
if (rate_flag & IEEE80211_TX_RC_DUP_DATA) SET_TX_DESC_DATA_SC(txdesc, 0);
SET_TX_DESC_DATA_SC(txdesc, }
mac->cur_40_prime_sc);
}
} else { } else {
SET_TX_DESC_DATA_BW(txdesc, 0); SET_TX_DESC_DATA_BW(txdesc, 0);
SET_TX_DESC_DATA_SC(txdesc, 0); SET_TX_DESC_DATA_SC(txdesc, 0);
......
...@@ -524,6 +524,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, ...@@ -524,6 +524,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb, extern struct sk_buff *skb_clone(struct sk_buff *skb,
gfp_t priority); gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb, extern struct sk_buff *skb_copy(const struct sk_buff *skb,
......
...@@ -231,6 +231,8 @@ enum ...@@ -231,6 +231,8 @@ enum
LINUX_MIB_TCPDEFERACCEPTDROP, LINUX_MIB_TCPDEFERACCEPTDROP,
LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */
LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */ LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */
LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */
LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */
__LINUX_MIB_MAX __LINUX_MIB_MAX
}; };
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#ifndef _NET_FLOW_H #ifndef _NET_FLOW_H
#define _NET_FLOW_H #define _NET_FLOW_H
#include <linux/socket.h>
#include <linux/in6.h> #include <linux/in6.h>
#include <linux/atomic.h> #include <linux/atomic.h>
...@@ -68,7 +69,7 @@ struct flowi4 { ...@@ -68,7 +69,7 @@ struct flowi4 {
#define fl4_ipsec_spi uli.spi #define fl4_ipsec_spi uli.spi
#define fl4_mh_type uli.mht.type #define fl4_mh_type uli.mht.type
#define fl4_gre_key uli.gre_key #define fl4_gre_key uli.gre_key
}; } __attribute__((__aligned__(BITS_PER_LONG/8)));
static inline void flowi4_init_output(struct flowi4 *fl4, int oif, static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
__u32 mark, __u8 tos, __u8 scope, __u32 mark, __u8 tos, __u8 scope,
...@@ -112,7 +113,7 @@ struct flowi6 { ...@@ -112,7 +113,7 @@ struct flowi6 {
#define fl6_ipsec_spi uli.spi #define fl6_ipsec_spi uli.spi
#define fl6_mh_type uli.mht.type #define fl6_mh_type uli.mht.type
#define fl6_gre_key uli.gre_key #define fl6_gre_key uli.gre_key
}; } __attribute__((__aligned__(BITS_PER_LONG/8)));
struct flowidn { struct flowidn {
struct flowi_common __fl_common; struct flowi_common __fl_common;
...@@ -127,7 +128,7 @@ struct flowidn { ...@@ -127,7 +128,7 @@ struct flowidn {
union flowi_uli uli; union flowi_uli uli;
#define fld_sport uli.ports.sport #define fld_sport uli.ports.sport
#define fld_dport uli.ports.dport #define fld_dport uli.ports.dport
}; } __attribute__((__aligned__(BITS_PER_LONG/8)));
struct flowi { struct flowi {
union { union {
...@@ -161,6 +162,24 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn) ...@@ -161,6 +162,24 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
return container_of(fldn, struct flowi, u.dn); return container_of(fldn, struct flowi, u.dn);
} }
typedef unsigned long flow_compare_t;
static inline size_t flow_key_size(u16 family)
{
switch (family) {
case AF_INET:
BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t));
return sizeof(struct flowi4) / sizeof(flow_compare_t);
case AF_INET6:
BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t));
return sizeof(struct flowi6) / sizeof(flow_compare_t);
case AF_DECnet:
BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t));
return sizeof(struct flowidn) / sizeof(flow_compare_t);
}
return 0;
}
#define FLOW_DIR_IN 0 #define FLOW_DIR_IN 0
#define FLOW_DIR_OUT 1 #define FLOW_DIR_OUT 1
#define FLOW_DIR_FWD 2 #define FLOW_DIR_FWD 2
......
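flow_key_size() above reports each family's key length in flow_compare_t (unsigned long) words, so the flow cache can hash and compare only the meaningful prefix of the flowi union instead of sizeof(struct flowi) bytes. A hedged sketch of how a lookup can consume it, with a trivial stand-in for the kernel's jhash2():

#include <stddef.h>
#include <stdio.h>

typedef unsigned long flow_compare_t;

/* Stand-in for jhash2(): hash an array of 32-bit words with a seed. */
static unsigned int hash_words(const unsigned int *k, size_t n, unsigned int seed)
{
	unsigned int h = seed;

	while (n--)
		h = h * 31 + *k++;
	return h;
}

/* Hash only the first keysize long-words of the key, as the reworked
 * flow_hash_code() does. */
static unsigned int flow_hash(const void *key, size_t keysize,
			      unsigned int rnd, unsigned int mask)
{
	size_t words = keysize * sizeof(flow_compare_t) / sizeof(unsigned int);

	return hash_words(key, words, rnd) & mask;
}

int main(void)
{
	unsigned long key[4] = { 1, 2, 3, 4 };

	printf("bucket %u\n", flow_hash(key, 4, 0x12345678u, 1023));
	return 0;
}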
...@@ -96,7 +96,8 @@ extern int sysctl_max_syn_backlog; ...@@ -96,7 +96,8 @@ extern int sysctl_max_syn_backlog;
*/ */
struct listen_sock { struct listen_sock {
u8 max_qlen_log; u8 max_qlen_log;
/* 3 bytes hole, try to use */ u8 synflood_warned;
/* 2 bytes hole, try to use */
int qlen; int qlen;
int qlen_young; int qlen_young;
int clock_hand; int clock_hand;
......
...@@ -109,6 +109,7 @@ typedef enum { ...@@ -109,6 +109,7 @@ typedef enum {
SCTP_CMD_SEND_MSG, /* Send the whole use message */ SCTP_CMD_SEND_MSG, /* Send the whole use message */
SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/ SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
SCTP_CMD_SET_ASOC, /* Restore association context */
SCTP_CMD_LAST SCTP_CMD_LAST
} sctp_verb_t; } sctp_verb_t;
......
...@@ -460,6 +460,9 @@ extern int tcp_write_wakeup(struct sock *); ...@@ -460,6 +460,9 @@ extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk); extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *); extern int tcp_send_synack(struct sock *);
extern int tcp_syn_flood_action(struct sock *sk,
const struct sk_buff *skb,
const char *proto);
extern void tcp_push_one(struct sock *, unsigned int mss_now); extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk); extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk); extern void tcp_send_delayed_ack(struct sock *sk);
......
...@@ -39,6 +39,7 @@ extern int datagram_recv_ctl(struct sock *sk, ...@@ -39,6 +39,7 @@ extern int datagram_recv_ctl(struct sock *sk,
struct sk_buff *skb); struct sk_buff *skb);
extern int datagram_send_ctl(struct net *net, extern int datagram_send_ctl(struct net *net,
struct sock *sk,
struct msghdr *msg, struct msghdr *msg,
struct flowi6 *fl6, struct flowi6 *fl6,
struct ipv6_txoptions *opt, struct ipv6_txoptions *opt,
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
menuconfig BRIDGE_NF_EBTABLES menuconfig BRIDGE_NF_EBTABLES
tristate "Ethernet Bridge tables (ebtables) support" tristate "Ethernet Bridge tables (ebtables) support"
depends on BRIDGE && BRIDGE_NETFILTER depends on BRIDGE && NETFILTER
select NETFILTER_XTABLES select NETFILTER_XTABLES
help help
ebtables is a general, extensible frame/packet identification ebtables is a general, extensible frame/packet identification
......
...@@ -93,10 +93,14 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev) ...@@ -93,10 +93,14 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
caifdevs = caif_device_list(dev_net(dev)); caifdevs = caif_device_list(dev_net(dev));
BUG_ON(!caifdevs); BUG_ON(!caifdevs);
caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
if (!caifd) if (!caifd)
return NULL; return NULL;
caifd->pcpu_refcnt = alloc_percpu(int); caifd->pcpu_refcnt = alloc_percpu(int);
if (!caifd->pcpu_refcnt) {
kfree(caifd);
return NULL;
}
caifd->netdev = dev; caifd->netdev = dev;
dev_hold(dev); dev_hold(dev);
return caifd; return caifd;
......
...@@ -857,7 +857,7 @@ static __exit void can_exit(void) ...@@ -857,7 +857,7 @@ static __exit void can_exit(void)
struct net_device *dev; struct net_device *dev;
if (stats_timer) if (stats_timer)
del_timer(&can_stattimer); del_timer_sync(&can_stattimer);
can_remove_proc(); can_remove_proc();
......
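The del_timer_sync() change above matters on SMP: del_timer() only deactivates a pending timer, while del_timer_sync() also waits for a handler already running on another CPU to finish, so the can_stattimer callback can no longer still be executing while the module unloads.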
...@@ -1515,6 +1515,14 @@ static inline bool is_skb_forwardable(struct net_device *dev, ...@@ -1515,6 +1515,14 @@ static inline bool is_skb_forwardable(struct net_device *dev,
*/ */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{ {
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
}
}
skb_orphan(skb); skb_orphan(skb);
nf_reset(skb); nf_reset(skb);
......
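dev_forward_skb() above now copies zerocopy user frags into kernel memory before looping a packet back to another device, so a forwarded skb no longer pins userspace pages. A schematic sketch of the guard, with stand-in types for the skb machinery:

#include <stdbool.h>
#include <stdio.h>

#define NET_RX_SUCCESS 0
#define NET_RX_DROP    1

struct pkt {
	bool zerocopy;	/* models SKBTX_DEV_ZEROCOPY */
};

/* Stand-in for skb_copy_ubufs(): pull user pages into kernel memory. */
static int copy_user_frags(struct pkt *p)
{
	p->zerocopy = false;
	return 0;	/* 0 on success, negative on allocation failure */
}

static int forward(struct pkt *p)
{
	if (p->zerocopy && copy_user_frags(p) != 0)
		return NET_RX_DROP;	/* count the drop and free the packet */
	return NET_RX_SUCCESS;
}

int main(void)
{
	struct pkt p = { .zerocopy = true };

	printf("%d\n", forward(&p));
	return 0;
}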
@@ -30,6 +30,7 @@ struct flow_cache_entry {
 		struct hlist_node	hlist;
 		struct list_head	gc_list;
 	} u;
+	struct net			*net;
 	u16				family;
 	u8				dir;
 	u32				genid;
@@ -172,29 +173,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
 static u32 flow_hash_code(struct flow_cache *fc,
 			  struct flow_cache_percpu *fcp,
-			  const struct flowi *key)
+			  const struct flowi *key,
+			  size_t keysize)
 {
 	const u32 *k = (const u32 *) key;
+	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
 
-	return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+	return jhash2(k, length, fcp->hash_rnd)
 		& (flow_cache_hash_size(fc) - 1);
 }
 
-typedef unsigned long flow_compare_t;
-
 /* I hear what you're saying, use memcmp.  But memcmp cannot make
- * important assumptions that we can here, such as alignment and
- * constant size.
+ * important assumptions that we can here, such as alignment.
  */
-static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
+			    size_t keysize)
 {
 	const flow_compare_t *k1, *k1_lim, *k2;
-	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
-
-	BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
 	k1 = (const flow_compare_t *) key1;
-	k1_lim = k1 + n_elem;
+	k1_lim = k1 + keysize;
 	k2 = (const flow_compare_t *) key2;
@@ -215,6 +213,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 	struct flow_cache_entry *fle, *tfle;
 	struct hlist_node *entry;
 	struct flow_cache_object *flo;
+	size_t keysize;
 	unsigned int hash;
 
 	local_bh_disable();
@@ -222,6 +221,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 	fle = NULL;
 	flo = NULL;
+
+	keysize = flow_key_size(family);
+	if (!keysize)
+		goto nocache;
+
 	/* Packet really early in init?  Making flow_cache_init a
 	 * pre-smp initcall would solve this.  --RR */
 	if (!fcp->hash_table)
@@ -230,11 +234,12 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 	if (fcp->hash_rnd_recalc)
 		flow_new_hash_rnd(fc, fcp);
 
-	hash = flow_hash_code(fc, fcp, key);
+	hash = flow_hash_code(fc, fcp, key, keysize);
 	hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
-		if (tfle->family == family &&
+		if (tfle->net == net &&
+		    tfle->family == family &&
 		    tfle->dir == dir &&
-		    flow_key_compare(key, &tfle->key) == 0) {
+		    flow_key_compare(key, &tfle->key, keysize) == 0) {
 			fle = tfle;
 			break;
 		}
@@ -246,9 +251,10 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
 		if (fle) {
+			fle->net = net;
 			fle->family = family;
 			fle->dir = dir;
-			memcpy(&fle->key, key, sizeof(*key));
+			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
 			fle->object = NULL;
 			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
 			fcp->hash_count++;
...
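The change above makes the flow-cache hash and compare cover only the bytes that are meaningful for the given address family (plus the owning netns), instead of hashing the whole flowi union. A minimal userspace sketch of the same idea, with hypothetical key_v4/key_v6 stand-ins for struct flowi4/flowi6; the aligned attribute plays the role of the companion "Align AF-specific flowi structs to long" patch, making the word-size divisions exact:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef unsigned long flow_compare_t;

/* Hypothetical stand-ins for struct flowi4/flowi6; the aligned attribute
 * pads sizeof() to a multiple of sizeof(long), so the divisions below
 * are exact. */
struct key_v4 {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
} __attribute__((aligned(sizeof(flow_compare_t))));

struct key_v6 {
	uint32_t saddr[4], daddr[4];
	uint16_t sport, dport;
} __attribute__((aligned(sizeof(flow_compare_t))));

/* Per-family key size in flow_compare_t words; 0 means "don't cache",
 * which is what the lookup's new "if (!keysize) goto nocache" handles. */
static size_t key_size(int family)
{
	switch (family) {
	case 4: return sizeof(struct key_v4) / sizeof(flow_compare_t);
	case 6: return sizeof(struct key_v6) / sizeof(flow_compare_t);
	}
	return 0;
}

/* Hash/compare walk only 'keysize' words, so a short IPv4 key never
 * reads the stale tail a whole-union comparison would have touched. */
static int key_equal(const void *a, const void *b, size_t keysize)
{
	return memcmp(a, b, keysize * sizeof(flow_compare_t)) == 0;
}

int main(void)
{
	struct key_v4 k1, k2;

	memset(&k1, 0, sizeof(k1));	/* zero-fill, padding included */
	memset(&k2, 0, sizeof(k2));
	k1.saddr = k2.saddr = 0x0a000001;

	printf("v4 words=%zu, v6 words=%zu, equal=%d\n",
	       key_size(4), key_size(6), key_equal(&k1, &k2, key_size(4)));
	return 0;
}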
@@ -611,8 +611,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
-/* skb frags copy userspace buffers to kernel */
-static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+/* skb_copy_ubufs - copy userspace skb frags buffers to kernel
+ * @skb: the skb to modify
+ * @gfp_mask: allocation priority
+ *
+ * This must be called on SKBTX_DEV_ZEROCOPY skb.
+ * It will copy all frags into kernel and drop the reference
+ * to userspace pages.
+ *
+ * If this function is called from an interrupt gfp_mask() must be
+ * %GFP_ATOMIC.
+ *
+ * Returns 0 on success or a negative error code on failure
+ * to allocate kernel memory to copy to.
+ */
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	int i;
 	int num_frags = skb_shinfo(skb)->nr_frags;
@@ -652,6 +665,8 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 		skb_shinfo(skb)->frags[i - 1].page = head;
 		head = (struct page *)head->private;
 	}
+
+	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	return 0;
 }
@@ -677,7 +692,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 		if (skb_copy_ubufs(skb, gfp_mask))
 			return NULL;
-		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	}
 
 	n = skb + 1;
@@ -803,7 +817,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 			n = NULL;
 			goto out;
 		}
-		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -896,7 +909,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 		if (skb_copy_ubufs(skb, gfp_mask))
 			goto nofrags;
-		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	}
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 		get_page(skb_shinfo(skb)->frags[i].page);
...
@@ -340,7 +340,7 @@ void ether_setup(struct net_device *dev)
 	dev->addr_len		= ETH_ALEN;
 	dev->tx_queue_len	= 1000;	/* Ethernet wants good queues */
 	dev->flags		= IFF_BROADCAST|IFF_MULTICAST;
-	dev->priv_flags		= IFF_TX_SKB_SHARING;
+	dev->priv_flags		|= IFF_TX_SKB_SHARING;
 
 	memset(dev->broadcast, 0xFF, ETH_ALEN);
...
@@ -466,8 +466,13 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 
 	if (addr->sin_family != AF_INET) {
+		/* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
+		 * only if s_addr is INADDR_ANY.
+		 */
 		err = -EAFNOSUPPORT;
-		goto out;
+		if (addr->sin_family != AF_UNSPEC ||
+		    addr->sin_addr.s_addr != htonl(INADDR_ANY))
+			goto out;
 	}
 
 	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
...
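The inet_bind() change restores a BSD compatibility case: a sockaddr carrying AF_UNSPEC is now accepted as if it were AF_INET, but only for the wildcard address; any other address still fails with EAFNOSUPPORT. A small userspace probe of the new behaviour (the port number is arbitrary):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_UNSPEC;               /* not AF_INET */
	sin.sin_addr.s_addr = htonl(INADDR_ANY);  /* wildcard: now accepted */
	sin.sin_port = htons(12345);

	/* Succeeds with this fix applied; with a non-wildcard address it
	 * would still fail with EAFNOSUPPORT. */
	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		perror("bind");
	else
		puts("bind with AF_UNSPEC + INADDR_ANY accepted");

	close(fd);
	return 0;
}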
@@ -142,6 +142,14 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
 };
 
 /* Release a nexthop info record */
+static void free_fib_info_rcu(struct rcu_head *head)
+{
+	struct fib_info *fi = container_of(head, struct fib_info, rcu);
+
+	if (fi->fib_metrics != (u32 *) dst_default_metrics)
+		kfree(fi->fib_metrics);
+	kfree(fi);
+}
 
 void free_fib_info(struct fib_info *fi)
 {
@@ -156,7 +164,7 @@ void free_fib_info(struct fib_info *fi)
 	} endfor_nexthops(fi);
 	fib_info_cnt--;
 	release_net(fi->fib_net);
-	kfree_rcu(fi, rcu);
+	call_rcu(&fi->rcu, free_fib_info_rcu);
 }
 
 void fib_release_info(struct fib_info *fi)
...
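Context for the kfree_rcu() to call_rcu() switch above: kfree_rcu() can only free the structure that embeds the rcu_head, but a fib_info now owns a separately allocated fib_metrics array whenever it is not pointing at dst_default_metrics, so the free has to run arbitrary cleanup after the grace period. The pattern in isolation, as a kernel-style sketch with a hypothetical obj type and shared-defaults symbol rather than the fib_info code itself:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct obj {
	u32 *metrics;			/* may alias shared read-only defaults */
	struct rcu_head rcu;
};

extern const u32 obj_default_metrics[];	/* assumed shared storage */

static void obj_free_rcu(struct rcu_head *head)
{
	struct obj *o = container_of(head, struct obj, rcu);

	if (o->metrics != (u32 *)obj_default_metrics)
		kfree(o->metrics);	/* private copy: ours to free */
	kfree(o);
}

static void obj_release(struct obj *o)
{
	/* Readers may still walk RCU-protected lists referencing 'o',
	 * so both frees are deferred past the grace period. */
	call_rcu(&o->rcu, obj_free_rcu);
}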
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	return skb;
 
 nlmsg_failure:
+	kfree_skb(skb);
 	*errp = -EINVAL;
 	printk(KERN_ERR "ip_queue: error creating packet message\n");
 	return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
 {
 	struct nf_queue_entry *entry;
 
-	if (vmsg->value > NF_MAX_VERDICT)
+	if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
 		return -EINVAL;
 
 	entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
 		break;
 
 	case IPQM_VERDICT:
-		if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
-			status = -EINVAL;
-		else
-			status = ipq_set_verdict(&pmsg->msg.verdict,
-						 len - sizeof(*pmsg));
-			break;
+		status = ipq_set_verdict(&pmsg->msg.verdict,
+					 len - sizeof(*pmsg));
+		break;
 	default:
 		status = -EINVAL;
 	}
...
@@ -254,6 +254,8 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
 	SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
 	SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
+	SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
+	SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
 	SNMP_MIB_SENTINEL
 };
...
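The two new LINUX_MIB entries surface in the TcpExt section of /proc/net/netstat (counter names on one line, values on the next). A quick userspace check that prints both TcpExt lines so the TCPReqQFullDoCookies / TCPReqQFullDrop columns can be read off by position:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/net/netstat", "r");

	if (!f) {
		perror("/proc/net/netstat");
		return 1;
	}
	/* TcpExt appears twice: a header line with counter names and a
	 * value line; print both and pair the columns by position. */
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "TcpExt:", 7) == 0)
			fputs(line, stdout);
	fclose(f);
	return 0;
}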
@@ -808,20 +808,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 	kfree(inet_rsk(req)->opt);
 }
 
-static void syn_flood_warning(const struct sk_buff *skb)
+/*
+ * Return 1 if a syncookie should be sent
+ */
+int tcp_syn_flood_action(struct sock *sk,
+			 const struct sk_buff *skb,
+			 const char *proto)
 {
-	const char *msg;
+	const char *msg = "Dropping request";
+	int want_cookie = 0;
+	struct listen_sock *lopt;
 
 #ifdef CONFIG_SYN_COOKIES
-	if (sysctl_tcp_syncookies)
+	if (sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
-	else
+		want_cookie = 1;
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+	} else
 #endif
-		msg = "Dropping request";
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
-	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
-		ntohs(tcp_hdr(skb)->dest), msg);
+	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
+	if (!lopt->synflood_warned) {
+		lopt->synflood_warned = 1;
+		pr_info("%s: Possible SYN flooding on port %d. %s. "
+			" Check SNMP counters.\n",
+			proto, ntohs(tcp_hdr(skb)->dest), msg);
+	}
+	return want_cookie;
 }
+EXPORT_SYMBOL(tcp_syn_flood_action);
 
 /*
  * Save and compile IPv4 options into the request_sock if needed.
@@ -1235,11 +1253,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	__be32 saddr = ip_hdr(skb)->saddr;
 	__be32 daddr = ip_hdr(skb)->daddr;
 	__u32 isn = TCP_SKB_CB(skb)->when;
-#ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
-#else
-#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
-#endif
 
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1250,14 +1264,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	 * evidently real one.
 	 */
 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-		if (net_ratelimit())
-			syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-		if (sysctl_tcp_syncookies) {
-			want_cookie = 1;
-		} else
-#endif
-		goto drop;
+		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
+		if (!want_cookie)
+			goto drop;
 	}
 
 	/* Accept backlog is full. If we have already queued enough
@@ -1303,9 +1312,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			while (l-- > 0)
 				*c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
 			want_cookie = 0;	/* not our kind of cookie */
-#endif
 			tmp_ext.cookie_out_never = 0; /* false */
 			tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 		} else if (!tp->rx_opt.cookie_in_always) {
...
@@ -599,7 +599,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
 	return 0;
 }
 
-int datagram_send_ctl(struct net *net,
+int datagram_send_ctl(struct net *net, struct sock *sk,
 		      struct msghdr *msg, struct flowi6 *fl6,
 		      struct ipv6_txoptions *opt,
 		      int *hlimit, int *tclass, int *dontfrag)
@@ -658,7 +658,8 @@ int datagram_send_ctl(struct net *net,
 			if (addr_type != IPV6_ADDR_ANY) {
 				int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
-				if (!ipv6_chk_addr(net, &src_info->ipi6_addr,
+				if (!inet_sk(sk)->transparent &&
+				    !ipv6_chk_addr(net, &src_info->ipi6_addr,
 						   strict ? dev : NULL, 0))
 					err = -EINVAL;
 				else
...
@@ -322,8 +322,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo
 }
 
 static struct ip6_flowlabel *
-fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
-	  int optlen, int *err_p)
+fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
+	  char __user *optval, int optlen, int *err_p)
 {
 	struct ip6_flowlabel *fl = NULL;
 	int olen;
@@ -360,7 +360,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
 		msg.msg_control = (void*)(fl->opt+1);
 		memset(&flowi6, 0, sizeof(flowi6));
 
-		err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk,
+		err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
 					&junk, &junk);
 		if (err)
 			goto done;
@@ -528,7 +528,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
 			return -EINVAL;
 
-		fl = fl_create(net, &freq, optval, optlen, &err);
+		fl = fl_create(net, sk, &freq, optval, optlen, &err);
 		if (fl == NULL)
 			return err;
 		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
...
@@ -475,7 +475,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		msg.msg_controllen = optlen;
 		msg.msg_control = (void*)(opt+1);
 
-		retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk,
+		retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
 					 &junk);
 		if (retv)
 			goto done;
...
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	return skb;
 
 nlmsg_failure:
+	kfree_skb(skb);
 	*errp = -EINVAL;
 	printk(KERN_ERR "ip6_queue: error creating packet message\n");
 	return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
 {
 	struct nf_queue_entry *entry;
 
-	if (vmsg->value > NF_MAX_VERDICT)
+	if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
 		return -EINVAL;
 
 	entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
 		break;
 
 	case IPQM_VERDICT:
-		if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
-			status = -EINVAL;
-		else
-			status = ipq_set_verdict(&pmsg->msg.verdict,
-						 len - sizeof(*pmsg));
-			break;
+		status = ipq_set_verdict(&pmsg->msg.verdict,
+					 len - sizeof(*pmsg));
+		break;
 	default:
 		status = -EINVAL;
 	}
...
@@ -817,8 +817,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(struct ipv6_txoptions);
 
-		err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
-					&tclass, &dontfrag);
+		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+					&hlimit, &tclass, &dontfrag);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
...
@@ -104,6 +104,9 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
 	struct inet_peer *peer;
 	u32 *p = NULL;
 
+	if (!(rt->dst.flags & DST_HOST))
+		return NULL;
+
 	if (!rt->rt6i_peer)
 		rt6_bind_peer(rt, 1);
@@ -252,6 +255,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 	struct inet6_dev *idev = rt->rt6i_idev;
 	struct inet_peer *peer = rt->rt6i_peer;
 
+	if (!(rt->dst.flags & DST_HOST))
+		dst_destroy_metrics_generic(dst);
+
 	if (idev != NULL) {
 		rt->rt6i_idev = NULL;
 		in6_dev_put(idev);
@@ -723,9 +729,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
 			ipv6_addr_copy(&rt->rt6i_gateway, daddr);
 		}
 
-		rt->rt6i_dst.plen = 128;
 		rt->rt6i_flags |= RTF_CACHE;
-		rt->dst.flags |= DST_HOST;
 
 #ifdef CONFIG_IPV6_SUBTREES
 		if (rt->rt6i_src.plen && saddr) {
@@ -775,9 +779,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
 	struct rt6_info *rt = ip6_rt_copy(ort, daddr);
 
 	if (rt) {
-		rt->rt6i_dst.plen = 128;
 		rt->rt6i_flags |= RTF_CACHE;
-		rt->dst.flags |= DST_HOST;
 		dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
 	}
 	return rt;
@@ -1078,12 +1080,15 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 		neigh = NULL;
 	}
 
-	rt->rt6i_idev = idev;
-	rt->dst.output = ip6_output;
+	rt->dst.flags |= DST_HOST;
 	dst_set_neighbour(&rt->dst, neigh);
 	atomic_set(&rt->dst.__refcnt, 1);
-	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
+	rt->dst.output = ip6_output;
+	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+	rt->rt6i_dst.plen = 128;
+	rt->rt6i_idev = idev;
 
 	spin_lock_bh(&icmp6_dst_lock);
 	rt->dst.next = icmp6_dst_gc_list;
@@ -1261,6 +1266,14 @@ int ip6_route_add(struct fib6_config *cfg)
 	if (rt->rt6i_dst.plen == 128)
 		rt->dst.flags |= DST_HOST;
 
+	if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
+		u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+		if (!metrics) {
+			err = -ENOMEM;
+			goto out;
+		}
+		dst_init_metrics(&rt->dst, metrics, 0);
+	}
 #ifdef CONFIG_IPV6_SUBTREES
 	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
 	rt->rt6i_src.plen = cfg->fc_src_len;
@@ -1607,9 +1620,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
 	if (on_link)
 		nrt->rt6i_flags &= ~RTF_GATEWAY;
 
-	nrt->rt6i_dst.plen = 128;
-	nrt->dst.flags |= DST_HOST;
-
 	ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
 	dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
@@ -1754,9 +1764,10 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
 	if (rt) {
 		rt->dst.input = ort->dst.input;
 		rt->dst.output = ort->dst.output;
+		rt->dst.flags |= DST_HOST;
 
 		ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
-		rt->rt6i_dst.plen = ort->rt6i_dst.plen;
+		rt->rt6i_dst.plen = 128;
 		dst_copy_metrics(&rt->dst, &ort->dst);
 		rt->dst.error = ort->dst.error;
 		rt->rt6i_idev = ort->rt6i_idev;
...
@@ -531,20 +531,6 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
 	return tcp_v6_send_synack(sk, req, rvp);
 }
 
-static inline void syn_flood_warning(struct sk_buff *skb)
-{
-#ifdef CONFIG_SYN_COOKIES
-	if (sysctl_tcp_syncookies)
-		printk(KERN_INFO
-		       "TCPv6: Possible SYN flooding on port %d. "
-		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
-	else
-#endif
-		printk(KERN_INFO
-		       "TCPv6: Possible SYN flooding on port %d. "
-		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
-}
-
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
 	kfree_skb(inet6_rsk(req)->pktopts);
@@ -1179,11 +1165,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 isn = TCP_SKB_CB(skb)->when;
 	struct dst_entry *dst = NULL;
-#ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
-#else
-#define want_cookie 0
-#endif
 
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_conn_request(sk, skb);
@@ -1192,14 +1174,9 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 
 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-		if (net_ratelimit())
-			syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-		if (sysctl_tcp_syncookies)
-			want_cookie = 1;
-		else
-#endif
-		goto drop;
+		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
+		if (!want_cookie)
+			goto drop;
 	}
 
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
@@ -1249,9 +1226,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 			while (l-- > 0)
 				*c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
 			want_cookie = 0;	/* not our kind of cookie */
-#endif
 			tmp_ext.cookie_out_never = 0; /* false */
 			tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 		} else if (!tp->rx_opt.cookie_in_always) {
...
@@ -1090,8 +1090,8 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(*opt);
 
-		err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
-					&tclass, &dontfrag);
+		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+					&hlimit, &tclass, &dontfrag);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
...
@@ -40,9 +40,9 @@ extern int sysctl_slot_timeout;
 extern int sysctl_fast_poll_increase;
 extern char sysctl_devname[];
 extern int sysctl_max_baud_rate;
-extern int sysctl_min_tx_turn_time;
-extern int sysctl_max_tx_data_size;
-extern int sysctl_max_tx_window;
+extern unsigned int sysctl_min_tx_turn_time;
+extern unsigned int sysctl_max_tx_data_size;
+extern unsigned int sysctl_max_tx_window;
 extern int sysctl_max_noreply_time;
 extern int sysctl_warn_noreply_time;
 extern int sysctl_lap_keepalive_time;
...
@@ -60,7 +60,7 @@ int sysctl_max_noreply_time = 12;
  * Default is 10us which means using the unmodified value given by the
  * peer except if it's 0 (0 is likely a bug in the other stack).
  */
-unsigned sysctl_min_tx_turn_time = 10;
+unsigned int sysctl_min_tx_turn_time = 10;
 
 /*
  * Maximum data size to be used in transmission in payload of LAP frame.
 * There is a bit of confusion in the IrDA spec :
@@ -75,13 +75,13 @@ unsigned sysctl_min_tx_turn_time = 10;
  * bytes frames or all negotiated frame sizes, but you can use the sysctl
  * to play with this value anyway.
  * Jean II */
-unsigned sysctl_max_tx_data_size = 2042;
+unsigned int sysctl_max_tx_data_size = 2042;
 
 /*
  * Maximum transmit window, i.e. number of LAP frames between turn-around.
  * This allow to override what the peer told us. Some peers are buggy and
  * don't always support what they tell us.
  * Jean II */
-unsigned sysctl_max_tx_window = 7;
+unsigned int sysctl_max_tx_window = 7;
 
 static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
 static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
...
@@ -665,7 +665,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 		BUG_ON(!sdata->bss);
 
 		atomic_dec(&sdata->bss->num_sta_ps);
-		__sta_info_clear_tim_bit(sdata->bss, sta);
+		sta_info_clear_tim_bit(sta);
 	}
 
 	local->num_sta--;
...
@@ -364,6 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
 		break;
 
 	case PPTP_WAN_ERROR_NOTIFY:
+	case PPTP_SET_LINK_INFO:
 	case PPTP_ECHO_REQUEST:
 	case PPTP_ECHO_REPLY:
 		/* I don't have to explain these ;) */
...
@@ -409,7 +409,7 @@ static void tcp_options(const struct sk_buff *skb,
 			if (opsize < 2) /* "silly options" */
 				return;
 			if (opsize > length)
-				break;	/* don't parse partial options */
+				return;	/* don't parse partial options */
 
 			if (opcode == TCPOPT_SACK_PERM
 			    && opsize == TCPOLEN_SACK_PERM)
@@ -447,7 +447,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
 	BUG_ON(ptr == NULL);
 
 	/* Fast path for timestamp-only option */
-	if (length == TCPOLEN_TSTAMP_ALIGNED*4
+	if (length == TCPOLEN_TSTAMP_ALIGNED
 	    && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
 				       | (TCPOPT_NOP << 16)
 				       | (TCPOPT_TIMESTAMP << 8)
@@ -469,7 +469,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
 			if (opsize < 2) /* "silly options" */
 				return;
 			if (opsize > length)
-				break;	/* don't parse partial options */
+				return;	/* don't parse partial options */
 
 			if (opcode == TCPOPT_SACK
 			    && opsize >= (TCPOLEN_SACK_BASE
...
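Both hunks above replace break with return once an option claims more bytes than remain: a break merely skips the current option and keeps parsing the same untrustworthy tail. A standalone sketch of the corrected walk (a hypothetical helper, not the kernel function):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Returns how many well-formed options precede the first malformed one. */
static int count_tcp_options(const uint8_t *opt, size_t length)
{
	int n = 0;

	while (length > 0) {
		uint8_t opcode = *opt++;
		uint8_t opsize;

		if (opcode == 0)		/* TCPOPT_EOL */
			return n;
		if (opcode == 1) {		/* TCPOPT_NOP */
			length--;
			continue;
		}
		if (length < 2)
			return n;		/* no room for a length byte */
		opsize = *opt++;
		if (opsize < 2)			/* "silly options" */
			return n;
		if (opsize > length)
			return n;		/* truncated: stop, don't skip */
		n++;
		opt += opsize - 2;
		length -= opsize;
	}
	return n;
}

int main(void)
{
	/* MSS (kind 2, len 4), then a timestamp option claiming 10 bytes
	 * with only 3 left: the parser must stop after the MSS option. */
	const uint8_t opts[] = { 2, 4, 0x05, 0xb4, 8, 10, 1 };

	printf("well-formed options: %d\n",
	       count_tcp_options(opts, sizeof(opts)));
	return 0;
}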
@@ -646,8 +646,8 @@ verdicthdr_get(const struct nlattr * const nfqa[])
 		return NULL;
 
 	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
-	verdict = ntohl(vhdr->verdict);
-	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT)
+	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
+	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
 		return NULL;
 	return vhdr;
 }
...
@@ -78,7 +78,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 {
 	struct xt_rateest_match_info *info = par->matchinfo;
 	struct xt_rateest *est1, *est2;
-	int ret = false;
+	int ret = -EINVAL;
 
 	if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
 				     XT_RATEEST_MATCH_REL)) != 1)
@@ -101,13 +101,12 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 	if (!est1)
 		goto err1;
 
+	est2 = NULL;
 	if (info->flags & XT_RATEEST_MATCH_REL) {
 		est2 = xt_rateest_lookup(info->name2);
 		if (!est2)
 			goto err2;
-	} else
-		est2 = NULL;
+	}
 
 	info->est1 = est1;
 	info->est2 = est2;
@@ -116,7 +115,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 err2:
 	xt_rateest_put(est1);
 err1:
-	return -EINVAL;
+	return ret;
 }
 
 static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
...
@@ -425,7 +425,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	struct rsvp_filter *f, **fp;
 	struct rsvp_session *s, **sp;
 	struct tc_rsvp_pinfo *pinfo = NULL;
-	struct nlattr *opt = tca[TCA_OPTIONS-1];
+	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_RSVP_MAX + 1];
 	struct tcf_exts e;
 	unsigned int h1, h2;
@@ -439,7 +439,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	if (err < 0)
 		return err;
 
-	err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map);
+	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
 	if (err < 0)
 		return err;
@@ -449,8 +449,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 		if (f->handle != handle && handle)
 			goto errout2;
-		if (tb[TCA_RSVP_CLASSID-1]) {
-			f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+		if (tb[TCA_RSVP_CLASSID]) {
+			f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
 			tcf_bind_filter(tp, &f->res, base);
 		}
@@ -462,7 +462,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	err = -EINVAL;
 	if (handle)
 		goto errout2;
-	if (tb[TCA_RSVP_DST-1] == NULL)
+	if (tb[TCA_RSVP_DST] == NULL)
 		goto errout2;
 
 	err = -ENOBUFS;
@@ -471,19 +471,19 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 		goto errout2;
 
 	h2 = 16;
-	if (tb[TCA_RSVP_SRC-1]) {
-		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src));
+	if (tb[TCA_RSVP_SRC]) {
+		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
 		h2 = hash_src(f->src);
 	}
-	if (tb[TCA_RSVP_PINFO-1]) {
-		pinfo = nla_data(tb[TCA_RSVP_PINFO-1]);
+	if (tb[TCA_RSVP_PINFO]) {
+		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
 		f->spi = pinfo->spi;
 		f->tunnelhdr = pinfo->tunnelhdr;
 	}
-	if (tb[TCA_RSVP_CLASSID-1])
-		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+	if (tb[TCA_RSVP_CLASSID])
+		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
 
-	dst = nla_data(tb[TCA_RSVP_DST-1]);
+	dst = nla_data(tb[TCA_RSVP_DST]);
 	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
 
 	err = -ENOMEM;
@@ -642,8 +642,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
 	return -1;
 }
 
-static struct tcf_proto_ops RSVP_OPS = {
-	.next		= NULL,
+static struct tcf_proto_ops RSVP_OPS __read_mostly = {
 	.kind		= RSVP_ID,
 	.classify	= rsvp_classify,
 	.init		= rsvp_init,
...
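For readers wondering about the mass "-1" removal above: with the nla_parse()/nla_parse_nested() API, the attribute table is indexed by the attribute type itself, tb[TCA_RSVP_DST]; the "TCA_FOO - 1" convention belonged to the legacy rtattr arrays this classifier was converted from, and keeping it made every lookup read its neighbour's slot. A schematic kernel-style fragment (the rsvp_policy name is assumed; this is not the full classifier):

	struct nlattr *tb[TCA_RSVP_MAX + 1];
	int err;

	/* Fills tb[] so that tb[type] is the attribute of that type,
	 * or NULL; nothing is ever stored at tb[type - 1]. */
	err = nla_parse_nested(tb, TCA_RSVP_MAX, opt, rsvp_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RSVP_CLASSID])	/* index by type, no off-by-one */
		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);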
@@ -1689,6 +1689,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 		case SCTP_CMD_PURGE_ASCONF_QUEUE:
 			sctp_asconf_queue_teardown(asoc);
 			break;
+
+		case SCTP_CMD_SET_ASOC:
+			asoc = cmd->obj.asoc;
+			break;
+
 		default:
 			pr_warn("Impossible command: %u, %p\n",
 				cmd->verb, cmd->obj.ptr);
...
@@ -2047,6 +2047,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
+	/* Restore association pointer to provide SCTP command interpreter
+	 * with a valid context in case it needs to manipulate
+	 * the queues */
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
+			SCTP_ASOC((struct sctp_association *)asoc));
+
 	return retval;
 
 nomem:
...