openanolis / cloud-kernel
Commit 7bece815
Authored May 23, 2008 by David S. Miller

Merge branch 'upstream-davem' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

Parents: b9a2f2e4, bdefff1f
Changes: 48 changed files with 853 additions and 576 deletions (+853 -576)
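The per-file summary below (and the patches that follow) can also be reproduced from a local clone of the repository with ordinary git commands. A minimal sketch, assuming a clone in which this commit and its first parent b9a2f2e4 are reachable:

    # Commit message, author and date
    git log -1 7bece815

    # Per-file additions/deletions relative to the first parent
    git diff --stat b9a2f2e4 7bece815

    # Full patch for a single file, e.g. the cpmac driver
    git diff b9a2f2e4 7bece815 -- drivers/net/cpmac.c

The +853/-576 totals shown above appear to be measured against the first parent; diffing against the second parent (bdefff1f) would show the other side of the merge.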
drivers/net/3c509.c                   +1    -1
drivers/net/au1000_eth.c              +1    -6
drivers/net/bfin_mac.c                +0    -1
drivers/net/cpmac.c                 +179   -55
drivers/net/dm9000.c                  +1    -1
drivers/net/e1000e/netdev.c           +2    -2
drivers/net/ehea/ehea_main.c          +2    -3
drivers/net/forcedeth.c               +1    -0
drivers/net/fs_enet/fs_enet-main.c    +1    -1
drivers/net/hamradio/scc.c            +2    -1
drivers/net/myri10ge/myri10ge.c       +1    -1
drivers/net/pcmcia/fmvj18x_cs.c       +3    -1
drivers/net/pcmcia/xirc2ps_cs.c       +8    -4
drivers/net/pcnet32.c                 +2    -2
drivers/net/phy/Kconfig               +1    -1
drivers/net/phy/phy_device.c          +1    -0
drivers/net/s2io-regs.h               +1    -1
drivers/net/s2io.c                  +293  -201
drivers/net/s2io.h                   +15    -7
drivers/net/sb1250-mac.c             +31   -36
drivers/net/sc92031.c                 +5    -3
drivers/net/sfc/bitfield.h            +2    -5
drivers/net/sfc/boards.c              +3    -6
drivers/net/sfc/efx.c                +40   -44
drivers/net/sfc/falcon.c             +38   -49
drivers/net/sfc/falcon.h              +4    -1
drivers/net/sfc/falcon_hwdefs.h       +2    -2
drivers/net/sfc/falcon_io.h          +21    -8
drivers/net/sfc/falcon_xmac.c         +5    -5
drivers/net/sfc/net_driver.h         +22   -22
drivers/net/sfc/rx.c                 +27   -21
drivers/net/sfc/selftest.c            +8    -6
drivers/net/sfc/sfe4001.c             +7    -7
drivers/net/sfc/tenxpress.c           +3    -1
drivers/net/sfc/tx.c                  +7    -4
drivers/net/sfc/workarounds.h         +1    -1
drivers/net/sfc/xfp_phy.c             +3    -1
drivers/net/sky2.c                   +19   -10
drivers/net/tokenring/3c359.h         +1    -1
drivers/net/tokenring/olympic.h       +1    -1
drivers/net/tulip/uli526x.c          +15    -1
drivers/net/ucc_geth.c                +5    -4
drivers/net/usb/asix.c                +4    -0
drivers/net/usb/rndis_host.c          +1    -1
drivers/net/virtio_net.c              +1    -2
drivers/net/wan/hdlc.c               +11    -8
drivers/net/wan/hdlc_cisco.c         +49   -33
drivers/net/xen-netfront.c            +2    -4
drivers/net/3c509.c

@@ -1063,7 +1063,6 @@ el3_rx(struct net_device *dev)
 			struct sk_buff *skb;
 
 			skb = dev_alloc_skb(pkt_len+5);
-			dev->stats.rx_bytes += pkt_len;
 			if (el3_debug > 4)
 				printk("Receiving packet size %d status %4.4x.\n",
 				       pkt_len, rx_status);
@@ -1078,6 +1077,7 @@ el3_rx(struct net_device *dev)
 				skb->protocol = eth_type_trans(skb, dev);
 				netif_rx(skb);
 				dev->last_rx = jiffies;
+				dev->stats.rx_bytes += pkt_len;
 				dev->stats.rx_packets++;
 				continue;
 			}
drivers/net/au1000_eth.c

@@ -1239,12 +1239,7 @@ static int au1000_rx(struct net_device *dev)
  */
 static irqreturn_t au1000_interrupt(int irq, void *dev_id)
 {
-	struct net_device *dev = (struct net_device *) dev_id;
-
-	if (dev == NULL) {
-		printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name);
-		return IRQ_RETVAL(1);
-	}
+	struct net_device *dev = dev_id;
 
 	/* Handle RX interrupts first to minimize chance of overrun */
drivers/net/bfin_mac.c

@@ -22,7 +22,6 @@
 #include <linux/crc32.h>
 #include <linux/device.h>
 #include <linux/spinlock.h>
-#include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/netdevice.h>
drivers/net/cpmac.c

@@ -38,6 +38,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <asm/gpio.h>
+#include <asm/atomic.h>
 
 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
@@ -187,6 +188,7 @@ struct cpmac_desc {
 #define CPMAC_EOQ			0x1000
 	struct sk_buff *skb;
 	struct cpmac_desc *next;
+	struct cpmac_desc *prev;
 	dma_addr_t mapping;
 	dma_addr_t data_mapping;
 };
@@ -208,6 +210,7 @@ struct cpmac_priv {
 	struct work_struct reset_work;
 	struct platform_device *pdev;
 	struct napi_struct napi;
+	atomic_t reset_pending;
 };
 
 static irqreturn_t cpmac_irq(int, void *);
@@ -241,6 +244,16 @@ static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
 	printk("\n");
 }
 
+static void cpmac_dump_all_desc(struct net_device *dev)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+	struct cpmac_desc *dump = priv->rx_head;
+	do {
+		cpmac_dump_desc(dev, dump);
+		dump = dump->next;
+	} while (dump != priv->rx_head);
+}
+
 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	int i;
@@ -412,21 +425,42 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
 static int cpmac_poll(struct napi_struct *napi, int budget)
 {
 	struct sk_buff *skb;
-	struct cpmac_desc *desc;
-	int received = 0;
+	struct cpmac_desc *desc, *restart;
 	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
+	int received = 0, processed = 0;
 
 	spin_lock(&priv->rx_lock);
 	if (unlikely(!priv->rx_head)) {
 		if (netif_msg_rx_err(priv) && net_ratelimit())
 			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
 			       priv->dev->name);
+		spin_unlock(&priv->rx_lock);
 		netif_rx_complete(priv->dev, napi);
 		return 0;
 	}
 
 	desc = priv->rx_head;
+	restart = NULL;
 	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+		processed++;
+
+		if ((desc->dataflags & CPMAC_EOQ) != 0) {
+			/* The last update to eoq->hw_next didn't happen
+			 * soon enough, and the receiver stopped here.
+			 * Remember this descriptor so we can restart
+			 * the receiver after freeing some space.
+			 */
+			if (unlikely(restart)) {
+				if (netif_msg_rx_err(priv))
+					printk(KERN_ERR "%s: poll found a"
+					       " duplicate EOQ: %p and %p\n",
+					       priv->dev->name, restart, desc);
+				goto fatal_error;
+			}
+
+			restart = desc->next;
+		}
+
 		skb = cpmac_rx_one(priv, desc);
 		if (likely(skb)) {
 			netif_receive_skb(skb);
@@ -435,19 +469,90 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
 		desc = desc->next;
 	}
 
+	if (desc != priv->rx_head) {
+		/* We freed some buffers, but not the whole ring,
+		 * add what we did free to the rx list */
+		desc->prev->hw_next = (u32)0;
+		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
+	}
+
+	/* Optimization: If we did not actually process an EOQ (perhaps because
+	 * of quota limits), check to see if the tail of the queue has EOQ set.
+	 * We should immediately restart in that case so that the receiver can
+	 * restart and run in parallel with more packet processing.
+	 * This lets us handle slightly larger bursts before running
+	 * out of ring space (assuming dev->weight < ring_size) */
+	if (!restart &&
+	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
+		    == CPMAC_EOQ &&
+	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
+		/* reset EOQ so the poll loop (above) doesn't try to
+		 * restart this when it eventually gets to this descriptor.
+		 */
+		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
+		restart = priv->rx_head;
+	}
+
+	if (restart) {
+		priv->dev->stats.rx_errors++;
+		priv->dev->stats.rx_fifo_errors++;
+		if (netif_msg_rx_err(priv) && net_ratelimit())
+			printk(KERN_WARNING "%s: rx dma ring overrun\n",
+			       priv->dev->name);
+
+		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
+			if (netif_msg_drv(priv))
+				printk(KERN_ERR "%s: cpmac_poll is trying to "
+					"restart rx from a descriptor that's "
+					"not free: %p\n",
+					priv->dev->name, restart);
+			goto fatal_error;
+		}
+
+		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
+	}
+
 	priv->rx_head = desc;
 	spin_unlock(&priv->rx_lock);
 	if (unlikely(netif_msg_rx_status(priv)))
 		printk(KERN_DEBUG "%s: poll processed %d packets\n",
 		       priv->dev->name, received);
-	if (desc->dataflags & CPMAC_OWN) {
+
+	if (processed == 0) {
+		/* we ran out of packets to read,
+		 * revert to interrupt-driven mode */
 		netif_rx_complete(priv->dev, napi);
-		cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
 		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
 		return 0;
 	}
 
 	return 1;
+
+fatal_error:
+	/* Something went horribly wrong.
+	 * Reset hardware to try to recover rather than wedging. */
+
+	if (netif_msg_drv(priv)) {
+		printk(KERN_ERR "%s: cpmac_poll is confused. "
+				"Resetting hardware\n", priv->dev->name);
+		cpmac_dump_all_desc(priv->dev);
+		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
+			priv->dev->name,
+			cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
+			cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
+	}
+
+	spin_unlock(&priv->rx_lock);
+	netif_rx_complete(priv->dev, napi);
+	netif_stop_queue(priv->dev);
+	napi_disable(&priv->napi);
+
+	atomic_inc(&priv->reset_pending);
+	cpmac_hw_stop(priv->dev);
+	if (!schedule_work(&priv->reset_work))
+		atomic_dec(&priv->reset_pending);
+	return 0;
+
 }
 
 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -456,6 +561,9 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct cpmac_desc *desc;
 	struct cpmac_priv *priv = netdev_priv(dev);
 
+	if (unlikely(atomic_read(&priv->reset_pending)))
+		return NETDEV_TX_BUSY;
+
 	if (unlikely(skb_padto(skb, ETH_ZLEN)))
 		return NETDEV_TX_OK;
@@ -621,8 +729,10 @@ static void cpmac_clear_rx(struct net_device *dev)
 			desc->dataflags = CPMAC_OWN;
 			dev->stats.rx_dropped++;
 		}
+		desc->hw_next = desc->next->mapping;
 		desc = desc->next;
 	}
+	priv->rx_head->prev->hw_next = 0;
 }
 
 static void cpmac_clear_tx(struct net_device *dev)
@@ -635,14 +745,14 @@ static void cpmac_clear_tx(struct net_device *dev)
 		priv->desc_ring[i].dataflags = 0;
 		if (priv->desc_ring[i].skb) {
 			dev_kfree_skb_any(priv->desc_ring[i].skb);
-			if (netif_subqueue_stopped(dev, i))
-				netif_wake_subqueue(dev, i);
+			priv->desc_ring[i].skb = NULL;
 		}
 	}
 }
 
 static void cpmac_hw_error(struct work_struct *work)
 {
+	int i;
 	struct cpmac_priv *priv =
 		container_of(work, struct cpmac_priv, reset_work);
@@ -651,8 +761,48 @@ static void cpmac_hw_error(struct work_struct *work)
 	spin_unlock(&priv->rx_lock);
 	cpmac_clear_tx(priv->dev);
 	cpmac_hw_start(priv->dev);
-	napi_enable(&priv->napi);
-	netif_start_queue(priv->dev);
+	barrier();
+	atomic_dec(&priv->reset_pending);
+
+	for (i = 0; i < CPMAC_QUEUES; i++)
+		netif_wake_subqueue(priv->dev, i);
+	netif_wake_queue(priv->dev);
+	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+}
+
+static void cpmac_check_status(struct net_device *dev)
+{
+	struct cpmac_priv *priv = netdev_priv(dev);
+
+	u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
+	int rx_channel = (macstatus >> 8) & 7;
+	int rx_code = (macstatus >> 12) & 15;
+	int tx_channel = (macstatus >> 16) & 7;
+	int tx_code = (macstatus >> 20) & 15;
+
+	if (rx_code || tx_code) {
+		if (netif_msg_drv(priv) && net_ratelimit()) {
+			/* Can't find any documentation on what these
+			 * error codes actually are. So just log them and hope..
+			 */
+			if (rx_code)
+				printk(KERN_WARNING "%s: host error %d on rx "
+				     "channel %d (macstatus %08x), resetting\n",
+				     dev->name, rx_code, rx_channel, macstatus);
+			if (tx_code)
+				printk(KERN_WARNING "%s: host error %d on tx "
+				     "channel %d (macstatus %08x), resetting\n",
+				     dev->name, tx_code, tx_channel, macstatus);
+		}
+
+		netif_stop_queue(dev);
+		cpmac_hw_stop(dev);
+		if (schedule_work(&priv->reset_work))
+			atomic_inc(&priv->reset_pending);
+		if (unlikely(netif_msg_hw(priv)))
+			cpmac_dump_regs(dev);
+	}
+	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
 }
 
 static irqreturn_t cpmac_irq(int irq, void *dev_id)
@@ -683,49 +833,32 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
 	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
 
-	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) {
-		if (netif_msg_drv(priv) && net_ratelimit())
-			printk(KERN_ERR "%s: hw error, resetting...\n",
-			       dev->name);
-		netif_stop_queue(dev);
-		napi_disable(&priv->napi);
-		cpmac_hw_stop(dev);
-		schedule_work(&priv->reset_work);
-		if (unlikely(netif_msg_hw(priv)))
-			cpmac_dump_regs(dev);
-	}
+	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
+		cpmac_check_status(dev);
 
 	return IRQ_HANDLED;
 }
 
 static void cpmac_tx_timeout(struct net_device *dev)
 {
-	struct cpmac_priv *priv = netdev_priv(dev);
-	int i;
+	int i;
+	struct cpmac_priv *priv = netdev_priv(dev);
 
 	spin_lock(&priv->lock);
 	dev->stats.tx_errors++;
 	spin_unlock(&priv->lock);
 	if (netif_msg_tx_err(priv) && net_ratelimit())
 		printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
-	/*
-	 * FIXME: waking up random queue is not the best thing to
-	 * do... on the other hand why we got here at all?
-	 */
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+
+	atomic_inc(&priv->reset_pending);
+	barrier();
+	cpmac_clear_tx(dev);
+	barrier();
+	atomic_dec(&priv->reset_pending);
+
+	netif_wake_queue(priv->dev);
 	for (i = 0; i < CPMAC_QUEUES; i++)
-		if (priv->desc_ring[i].skb) {
-			priv->desc_ring[i].dataflags = 0;
-			dev_kfree_skb_any(priv->desc_ring[i].skb);
-			netif_wake_subqueue(dev, i);
-			break;
-		}
-#else
-	priv->desc_ring[0].dataflags = 0;
-	if (priv->desc_ring[0].skb)
-		dev_kfree_skb_any(priv->desc_ring[0].skb);
-	netif_wake_queue(dev);
-#endif
+		netif_wake_subqueue(dev, i);
 }
 
 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -901,9 +1034,12 @@ static int cpmac_open(struct net_device *dev)
 		desc->buflen = CPMAC_SKB_SIZE;
 		desc->dataflags = CPMAC_OWN;
 		desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
+		desc->next->prev = desc;
 		desc->hw_next = (u32)desc->next->mapping;
 	}
 
+	priv->rx_head->prev->hw_next = (u32)0;
+
 	if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
 			       dev->name, dev))) {
 		if (netif_msg_drv(priv))
@@ -912,6 +1048,7 @@ static int cpmac_open(struct net_device *dev)
 		goto fail_irq;
 	}
 
+	atomic_set(&priv->reset_pending, 0);
 	INIT_WORK(&priv->reset_work, cpmac_hw_error);
 	cpmac_hw_start(dev);
@@ -1007,21 +1144,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
 	if (phy_id == PHY_MAX_ADDR) {
 		if (external_switch || dumb_switch) {
-			struct fixed_phy_status status = {};
-
-			/*
-			 * FIXME: this should be in the platform code!
-			 * Since there is not platform code at all (that is,
-			 * no mainline users of that driver), place it here
-			 * for now.
-			 */
-			phy_id = 0;
-			status.link = 1;
-			status.duplex = 1;
-			status.speed = 100;
-			fixed_phy_add(PHY_POLL, phy_id, &status);
+			mdio_bus_id = 0; /* fixed phys bus */
+			phy_id = pdev->id;
 		} else {
-			printk(KERN_ERR "cpmac: no PHY present\n");
+			dev_err(&pdev->dev, "no PHY present\n");
 			return -ENODEV;
 		}
 	}
@@ -1064,10 +1190,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
 	priv->msg_enable = netif_msg_init(debug_level, 0xff);
 	memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
 
-	priv->phy = phy_connect(dev, cpmac_mii.phy_map[phy_id]->dev.bus_id,
-				&cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+	snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
+
+	priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
+				PHY_INTERFACE_MODE_MII);
 	if (IS_ERR(priv->phy)) {
 		if (netif_msg_drv(priv))
 			printk(KERN_ERR "%s: Could not attach to PHY\n",
drivers/net/dm9000.c

@@ -903,7 +903,7 @@ dm9000_stop(struct net_device *ndev)
 	if (netif_msg_ifdown(db))
 		dev_dbg(db->dev, "shutting down %s\n", ndev->name);
 
-	cancel_delayed_work(&db->phy_poll);
+	cancel_delayed_work_sync(&db->phy_poll);
 
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
drivers/net/e1000e/netdev.c

@@ -4201,8 +4201,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	struct e1000_adapter *adapter;
 	struct e1000_hw *hw;
 	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
-	unsigned long mmio_start, mmio_len;
-	unsigned long flash_start, flash_len;
+	resource_size_t mmio_start, mmio_len;
+	resource_size_t flash_start, flash_len;
 
 	static int cards_found;
 	int i, err, pci_using_dac;
drivers/net/ehea/ehea_main.c

@@ -2213,8 +2213,6 @@ static void ehea_vlan_rx_register(struct net_device *dev,
 		goto out;
 	}
 
-	memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
-
 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS)
@@ -3178,11 +3176,12 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 
 static void ehea_shutdown_single_port(struct ehea_port *port)
 {
+	struct ehea_adapter *adapter = port->adapter;
 	unregister_netdev(port->netdev);
 	ehea_unregister_port(port);
 	kfree(port->mc_list);
 	free_netdev(port->netdev);
-	port->adapter->active_ports--;
+	adapter->active_ports--;
 }
 
 static int ehea_setup_ports(struct ehea_adapter *adapter)
drivers/net/forcedeth.c

@@ -5823,6 +5823,7 @@ static int nv_resume(struct pci_dev *pdev)
 	writel(txreg, base + NvRegTransmitPoll);
 
 	rc = nv_open(dev);
+	nv_set_multicast(dev);
 out:
 	return rc;
 }
drivers/net/fs_enet/fs_enet-main.c

@@ -1093,7 +1093,7 @@ static struct net_device *fs_init_instance(struct device *dev,
 	if (registered)
 		unregister_netdev(ndev);
 
-	if (fep != NULL) {
+	if (fep && fep->ops) {
 		(*fep->ops->free_bd)(ndev);
 		(*fep->ops->cleanup_data)(ndev);
 	}
drivers/net/hamradio/scc.c

@@ -1340,9 +1340,10 @@ static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, uns
 		case PARAM_RTS:	
 			if ( !(scc->wreg[R5] & RTS) )
 			{
 				if (arg != TX_OFF)
+				{
 					scc_key_trx(scc, TX_ON);
 					scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
+				}
 			} else {
 				if (arg == TX_OFF)
 				{
drivers/net/myri10ge/myri10ge.c

@@ -631,7 +631,7 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
 	return status;
 }
 
-int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
+static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
 {
 	struct myri10ge_cmd cmd;
 	int status;
drivers/net/pcmcia/fmvj18x_cs.c

@@ -391,7 +391,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
 	    cardtype = CONTEC;
 	    break;
 	case MANFID_FUJITSU:
-	    if (link->card_id == PRODID_FUJITSU_MBH10302)
+	    if (link->conf.ConfigBase == 0x0fe0)
+		cardtype = MBH10302;
+	    else if (link->card_id == PRODID_FUJITSU_MBH10302)
                 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302),
                    but these are MBH10304 based card. */
 		cardtype = MBH10304;
drivers/net/pcmcia/xirc2ps_cs.c

@@ -1461,22 +1461,25 @@ static void
 set_multicast_list(struct net_device *dev)
 {
     unsigned int ioaddr = dev->base_addr;
+    unsigned value;
 
     SelectPage(0x42);
+    value = GetByte(XIRCREG42_SWC1) & 0xC0;
+
     if (dev->flags & IFF_PROMISC) { /* snoop */
-	PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */
+	PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
     } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
-	PutByte(XIRCREG42_SWC1, 0x02); /* set MPE */
+	PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
     } else if (dev->mc_count) {
 	/* the chip can filter 9 addresses perfectly */
-	PutByte(XIRCREG42_SWC1, 0x01);
+	PutByte(XIRCREG42_SWC1, value | 0x01);
 	SelectPage(0x40);
 	PutByte(XIRCREG40_CMD0, Offline);
 	set_addresses(dev);
 	SelectPage(0x40);
 	PutByte(XIRCREG40_CMD0, EnableRecv | Online);
     } else { /* standard usage */
-	PutByte(XIRCREG42_SWC1, 0x00);
+	PutByte(XIRCREG42_SWC1, value | 0x00);
     }
     SelectPage(0);
 }
@@ -1722,6 +1725,7 @@ do_reset(struct net_device *dev, int full)
 
     /* enable receiver and put the mac online */
     if (full) {
+	set_multicast_list(dev);
 	SelectPage(0x40);
 	PutByte(XIRCREG40_CMD0, EnableRecv | Online);
     }
drivers/net/pcnet32.c

@@ -325,7 +325,7 @@ static int pcnet32_get_regs_len(struct net_device *dev);
 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 			     void *ptr);
 static void pcnet32_purge_tx_ring(struct net_device *dev);
-static int pcnet32_alloc_ring(struct net_device *dev, char *name);
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
 static void pcnet32_free_ring(struct net_device *dev);
 static void pcnet32_check_media(struct net_device *dev, int verbose);

@@ -1983,7 +1983,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 }
 
 /* if any allocation fails, caller must also call pcnet32_free_ring */
-static int pcnet32_alloc_ring(struct net_device *dev, char *name)
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
 {
 	struct pcnet32_private *lp = netdev_priv(dev);
drivers/net/phy/Kconfig

@@ -5,7 +5,7 @@
 menuconfig PHYLIB
 	tristate "PHY Device support and infrastructure"
 	depends on !S390
-	depends on NET_ETHERNET && (BROKEN || !S390)
+	depends on NET_ETHERNET
 	help
 	  Ethernet controllers are usually attached to PHY
 	  devices.  This option provides infrastructure for
drivers/net/phy/phy_device.c

@@ -207,6 +207,7 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
 
 	return 0;
 }
+EXPORT_SYMBOL(get_phy_id);
 
 /**
  * get_phy_device - reads the specified PHY device and returns its @phy_device struct
drivers/net/s2io-regs.h

@@ -250,7 +250,7 @@ struct XENA_dev_config {
 	u64 tx_mat0_n[0x8];
 #define TX_MAT_SET(fifo, msi)		vBIT(msi, (8 * fifo), 8)
 
-	u8 unused_1[0x8];
+	u64 xmsi_mask_reg;
 	u64 stat_byte_cnt;
 #define	STAT_BC(n)              vBIT(n,4,12)
drivers/net/s2io.c

(diff collapsed on the original page; contents not shown)
drivers/net/s2io.h

@@ -706,7 +706,7 @@ struct ring_info {
+	/* per-ring buffer counter */
 	u32 rx_bufs_left;
 
 #define MAX_LRO_SESSIONS	32
 	struct lro lro0_n[MAX_LRO_SESSIONS];
 	u8		lro;
@@ -725,6 +725,11 @@ struct ring_info {
 	/* copy of sp->pdev pointer */
 	struct pci_dev *pdev;
 
+	/* Per ring napi struct */
+	struct napi_struct napi;
+
+	unsigned long interrupt_count;
+
 	/*
 	 *  Place holders for the virtual and physical addresses of
 	 *  all the Rx Blocks
@@ -841,7 +846,7 @@ struct usr_addr {
 * Structure to keep track of the MSI-X vectors and the corresponding
 * argument registered against each vector
 */
-#define MAX_REQUESTED_MSI_X	17
+#define MAX_REQUESTED_MSI_X	9
 struct s2io_msix_entry
 {
 	u16 vector;
@@ -849,8 +854,8 @@ struct s2io_msix_entry
 	void *arg;
 
 	u8 type;
-#define	MSIX_FIFO_TYPE	1
+#define	MSIX_ALARM_TYPE	1
 #define	MSIX_RING_TYPE	2
 
 	u8 in_use;
#define MSIX_REGISTERED_SUCCESS	0xAA
@@ -877,7 +882,6 @@ struct s2io_nic {
 	 */
 	int pkts_to_process;
 	struct net_device *dev;
-	struct napi_struct napi;
 	struct mac_info mac_control;
 	struct config_param config;
 	struct pci_dev *pdev;
@@ -948,6 +952,7 @@ struct s2io_nic {
 	 */
 	u8 other_fifo_idx;
 
+	struct napi_struct napi;
 	/*  after blink, the adapter must be restored with original
 	 *  values.
 	 */
@@ -962,6 +967,7 @@ struct s2io_nic {
 	unsigned long long start_time;
 	struct vlan_group *vlgrp;
#define MSIX_FLG                0xA5
+	int num_entries;
 	struct msix_entry *entries;
 	int msi_detected;
 	wait_queue_head_t msi_wait;
@@ -982,6 +988,7 @@ struct s2io_nic {
 	u16		lro_max_aggr_per_sess;
 	volatile unsigned long state;
 	u64		general_int_mask;
#define VPD_STRING_LEN 80
 	u8  product_name[VPD_STRING_LEN];
 	u8  serial_num[VPD_STRING_LEN];
@@ -1103,7 +1110,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
 static int init_shared_mem(struct s2io_nic *sp);
 static void free_shared_mem(struct s2io_nic *sp);
 static int init_nic(struct s2io_nic *nic);
-static void rx_intr_handler(struct ring_info *ring_data);
+static int rx_intr_handler(struct ring_info *ring_data, int budget);
 static void tx_intr_handler(struct fifo_info *fifo_data);
 static void s2io_handle_errors(void *dev_id);
@@ -1114,7 +1121,8 @@ static void s2io_set_multicast(struct net_device *dev);
 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
 static void s2io_link(struct s2io_nic *sp, int link);
 static void s2io_reset(struct s2io_nic *sp);
-static int s2io_poll(struct napi_struct *napi, int budget);
+static int s2io_poll_msix(struct napi_struct *napi, int budget);
+static int s2io_poll_inta(struct napi_struct *napi, int budget);
 static void s2io_init_pci(struct s2io_nic *sp);
 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
 static void s2io_alarm_handle(unsigned long data);
drivers/net/sb1250-mac.c

@@ -179,8 +179,7 @@ enum sbmac_state {
 #define SBMAC_MAX_TXDESCR	256
 #define SBMAC_MAX_RXDESCR	256
 
-#define ETHER_ALIGN	2
 #define ETHER_ADDR_LEN	6
 #define ENET_PACKET_SIZE	1518
 /*#define ENET_PACKET_SIZE	9216 */
@@ -262,8 +261,6 @@ struct sbmac_softc {
 	spinlock_t	sbm_lock;	/* spin lock */
 	int		sbm_devflags;	/* current device flags */
 
-	int		sbm_buffersize;
-
 	/*
 	 * Controller-specific things
 	 */
@@ -305,10 +302,11 @@ struct sbmac_softc {
 static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
 			  int txrx, int maxdescr);
 static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
-static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m);
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+			       struct sk_buff *m);
 static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
 static void sbdma_emptyring(struct sbmacdma *d);
-static void sbdma_fillring(struct sbmacdma *d);
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
 static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
 			    int work_to_do, int poll);
 static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
@@ -777,16 +775,13 @@ static void sbdma_channel_stop(struct sbmacdma *d)
 	d->sbdma_remptr = NULL;
 }
 
-static void sbdma_align_skb(struct sk_buff *skb, int power2, int offset)
+static inline void sbdma_align_skb(struct sk_buff *skb,
+				   unsigned int power2, unsigned int offset)
 {
-	unsigned long addr;
-	unsigned long newaddr;
-
-	addr = (unsigned long) skb->data;
-
-	newaddr = (addr + power2 - 1) & ~(power2 - 1);
+	unsigned char *addr = skb->data;
+	unsigned char *newaddr = PTR_ALIGN(addr, power2);
 
 	skb_reserve(skb, newaddr - addr + offset);
 }
@@ -797,7 +792,8 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
 *  this queues a buffer for inbound packets.
 *
 *  Input parameters:
-*	   d - DMA channel descriptor
+*	   sc - softc structure
+*	   d - DMA channel descriptor
 *	   sb - sk_buff to add, or NULL if we should allocate one
 *
 *  Return value:
@@ -806,8 +802,10 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
 ********************************************************************* */
 
-static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+			       struct sk_buff *sb)
 {
+	struct net_device *dev = sc->sbm_dev;
 	struct sbdmadscr *dsc;
 	struct sbdmadscr *nextdsc;
 	struct sk_buff *sb_new = NULL;
@@ -848,14 +846,16 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
 	 */
 
 	if (sb == NULL) {
-		sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
+		sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
+					       SMP_CACHE_BYTES * 2 +
+					       NET_IP_ALIGN);
 		if (sb_new == NULL) {
 			pr_info("%s: sk_buff allocation failed\n",
 			       d->sbdma_eth->sbm_dev->name);
 			return -ENOBUFS;
 		}
 
-		sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN);
+		sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
 	}
 	else {
 		sb_new = sb;
@@ -874,10 +874,10 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
 	 * Do not interrupt per DMA transfer.
 	 */
 	dsc->dscr_a = virt_to_phys(sb_new->data) |
-		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + ETHER_ALIGN)) | 0;
+		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
 #else
 	dsc->dscr_a = virt_to_phys(sb_new->data) |
-		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + ETHER_ALIGN)) |
+		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
 		M_DMA_DSCRA_INTERRUPT;
 #endif
@@ -1032,18 +1032,19 @@ static void sbdma_emptyring(struct sbmacdma *d)
 *  with sk_buffs
 *
 *  Input parameters:
-*	   d - DMA channel
+*	   sc - softc structure
+*	   d - DMA channel
 *
 *  Return value:
 *	   nothing
 ********************************************************************* */
 
-static void sbdma_fillring(struct sbmacdma *d)
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
 {
 	int idx;
 
 	for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
-		if (sbdma_add_rcvbuffer(d, NULL) != 0)
+		if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
 			break;
 	}
 }
@@ -1159,10 +1160,11 @@ static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
 			 * packet and put it right back on the receive ring.
 			 */
 
-			if (unlikely(sbdma_add_rcvbuffer(d, NULL) ==
+			if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
 				     -ENOBUFS)) {
 				dev->stats.rx_dropped++;
-				sbdma_add_rcvbuffer(d, sb); /* re-add old buffer */
+				/* Re-add old buffer */
+				sbdma_add_rcvbuffer(sc, d, sb);
 				/* No point in continuing at the moment */
 				printk(KERN_ERR "dropped packet (1)\n");
 				d->sbdma_remptr = SBDMA_NEXTBUF(d, sbdma_remptr);
@@ -1212,7 +1214,7 @@ static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
 			 * put it back on the receive ring.
 			 */
 			dev->stats.rx_errors++;
-			sbdma_add_rcvbuffer(d, sb);
+			sbdma_add_rcvbuffer(sc, d, sb);
 		}
@@ -1570,7 +1572,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
 	 * Fill the receive ring
 	 */
 
-	sbdma_fillring(&(s->sbm_rxdma));
+	sbdma_fillring(s, &(s->sbm_rxdma));
 
 	/*
 	 * Turn on the rest of the bits in the enable register
@@ -2312,13 +2314,6 @@ static int sbmac_init(struct platform_device *pldev, long long base)
 		dev->dev_addr[i] = eaddr[i];
 	}
 
-	/*
-	 * Init packet size
-	 */
-
-	sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
-
 	/*
 	 * Initialize context (get pointers to registers and stuff), then
 	 * allocate the memory for the descriptor tables.
drivers/net/sc92031.c

@@ -953,9 +953,6 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned entry;
 	u32 tx_status;
 
-	if (skb_padto(skb, ETH_ZLEN))
-		return NETDEV_TX_OK;
-
 	if (unlikely(skb->len > TX_BUF_SIZE)) {
 		dev->stats.tx_dropped++;
 		goto out;
@@ -975,6 +972,11 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
 
 	len = skb->len;
+	if (unlikely(len < ETH_ZLEN)) {
+		memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
+				0, ETH_ZLEN - len);
+		len = ETH_ZLEN;
+	}
 
 	wmb();
drivers/net/sfc/bitfield.h

@@ -483,7 +483,7 @@ typedef union efx_oword {
 #endif
 
 #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {			   \
+	if (falcon_rev(efx) >= FALCON_REV_B0) {			   \
 		EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
 	} else { \
 		EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
@@ -491,7 +491,7 @@ typedef union efx_oword {
 } while (0)
 
 #define EFX_QWORD_FIELD_VER(efx, qword, field)	\
-	(FALCON_REV(efx) >= FALCON_REV_B0 ?	\
+	(falcon_rev(efx) >= FALCON_REV_B0 ?	\
 	 EFX_QWORD_FIELD((qword), field##_B0) :	\
 	 EFX_QWORD_FIELD((qword), field##_A1))
@@ -501,8 +501,5 @@ typedef union efx_oword {
 #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
 #define EFX_DMA_TYPE_WIDTH(width) \
 	(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
-#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
-			  ~((u64) 0) : ~((u32) 0))
-#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
 
 #endif /* EFX_BITFIELD_H */
drivers/net/sfc/boards.c

@@ -27,10 +27,8 @@ static void blink_led_timer(unsigned long context)
 	struct efx_blinker *bl = &efx->board_info.blinker;
 	efx->board_info.set_fault_led(efx, bl->state);
 	bl->state = !bl->state;
-	if (bl->resubmit) {
-		bl->timer.expires = jiffies + BLINK_INTERVAL;
-		add_timer(&bl->timer);
-	}
+	if (bl->resubmit)
+		mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
 }
 
 static void board_blink(struct efx_nic *efx, int blink)
@@ -44,8 +42,7 @@ static void board_blink(struct efx_nic *efx, int blink)
 		blinker->state = 0;
 		setup_timer(&blinker->timer, blink_led_timer,
 			    (unsigned long)efx);
-		blinker->timer.expires = jiffies + BLINK_INTERVAL;
-		add_timer(&blinker->timer);
+		mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
 	} else {
 		blinker->resubmit = 0;
 		if (blinker->timer.function)
drivers/net/sfc/efx.c
浏览文件 @
7bece815
...
@@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
...
@@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
*/
*/
static
inline
void
efx_channel_processed
(
struct
efx_channel
*
channel
)
static
inline
void
efx_channel_processed
(
struct
efx_channel
*
channel
)
{
{
/*
Write to EVQ_RPTR_REG. If a new event arrived in a race
/*
The interrupt handler for this channel may set work_pending
*
with finishing processing, a new interrupt will be raised.
*
as soon as we acknowledge the events we've seen. Make sure
*/
*
it's cleared before then. *
/
channel
->
work_pending
=
0
;
channel
->
work_pending
=
0
;
smp_wmb
();
/* Ensure channel updated before any new interrupt. */
smp_wmb
();
falcon_eventq_read_ack
(
channel
);
falcon_eventq_read_ack
(
channel
);
}
}
...
@@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel)
...
@@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel)
napi_disable
(
&
channel
->
napi_str
);
napi_disable
(
&
channel
->
napi_str
);
/* Poll the channel */
/* Poll the channel */
(
void
)
efx_process_channel
(
channel
,
efx
->
type
->
evq_size
);
efx_process_channel
(
channel
,
efx
->
type
->
evq_size
);
/* Ack the eventq. This may cause an interrupt to be generated
/* Ack the eventq. This may cause an interrupt to be generated
* when they are reenabled */
* when they are reenabled */
...
@@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel)
...
@@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel)
*
*
*************************************************************************/
*************************************************************************/
/* Setup per-NIC RX buffer parameters.
* Calculate the rx buffer allocation parameters required to support
* the current MTU, including padding for header alignment and overruns.
*/
static
void
-efx_calc_rx_buffer_params(struct efx_nic *efx)
-{
-    unsigned int order, len;
-
-    len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
-           EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
-           efx->type->rx_buffer_padding);
-
-    /* Calculate page-order */
-    for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
-        ;
-
-    efx->rx_buffer_len = len;
-    efx->rx_buffer_order = order;
-}
-
 static int efx_probe_channel(struct efx_channel *channel)
 {
     struct efx_tx_queue *tx_queue;
@@ -387,7 +368,14 @@ static int efx_init_channels(struct efx_nic *efx)
     struct efx_channel *channel;
     int rc = 0;
 
-    efx_calc_rx_buffer_params(efx);
+    /* Calculate the rx buffer allocation parameters required to
+     * support the current MTU, including padding for header
+     * alignment and overruns.
+     */
+    efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
+                          EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+                          efx->type->rx_buffer_padding);
+    efx->rx_buffer_order = get_order(efx->rx_buffer_len);
 
     /* Initialise the channels */
     efx_for_each_channel(channel, efx) {
@@ -440,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel)
     netif_napi_add(channel->napi_dev, &channel->napi_str,
                    efx_poll, napi_weight);
 
+    /* The interrupt handler for this channel may set work_pending
+     * as soon as we enable it.  Make sure it's cleared before
+     * then.  Similarly, make sure it sees the enabled flag set. */
     channel->work_pending = 0;
     channel->enabled = 1;
-    smp_wmb(); /* ensure channel updated before first interrupt */
+    smp_wmb();
 
     napi_enable(&channel->napi_str);
@@ -704,7 +695,7 @@ static void efx_stop_port(struct efx_nic *efx)
     mutex_unlock(&efx->mac_lock);
 
     /* Serialise against efx_set_multicast_list() */
-    if (NET_DEV_REGISTERED(efx)) {
+    if (efx_dev_registered(efx)) {
         netif_tx_lock_bh(efx->net_dev);
         netif_tx_unlock_bh(efx->net_dev);
     }
@@ -791,22 +782,23 @@ static int efx_init_io(struct efx_nic *efx)
     efx->membase = ioremap_nocache(efx->membase_phys,
                                    efx->type->mem_map_size);
     if (!efx->membase) {
-        EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
-                efx->type->mem_bar, efx->membase_phys,
+        EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
+                efx->type->mem_bar,
+                (unsigned long long)efx->membase_phys,
                 efx->type->mem_map_size);
         rc = -ENOMEM;
         goto fail4;
     }
-    EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
-            efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
-            efx->membase);
+    EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
+            efx->type->mem_bar, (unsigned long long)efx->membase_phys,
+            efx->type->mem_map_size, efx->membase);
 
     return 0;
 
  fail4:
     release_mem_region(efx->membase_phys, efx->type->mem_map_size);
  fail3:
-    efx->membase_phys = 0UL;
+    efx->membase_phys = 0;
  fail2:
     pci_disable_device(efx->pci_dev);
  fail1:
@@ -824,7 +816,7 @@ static void efx_fini_io(struct efx_nic *efx)
     if (efx->membase_phys) {
         pci_release_region(efx->pci_dev, efx->type->mem_bar);
-        efx->membase_phys = 0UL;
+        efx->membase_phys = 0;
     }
 
     pci_disable_device(efx->pci_dev);
@@ -1043,7 +1035,7 @@ static void efx_start_all(struct efx_nic *efx)
         return;
     if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
         return;
-    if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
+    if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
         return;
 
     /* Mark the port as enabled so port reconfigurations can start, then
@@ -1073,9 +1065,8 @@ static void efx_flush_all(struct efx_nic *efx)
     cancel_delayed_work_sync(&efx->monitor_work);
 
     /* Ensure that all RX slow refills are complete. */
-    efx_for_each_rx_queue(rx_queue, efx) {
+    efx_for_each_rx_queue(rx_queue, efx)
         cancel_delayed_work_sync(&rx_queue->work);
-    }
 
     /* Stop scheduled port reconfigurations */
     cancel_work_sync(&efx->reconfigure_work);
@@ -1101,9 +1092,10 @@ static void efx_stop_all(struct efx_nic *efx)
     falcon_disable_interrupts(efx);
     if (efx->legacy_irq)
         synchronize_irq(efx->legacy_irq);
-    efx_for_each_channel_with_interrupt(channel, efx)
+    efx_for_each_channel_with_interrupt(channel, efx) {
         if (channel->irq)
             synchronize_irq(channel->irq);
+    }
 
     /* Stop all NAPI processing and synchronous rx refills */
     efx_for_each_channel(channel, efx)
@@ -1125,7 +1117,7 @@ static void efx_stop_all(struct efx_nic *efx)
     /* Stop the kernel transmit interface late, so the watchdog
      * timer isn't ticking over the flush */
     efx_stop_queue(efx);
-    if (NET_DEV_REGISTERED(efx)) {
+    if (efx_dev_registered(efx)) {
         netif_tx_lock_bh(efx->net_dev);
         netif_tx_unlock_bh(efx->net_dev);
     }
@@ -1344,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev)
     return 0;
 }
 
-/* Context: process, dev_base_lock held, non-blocking. */
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
 static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
 {
     struct efx_nic *efx = net_dev->priv;
     struct efx_mac_stats *mac_stats = &efx->mac_stats;
     struct net_device_stats *stats = &net_dev->stats;
 
+    /* Update stats if possible, but do not wait if another thread
+     * is updating them (or resetting the NIC); slightly stale
+     * stats are acceptable.
+     */
     if (!spin_trylock(&efx->stats_lock))
         return stats;
     if (efx->state == STATE_RUNNING) {
@@ -1494,7 +1490,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
 static int efx_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
 {
-    struct net_device *net_dev = (struct net_device *)ptr;
+    struct net_device *net_dev = ptr;
 
     if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
         struct efx_nic *efx = net_dev->priv;
@@ -1563,7 +1559,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
     efx_for_each_tx_queue(tx_queue, efx)
         efx_release_tx_buffers(tx_queue);
 
-    if (NET_DEV_REGISTERED(efx)) {
+    if (efx_dev_registered(efx)) {
         strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
         unregister_netdev(efx->net_dev);
     }
@@ -1688,7 +1684,7 @@ static int efx_reset(struct efx_nic *efx)
     if (method == RESET_TYPE_DISABLE) {
         /* Reinitialise the device anyway so the driver unload sequence
          * can talk to the external SRAM */
-        (void) falcon_init_nic(efx);
+        falcon_init_nic(efx);
         rc = -EIO;
         goto fail4;
     }
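The efx_init_channels() hunk above drops an open-coded page-order loop in favour of the kernel's get_order() helper. The following is a minimal stand-alone sketch (user-space C, not driver code; PAGE_SIZE fixed at 4096 purely for illustration) showing that the removed loop and a get_order()-style computation agree:

/* Stand-alone sketch: the removed loop versus a get_order()-style
 * computation.  In the kernel, PAGE_SIZE and get_order() come from
 * <asm/page.h>; the value 4096 here is an assumption for the demo. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Removed style: smallest order such that (1u << order) * PAGE_SIZE >= len */
static unsigned int order_by_loop(unsigned int len)
{
    unsigned int order;

    for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
        ;
    return order;
}

/* get_order()-like computation: double PAGE_SIZE until it covers len */
static unsigned int order_by_shift(unsigned int len)
{
    unsigned int order = 0;

    while ((PAGE_SIZE << order) < len)
        order++;
    return order;
}

int main(void)
{
    unsigned int len;

    for (len = 1; len < 8 * PAGE_SIZE; len++)
        assert(order_by_loop(len) == order_by_shift(len));
    printf("len %u -> order %u\n", 9000u, order_by_loop(9000u));
    return 0;
}

Both functions return the smallest allocation order whose size covers len, which is why the driver can compute rx_buffer_order directly from rx_buffer_len.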
drivers/net/sfc/falcon.c

@@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
  **************************************************************************
  */
 
-/* DMA address mask (up to 46-bit, avoiding compiler warnings)
- *
- * Note that it is possible to have a platform with 64-bit longs and
- * 32-bit DMA addresses, or vice versa.  EFX_DMA_MASK takes care of the
- * platform DMA mask.
- */
-#if BITS_PER_LONG == 64
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
-#else
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
-#endif
+/* DMA address mask */
+#define FALCON_DMA_MASK DMA_BIT_MASK(46)
 
 /* TX DMA length mask (13-bit) */
 #define FALCON_TX_DMA_MASK (4096 - 1)
@@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 #define PCI_EXP_LNKSTA_LNK_WID_LBN 4
 
 #define FALCON_IS_DUAL_FUNC(efx) \
-    (FALCON_REV(efx) < FALCON_REV_B0)
+    (falcon_rev(efx) < FALCON_REV_B0)
 
 /**************************************************************************
  *
@@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
                           TX_DESCQ_TYPE, 0,
                           TX_NON_IP_DROP_DIS_B0, 1);
 
-    if (FALCON_REV(efx) >= FALCON_REV_B0) {
+    if (falcon_rev(efx) >= FALCON_REV_B0) {
         int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
         EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
         EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
@@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
     falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
                        tx_queue->queue);
 
-    if (FALCON_REV(efx) < FALCON_REV_B0) {
+    if (falcon_rev(efx) < FALCON_REV_B0) {
         efx_oword_t reg;
 
         BUG_ON(tx_queue->queue >= 128); /* HW limit */
@@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
     efx_oword_t rx_desc_ptr;
     struct efx_nic *efx = rx_queue->efx;
     int rc;
-    int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
+    int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
     int iscsi_digest_en = is_b0;
 
     EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
@@ -822,10 +813,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
         tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
         tx_queue = &efx->tx_queue[tx_ev_q_label];
 
-        if (NET_DEV_REGISTERED(efx))
+        if (efx_dev_registered(efx))
             netif_tx_lock(efx->net_dev);
         falcon_notify_tx_desc(tx_queue);
-        if (NET_DEV_REGISTERED(efx))
+        if (efx_dev_registered(efx))
             netif_tx_unlock(efx->net_dev);
     } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
                EFX_WORKAROUND_10727(efx)) {
@@ -884,7 +875,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
                                          RX_EV_TCP_UDP_CHKSUM_ERR);
     rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
     rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
-    rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
+    rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
                       0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
     rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
@@ -1065,7 +1056,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
         EFX_QWORD_FIELD(*event, XG_PHY_INTR))
         is_phy_event = 1;
 
-    if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
+    if ((falcon_rev(efx) >= FALCON_REV_B0) &&
         EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
         is_phy_event = 1;
@@ -1405,7 +1396,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
 static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
 {
     struct falcon_nic_data *nic_data = efx->nic_data;
-    efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+    efx_oword_t *int_ker = efx->irq_status.addr;
     efx_oword_t fatal_intr;
     int error, mem_perr;
     static int n_int_errors;
@@ -1451,8 +1442,8 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
  */
 static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 {
-    struct efx_nic *efx = (struct efx_nic *)dev_id;
-    efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+    struct efx_nic *efx = dev_id;
+    efx_oword_t *int_ker = efx->irq_status.addr;
     struct efx_channel *channel;
     efx_dword_t reg;
     u32 queues;
@@ -1489,8 +1480,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 {
-    struct efx_nic *efx = (struct efx_nic *)dev_id;
-    efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+    struct efx_nic *efx = dev_id;
+    efx_oword_t *int_ker = efx->irq_status.addr;
     struct efx_channel *channel;
     int syserr;
     int queues;
@@ -1542,9 +1533,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
  */
 static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
 {
-    struct efx_channel *channel = (struct efx_channel *)dev_id;
+    struct efx_channel *channel = dev_id;
     struct efx_nic *efx = channel->efx;
-    efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+    efx_oword_t *int_ker = efx->irq_status.addr;
     int syserr;
 
     efx->last_irq_cpu = raw_smp_processor_id();
@@ -1572,7 +1563,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
     unsigned long offset;
     efx_dword_t dword;
 
-    if (FALCON_REV(efx) < FALCON_REV_B0)
+    if (falcon_rev(efx) < FALCON_REV_B0)
         return;
 
     for (offset = RX_RSS_INDIR_TBL_B0;
@@ -1595,7 +1586,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
     if (!EFX_INT_MODE_USE_MSI(efx)) {
         irq_handler_t handler;
-        if (FALCON_REV(efx) >= FALCON_REV_B0)
+        if (falcon_rev(efx) >= FALCON_REV_B0)
             handler = falcon_legacy_interrupt_b0;
         else
             handler = falcon_legacy_interrupt_a1;
@@ -1636,12 +1627,13 @@ void falcon_fini_interrupt(struct efx_nic *efx)
     efx_oword_t reg;
 
     /* Disable MSI/MSI-X interrupts */
-    efx_for_each_channel_with_interrupt(channel, efx)
+    efx_for_each_channel_with_interrupt(channel, efx) {
         if (channel->irq)
             free_irq(channel->irq, channel);
+    }
 
     /* ACK legacy interrupt */
-    if (FALCON_REV(efx) >= FALCON_REV_B0)
+    if (falcon_rev(efx) >= FALCON_REV_B0)
         falcon_read(efx, &reg, INT_ISR0_B0);
     else
         falcon_irq_ack_a1(efx);
@@ -1732,7 +1724,7 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
     efx_oword_t temp;
     int count;
 
-    if ((FALCON_REV(efx) < FALCON_REV_B0) ||
+    if ((falcon_rev(efx) < FALCON_REV_B0) ||
         (efx->loopback_mode != LOOPBACK_NONE))
         return;
@@ -1785,7 +1777,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
 {
     efx_oword_t temp;
 
-    if (FALCON_REV(efx) < FALCON_REV_B0)
+    if (falcon_rev(efx) < FALCON_REV_B0)
         return;
 
     /* Isolate the MAC -> RX */
@@ -1823,7 +1815,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
                          MAC_SPEED, link_speed);
     /* On B0, MAC backpressure can be disabled and packets get
      * discarded. */
-    if (FALCON_REV(efx) >= FALCON_REV_B0) {
+    if (falcon_rev(efx) >= FALCON_REV_B0) {
         EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
                             !efx->link_up);
     }
@@ -1841,7 +1833,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
     EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
 
     /* Unisolate the MAC -> RX */
-    if (FALCON_REV(efx) >= FALCON_REV_B0)
+    if (falcon_rev(efx) >= FALCON_REV_B0)
         EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
     falcon_write(efx, &reg, RX_CFG_REG_KER);
 }
@@ -1856,7 +1848,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
         return 0;
 
     /* Statistics fetch will fail if the MAC is in TX drain */
-    if (FALCON_REV(efx) >= FALCON_REV_B0) {
+    if (falcon_rev(efx) >= FALCON_REV_B0) {
         efx_oword_t temp;
         falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
         if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
@@ -1940,7 +1932,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
 static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
                               int addr, int value)
 {
-    struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+    struct efx_nic *efx = net_dev->priv;
     unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
     efx_oword_t reg;
@@ -2008,7 +2000,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
  * could be read, -1 will be returned. */
 static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
 {
-    struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+    struct efx_nic *efx = net_dev->priv;
     unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
     efx_oword_t reg;
     int value = -1;
@@ -2113,7 +2105,7 @@ int falcon_probe_port(struct efx_nic *efx)
     falcon_init_mdio(&efx->mii);
 
     /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
-    if (FALCON_REV(efx) >= FALCON_REV_B0)
+    if (falcon_rev(efx) >= FALCON_REV_B0)
         efx->flow_control = EFX_FC_RX | EFX_FC_TX;
     else
         efx->flow_control = EFX_FC_RX;
@@ -2373,7 +2365,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
         return -ENODEV;
     }
 
-    switch (FALCON_REV(efx)) {
+    switch (falcon_rev(efx)) {
     case FALCON_REV_A0:
     case 0xff:
         EFX_ERR(efx, "Falcon rev A0 not supported\n");
@@ -2399,7 +2391,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
         break;
 
     default:
-        EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
+        EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
         return -ENODEV;
     }
@@ -2419,7 +2411,7 @@ int falcon_probe_nic(struct efx_nic *efx)
     /* Allocate storage for hardware specific data */
     nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
-    efx->nic_data = (void *) nic_data;
+    efx->nic_data = nic_data;
 
     /* Determine number of ports etc. */
     rc = falcon_probe_nic_variant(efx);
@@ -2489,13 +2481,10 @@ int falcon_probe_nic(struct efx_nic *efx)
  */
 int falcon_init_nic(struct efx_nic *efx)
 {
-    struct falcon_nic_data *data;
     efx_oword_t temp;
     unsigned thresh;
     int rc;
 
-    data = (struct falcon_nic_data *)efx->nic_data;
-
     /* Set up the address region register. This is only needed
      * for the B0 FPGA, but since we are just pushing in the
      * reset defaults this may as well be unconditional. */
@@ -2562,7 +2551,7 @@ int falcon_init_nic(struct efx_nic *efx)
     /* Set number of RSS queues for receive path. */
     falcon_read(efx, &temp, RX_FILTER_CTL_REG);
-    if (FALCON_REV(efx) >= FALCON_REV_B0)
+    if (falcon_rev(efx) >= FALCON_REV_B0)
         EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
     else
         EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
@@ -2600,7 +2589,7 @@ int falcon_init_nic(struct efx_nic *efx)
     /* Prefetch threshold 2 => fetch when descriptor cache half empty */
     EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
     /* Squash TX of packets of 16 bytes or less */
-    if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
+    if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
         EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
     falcon_write(efx, &temp, TX_CFG2_REG_KER);
@@ -2617,7 +2606,7 @@ int falcon_init_nic(struct efx_nic *efx)
     if (EFX_WORKAROUND_7575(efx))
         EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
                                 (3 * 4096) / 32);
-    if (FALCON_REV(efx) >= FALCON_REV_B0)
+    if (falcon_rev(efx) >= FALCON_REV_B0)
         EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
 
     /* RX FIFO flow control thresholds */
@@ -2633,7 +2622,7 @@ int falcon_init_nic(struct efx_nic *efx)
     falcon_write(efx, &temp, RX_CFG_REG_KER);
 
     /* Set destination of both TX and RX Flush events */
-    if (FALCON_REV(efx) >= FALCON_REV_B0) {
+    if (falcon_rev(efx) >= FALCON_REV_B0) {
         EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
         falcon_write(efx, &temp, DP_CTRL_REG);
     }
@@ -2647,7 +2636,7 @@ void falcon_remove_nic(struct efx_nic *efx)
     falcon_free_buffer(efx, &efx->irq_status);
 
-    (void) falcon_reset_hw(efx, RESET_TYPE_ALL);
+    falcon_reset_hw(efx, RESET_TYPE_ALL);
 
     /* Release the second function after the reset */
     if (nic_data->pci_dev2) {
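The first falcon.c hunk replaces the hand-written BITS_PER_LONG conditional with DMA_BIT_MASK(46). As a rough stand-alone sketch (mask arithmetic only, using the macro's definition from linux/dma-mapping.h of that era; not driver code), the new expression evaluates to the same 46-bit constant the removed block spelled out:

/* Stand-alone sketch: DMA_BIT_MASK(n) sets the low n bits.  For n == 46
 * this is the 0x00003fffffffffff value the removed #if/#else block used. */
#include <inttypes.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
    uint64_t falcon_dma_mask = DMA_BIT_MASK(46);

    printf("FALCON_DMA_MASK = 0x%016" PRIx64 "\n", falcon_dma_mask);
    return 0;
}

Because the macro is written in terms of unsigned long long, the result no longer depends on whether longs are 32 or 64 bits wide, which is exactly the portability concern the removed comment described.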
drivers/net/sfc/falcon.h

@@ -23,7 +23,10 @@ enum falcon_revision {
     FALCON_REV_B0 = 2,
 };
 
-#define FALCON_REV(efx) ((efx)->pci_dev->revision)
+static inline int falcon_rev(struct efx_nic *efx)
+{
+    return efx->pci_dev->revision;
+}
 
 extern struct efx_nic_type falcon_a_nic_type;
 extern struct efx_nic_type falcon_b_nic_type;
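The falcon.h change is the pattern applied throughout this series: a function-like macro becomes a static inline function, so the argument is type-checked and evaluated exactly once while generating the same code. A small stand-alone sketch of the same idea (the struct names here are stand-ins, not the driver's real types):

#include <stdio.h>

struct pci_dev_stub { int revision; };
struct nic_stub { struct pci_dev_stub *pci_dev; };

/* Macro form: expands textually and accepts any argument that happens to
 * have a ->pci_dev->revision chain. */
#define NIC_REV(nic) ((nic)->pci_dev->revision)

/* Inline form: the compiler checks the argument type and the call reads
 * like an ordinary function. */
static inline int nic_rev(struct nic_stub *nic)
{
    return nic->pci_dev->revision;
}

int main(void)
{
    struct pci_dev_stub pdev = { .revision = 2 };
    struct nic_stub nic = { .pci_dev = &pdev };

    printf("macro: %d, inline: %d\n", NIC_REV(&nic), nic_rev(&nic));
    return 0;
}

Passing anything other than a struct nic_stub * to nic_rev() is a compile-time error, whereas the macro would silently accept it.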
drivers/net/sfc/falcon_hwdefs.h

@@ -1125,7 +1125,7 @@ struct falcon_nvconfig_board_v2 {
     u8 port1_phy_type;
     __le16 asic_sub_revision;
     __le16 board_revision;
-} __attribute__ ((packed));
+} __packed;
 
 #define NVCONFIG_BASE 0x300
 #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
@@ -1144,6 +1144,6 @@ struct falcon_nvconfig {
     __le16 board_struct_ver;
     __le16 board_checksum;
     struct falcon_nvconfig_board_v2 board_v2;
-} __attribute__ ((packed));
+} __packed;
 
 #endif /* EFX_FALCON_HWDEFS_H */
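In falcon_hwdefs.h only the spelling changes: __packed is the kernel's shorthand for __attribute__((packed)), with identical semantics. A stand-alone sketch of what packing does to a structure that mirrors a fixed on-device layout (field names here are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define __packed __attribute__((packed))

struct padded_layout { uint8_t type; uint16_t revision; };
struct packed_layout { uint8_t type; uint16_t revision; } __packed;

int main(void)
{
    /* Typically prints 4 and 3: packing removes the alignment padding,
     * which matters when the struct must match bytes stored in NVRAM. */
    printf("padded: %zu bytes, packed: %zu bytes\n",
           sizeof(struct padded_layout), sizeof(struct packed_layout));
    return 0;
}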
drivers/net/sfc/falcon_io.h

@@ -56,14 +56,27 @@
 #define FALCON_USE_QWORD_IO 1
 #endif
 
-#define _falcon_writeq(efx, value, reg) \
-    __raw_writeq((__force u64) (value), (efx)->membase + (reg))
-#define _falcon_writel(efx, value, reg) \
-    __raw_writel((__force u32) (value), (efx)->membase + (reg))
-#define _falcon_readq(efx, reg) \
-    ((__force __le64) __raw_readq((efx)->membase + (reg)))
-#define _falcon_readl(efx, reg) \
-    ((__force __le32) __raw_readl((efx)->membase + (reg)))
+#ifdef FALCON_USE_QWORD_IO
+static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
+                                  unsigned int reg)
+{
+    __raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
+{
+    return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
+                                  unsigned int reg)
+{
+    __raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
+{
+    return (__force __le32)__raw_readl(efx->membase + reg);
+}
 
 /* Writes to a normal 16-byte Falcon register, locking as appropriate. */
 static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
drivers/net/sfc/falcon_xmac.c

@@ -221,7 +221,7 @@ static int falcon_xgmii_status(struct efx_nic *efx)
 {
     efx_dword_t reg;
 
-    if (FALCON_REV(efx) < FALCON_REV_B0)
+    if (falcon_rev(efx) < FALCON_REV_B0)
         return 1;
 
     /* The ISR latches, so clear it and re-read */
@@ -241,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
 {
     efx_dword_t reg;
 
-    if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
+    if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
         return;
 
     /* Flush the ISR */
@@ -454,7 +454,7 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
         EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
                 __func__, tries);
-        (void) falcon_reset_xaui(efx);
+        falcon_reset_xaui(efx);
         udelay(200);
         tries--;
     }
@@ -572,7 +572,7 @@ int falcon_check_xmac(struct efx_nic *efx)
     xaui_link_ok = falcon_xaui_link_ok(efx);
 
     if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
-        (void) falcon_reset_xaui(efx);
+        falcon_reset_xaui(efx);
 
     /* Call the PHY check_hw routine */
     rc = efx->phy_op->check_hw(efx);
@@ -639,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
     reset = ((flow_control & EFX_FC_TX) &&
              !(efx->flow_control & EFX_FC_TX));
     if (EFX_WORKAROUND_11482(efx) && reset) {
-        if (FALCON_REV(efx) >= FALCON_REV_B0) {
+        if (falcon_rev(efx) >= FALCON_REV_B0) {
             /* Recover by resetting the EM block */
             if (efx->link_up)
                 falcon_drain_tx_fifo(efx);
drivers/net/sfc/net_driver.h

@@ -42,7 +42,7 @@
 #ifndef EFX_DRIVER_NAME
 #define EFX_DRIVER_NAME "sfc"
 #endif
-#define EFX_DRIVER_VERSION "2.2.0136"
+#define EFX_DRIVER_VERSION "2.2"
 
 #ifdef EFX_ENABLE_DEBUG
 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -52,28 +52,19 @@
 #define EFX_WARN_ON_PARANOID(x) do {} while (0)
 #endif
 
-#define NET_DEV_REGISTERED(efx) \
-    ((efx)->net_dev->reg_state == NETREG_REGISTERED)
-
-/* Include net device name in log messages if it has been registered.
- * Use efx->name not efx->net_dev->name so that races with (un)registration
- * are harmless.
- */
-#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
-
 /* Un-rate-limited logging */
 #define EFX_ERR(efx, fmt, args...) \
-    dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args)
+    dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)
 
 #define EFX_INFO(efx, fmt, args...) \
-    dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args)
+    dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)
 
 #ifdef EFX_ENABLE_DEBUG
 #define EFX_LOG(efx, fmt, args...) \
-    dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
+    dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
 #else
 #define EFX_LOG(efx, fmt, args...) \
-    dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
+    dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
 #endif
 
 #define EFX_TRACE(efx, fmt, args...) do {} while (0)
@@ -90,11 +81,6 @@ do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
 #define EFX_LOG_RL(efx, fmt, args...) \
 do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
 
-/* Kernel headers may redefine inline anyway */
-#ifndef inline
-#define inline inline __attribute__ ((always_inline))
-#endif
-
 /**************************************************************************
  *
  * Efx data structures
@@ -695,7 +681,7 @@ struct efx_nic {
     struct workqueue_struct *workqueue;
     struct work_struct reset_work;
     struct delayed_work monitor_work;
-    unsigned long membase_phys;
+    resource_size_t membase_phys;
     void __iomem *membase;
     spinlock_t biu_lock;
     enum efx_int_mode interrupt_mode;
@@ -719,7 +705,7 @@ struct efx_nic {
     unsigned n_rx_nodesc_drop_cnt;
 
-    void *nic_data;
+    struct falcon_nic_data *nic_data;
 
     struct mutex mac_lock;
     int port_enabled;
@@ -760,6 +746,20 @@ struct efx_nic {
     void *loopback_selftest;
 };
 
+static inline int efx_dev_registered(struct efx_nic *efx)
+{
+    return efx->net_dev->reg_state == NETREG_REGISTERED;
+}
+
+/* Net device name, for inclusion in log messages if it has been registered.
+ * Use efx->name not efx->net_dev->name so that races with (un)registration
+ * are harmless.
+ */
+static inline const char *efx_dev_name(struct efx_nic *efx)
+{
+    return efx_dev_registered(efx) ? efx->name : "";
+}
+
 /**
  * struct efx_nic_type - Efx device type definition
  * @mem_bar: Memory BAR number
@@ -795,7 +795,7 @@ struct efx_nic_type {
     unsigned int txd_ring_mask;
     unsigned int rxd_ring_mask;
     unsigned int evq_size;
-    dma_addr_t max_dma_mask;
+    u64 max_dma_mask;
     unsigned int tx_dma_mask;
     unsigned bug5391_mask;
drivers/net/sfc/rx.c

@@ -86,14 +86,17 @@ static unsigned int rx_refill_limit = 95;
  */
 #define EFX_RXD_HEAD_ROOM 2
 
-/* Macros for zero-order pages (potentially) containing multiple RX buffers */
-#define RX_DATA_OFFSET(_data) \
-    (((unsigned long) (_data)) & (PAGE_SIZE-1))
-#define RX_BUF_OFFSET(_rx_buf) \
-    RX_DATA_OFFSET((_rx_buf)->data)
-
-#define RX_PAGE_SIZE(_efx) \
-    (PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
+static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+{
+    /* Offset is always within one page, so we don't need to consider
+     * the page order.
+     */
+    return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+}
+static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
+{
+    return PAGE_SIZE << efx->rx_buffer_order;
+}
 
 /**************************************************************************
@@ -106,7 +109,7 @@ static unsigned int rx_refill_limit = 95;
 static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
                                void **tcpudp_hdr, u64 *hdr_flags, void *priv)
 {
-    struct efx_channel *channel = (struct efx_channel *)priv;
+    struct efx_channel *channel = priv;
     struct iphdr *iph;
     struct tcphdr *th;
@@ -131,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
                             void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
                             void *priv)
 {
-    struct efx_channel *channel = (struct efx_channel *)priv;
+    struct efx_channel *channel = priv;
     struct ethhdr *eh;
     struct iphdr *iph;
 
     /* We support EtherII and VLAN encapsulated IPv4 */
-    eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
+    eh = page_address(frag->page) + frag->page_offset;
     *mac_hdr = eh;
 
     if (eh->h_proto == htons(ETH_P_IP)) {
@@ -269,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
         return -ENOMEM;
 
     dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
-                            0, RX_PAGE_SIZE(efx),
+                            0, efx_rx_buf_size(efx),
                             PCI_DMA_FROMDEVICE);
 
     if (unlikely(pci_dma_mapping_error(dma_addr))) {
@@ -280,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
         rx_queue->buf_page = rx_buf->page;
         rx_queue->buf_dma_addr = dma_addr;
-        rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
+        rx_queue->buf_data = (page_address(rx_buf->page) +
                               EFX_PAGE_IP_ALIGN);
     }
 
-    offset = RX_DATA_OFFSET(rx_queue->buf_data);
     rx_buf->len = bytes;
-    rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
     rx_buf->data = rx_queue->buf_data;
+    offset = efx_rx_buf_offset(rx_buf);
+    rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
 
     /* Try to pack multiple buffers per page */
     if (efx->rx_buffer_order == 0) {
@@ -295,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
         rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
         offset += ((bytes + 0x1ff) & ~0x1ff);
-        space = RX_PAGE_SIZE(efx) - offset;
+        space = efx_rx_buf_size(efx) - offset;
         if (space >= bytes) {
             /* Refs dropped on kernel releasing each skb */
             get_page(rx_queue->buf_page);
@@ -344,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
         EFX_BUG_ON_PARANOID(rx_buf->skb);
         if (rx_buf->unmap_addr) {
             pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
-                           RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
+                           efx_rx_buf_size(efx),
+                           PCI_DMA_FROMDEVICE);
             rx_buf->unmap_addr = 0;
         }
     } else if (likely(rx_buf->skb)) {
@@ -400,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
         return 0;
 
     /* Record minimum fill level */
-    if (unlikely(fill_level < rx_queue->min_fill))
+    if (unlikely(fill_level < rx_queue->min_fill)) {
         if (fill_level)
             rx_queue->min_fill = fill_level;
+    }
 
     /* Acquire RX add lock. If this lock is contended, then a fast
      * fill must already be in progress (e.g. in the refill
@@ -552,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
         struct skb_frag_struct frags;
 
         frags.page = rx_buf->page;
-        frags.page_offset = RX_BUF_OFFSET(rx_buf);
+        frags.page_offset = efx_rx_buf_offset(rx_buf);
         frags.size = rx_buf->len;
 
         lro_receive_frags(lro_mgr, &frags, rx_buf->len,
@@ -597,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
     if (unlikely(rx_buf->len > hdr_len)) {
         struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
         frag->page = rx_buf->page;
-        frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
+        frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
         frag->size = skb->len - hdr_len;
         skb_shinfo(skb)->nr_frags = 1;
         skb->data_len = frag->size;
@@ -851,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
     /* For a page that is part-way through splitting into RX buffers */
     if (rx_queue->buf_page != NULL) {
         pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
-                       RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
+                       efx_rx_buf_size(rx_queue->efx),
+                       PCI_DMA_FROMDEVICE);
         __free_pages(rx_queue->buf_page,
                      rx_queue->efx->rx_buffer_order);
         rx_queue->buf_page = NULL;
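The rx.c helpers above rely on PAGE_SIZE being a power of two: masking a buffer pointer with (PAGE_SIZE - 1) gives its offset within the page regardless of the allocation order, and PAGE_SIZE << rx_buffer_order is the mapped length. A minimal stand-alone sketch of the offset calculation (PAGE_SIZE assumed to be 4096; the address below is fabricated and never dereferenced):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Mirrors the idea behind efx_rx_buf_offset(): offset within the page. */
static unsigned int buf_offset(const void *data)
{
    return (uintptr_t)data & (PAGE_SIZE - 1);
}

int main(void)
{
    /* A made-up page-aligned base plus one full page plus 2 bytes of
     * IP-alignment padding: the offset within the page is just 2. */
    uintptr_t base = 0x40000000;
    const void *data = (const void *)(base + PAGE_SIZE + 2);

    printf("offset within page = %u\n", buf_offset(data));
    return 0;
}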
drivers/net/sfc/selftest.c

@@ -290,7 +290,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
 
     payload = &state->payload;
 
-    received = (struct efx_loopback_payload *)(char *) buf_ptr;
+    received = (struct efx_loopback_payload *) buf_ptr;
     received->ip.saddr = payload->ip.saddr;
     received->ip.check = payload->ip.check;
@@ -424,10 +424,10 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
      * interrupt handler. */
     smp_wmb();
 
-    if (NET_DEV_REGISTERED(efx))
+    if (efx_dev_registered(efx))
         netif_tx_lock_bh(efx->net_dev);
     rc = efx_xmit(efx, tx_queue, skb);
-    if (NET_DEV_REGISTERED(efx))
+    if (efx_dev_registered(efx))
         netif_tx_unlock_bh(efx->net_dev);
 
     if (rc != NETDEV_TX_OK) {
@@ -453,7 +453,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
     int tx_done = 0, rx_good, rx_bad;
     int i, rc = 0;
 
-    if (NET_DEV_REGISTERED(efx))
+    if (efx_dev_registered(efx))
         netif_tx_lock_bh(efx->net_dev);
 
     /* Count the number of tx completions, and decrement the refcnt. Any
@@ -465,7 +465,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
             dev_kfree_skb_any(skb);
     }
 
-    if (NET_DEV_REGISTERED(efx))
+    if (efx_dev_registered(efx))
         netif_tx_unlock_bh(efx->net_dev);
 
     /* Check TX completion and received packet counts */
@@ -517,6 +517,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
         state->packet_count = min(1 << (i << 2), state->packet_count);
         state->skbs = kzalloc(sizeof(state->skbs[0]) *
                               state->packet_count, GFP_KERNEL);
+        if (!state->skbs)
+            return -ENOMEM;
         state->flush = 0;
 
         EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
@@ -700,7 +702,7 @@ int efx_offline_test(struct efx_nic *efx,
      * "flushing" so all inflight packets are dropped */
     BUG_ON(efx->loopback_selftest);
     state->flush = 1;
-    efx->loopback_selftest = (void *)state;
+    efx->loopback_selftest = state;
 
     rc = efx_test_loopbacks(efx, tests, loopback_modes);
drivers/net/sfc/sfe4001.c

@@ -116,18 +116,18 @@ void sfe4001_poweroff(struct efx_nic *efx)
 
     /* Turn off all power rails */
     out = 0xff;
-    (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+    efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
 
     /* Disable port 1 outputs on IO expander */
     cfg = 0xff;
-    (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
+    efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
 
     /* Disable port 0 outputs on IO expander */
     cfg = 0xff;
-    (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
+    efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
 
     /* Clear any over-temperature alert */
-    (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
+    efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
 }
 
 /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
@@ -253,14 +253,14 @@ int sfe4001_poweron(struct efx_nic *efx)
 fail3:
     /* Turn off all power rails */
     out = 0xff;
-    (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+    efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
     /* Disable port 1 outputs on IO expander */
     out = 0xff;
-    (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
+    efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
 fail2:
     /* Disable port 0 outputs on IO expander */
     out = 0xff;
-    (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
+    efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
 fail1:
     return rc;
 }
drivers/net/sfc/tenxpress.c

@@ -211,6 +211,8 @@ static int tenxpress_phy_init(struct efx_nic *efx)
     int rc = 0;
 
     phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+    if (!phy_data)
+        return -ENOMEM;
     efx->phy_data = phy_data;
 
     tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
@@ -376,7 +378,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
      * perform a special software reset */
     if ((phy_data->tx_disabled && !efx->tx_disabled) ||
         loop_change) {
-        (void) tenxpress_special_reset(efx);
+        tenxpress_special_reset(efx);
         falcon_reset_xaui(efx);
     }
drivers/net/sfc/tx.c

@@ -387,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
     if (unlikely(tx_queue->stopped)) {
         fill_level = tx_queue->insert_count - tx_queue->read_count;
         if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
-            EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx));
+            EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
 
             /* Do this under netif_tx_lock(), to avoid racing
              * with efx_xmit(). */
@@ -639,11 +639,12 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
     base_dma = tsoh->dma_addr & PAGE_MASK;
 
     p = &tx_queue->tso_headers_free;
-    while (*p != NULL)
+    while (*p != NULL) {
         if (((unsigned long)*p & PAGE_MASK) == base_kva)
             *p = (*p)->next;
         else
             p = &(*p)->next;
+    }
 
     pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
 }
@@ -939,9 +940,10 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
     /* Allocate a DMA-mapped header buffer. */
     if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
-        if (tx_queue->tso_headers_free == NULL)
+        if (tx_queue->tso_headers_free == NULL) {
             if (efx_tsoh_block_alloc(tx_queue))
                 return -1;
+        }
         EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
         tsoh = tx_queue->tso_headers_free;
         tx_queue->tso_headers_free = tsoh->next;
@@ -1106,9 +1108,10 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
 {
     unsigned i;
 
-    if (tx_queue->buffer)
+    if (tx_queue->buffer) {
         for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
             efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
+    }
 
     while (tx_queue->tso_headers_free != NULL)
         efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
drivers/net/sfc/workarounds.h

@@ -16,7 +16,7 @@
  */
 
 #define EFX_WORKAROUND_ALWAYS(efx) 1
-#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1)
+#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
 
 /* XAUI resets if link not detected */
 #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
drivers/net/sfc/xfp_phy.c

@@ -85,7 +85,9 @@ static int xfp_phy_init(struct efx_nic *efx)
     int rc;
 
     phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
-    efx->phy_data = (void *) phy_data;
+    if (!phy_data)
+        return -ENOMEM;
+    efx->phy_data = phy_data;
 
     EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
              " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
drivers/net/sky2.c
...
@@ -1159,17 +1159,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 }
 
 #ifdef SKY2_VLAN_TAG_USED
-static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
 {
-	struct sky2_port *sky2 = netdev_priv(dev);
-	struct sky2_hw *hw = sky2->hw;
-	u16 port = sky2->port;
-
-	netif_tx_lock_bh(dev);
-	napi_disable(&hw->napi);
-
-	sky2->vlgrp = grp;
-	if (grp) {
+	if (onoff) {
 		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
 			     RX_VLAN_STRIP_ON);
 		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
...
@@ -1180,6 +1172,19 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
 		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
 			     TX_VLAN_TAG_OFF);
 	}
+}
+
+static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	u16 port = sky2->port;
+
+	netif_tx_lock_bh(dev);
+	napi_disable(&hw->napi);
+
+	sky2->vlgrp = grp;
+	sky2_set_vlan_mode(hw, port, grp != NULL);
 
 	sky2_read32(hw, B0_Y2_SP_LISR);
 	napi_enable(&hw->napi);
...
@@ -1418,6 +1423,10 @@ static int sky2_up(struct net_device *dev)
 	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
 			   TX_RING_SIZE - 1);
 
+#ifdef SKY2_VLAN_TAG_USED
+	sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
+#endif
+
 	err = sky2_rx_start(sky2);
 	if (err)
 		goto err_out;
...
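The sky2 refactor pulls the VLAN strip/insert register writes out into sky2_set_vlan_mode(), which is called both when the stack registers a vlan_group and again from sky2_up(), so the stored offload setting is reapplied after the port is brought back up. A toy sketch of that shape, with a stand-in flag instead of the GMF control registers and invented names throughout:

#include <stdbool.h>
#include <stdio.h>

struct hw {
	bool vlan_offload;	/* stand-in for the RX/TX GMF control bits */
};

struct port {
	struct hw hw;
	bool have_vlan_group;	/* the saved software state (sky2->vlgrp) */
};

/* Single place that touches the "hardware". */
static void set_vlan_mode(struct hw *hw, bool onoff)
{
	hw->vlan_offload = onoff;
	printf("vlan offload %s\n", onoff ? "on" : "off");
}

static void vlan_register(struct port *p, bool grp)
{
	p->have_vlan_group = grp;
	set_vlan_mode(&p->hw, grp);
}

static void port_up(struct port *p)
{
	/* Re-apply the saved setting, as sky2_up() now does. */
	set_vlan_mode(&p->hw, p->have_vlan_group);
}

int main(void)
{
	struct port p = { { false }, false };

	vlan_register(&p, true);	/* user enables VLANs */
	port_up(&p);			/* setting survives a restart */
	return 0;
}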
drivers/net/tokenring/3c359.h
...
@@ -264,7 +264,7 @@ struct xl_private {
 	u16 asb;
 
 	u8 __iomem *xl_mmio;
-	char *xl_card_name;
+	const char *xl_card_name;
 	struct pci_dev *pdev;
 
 	spinlock_t xl_lock;
...
drivers/net/tokenring/olympic.h
...
@@ -254,7 +254,7 @@ struct olympic_private {
 
 	u8 __iomem *olympic_mmio;
 	u8 __iomem *olympic_lap;
 	struct pci_dev *pdev;
-	char *olympic_card_name;
+	const char *olympic_card_name;
 	spinlock_t olympic_lock;
...
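Both token-ring headers make the card-name pointer const char *, matching the read-only name strings it points at; the compiler will then reject accidental writes through it. A one-file illustration (the adapter name below is made up, not taken from either driver):

#include <stdio.h>

struct card {
	const char *card_name;
};

int main(void)
{
	struct card c = { .card_name = "example token-ring adapter" };

	printf("%s\n", c.card_name);
	/* c.card_name[0] = 'x'; would now be rejected at compile time */
	return 0;
}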
drivers/net/tulip/uli526x.c
...
@@ -225,6 +225,9 @@ static void uli526x_set_filter_mode(struct net_device *);
 static const struct ethtool_ops netdev_ethtool_ops;
 static u16 read_srom_word(long, int);
 static irqreturn_t uli526x_interrupt(int, void *);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void uli526x_poll(struct net_device *dev);
+#endif
 static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
 static void allocate_rx_buffer(struct uli526x_board_info *);
 static void update_cr6(u32, unsigned long);
...
@@ -339,6 +342,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
 	dev->get_stats = &uli526x_get_stats;
 	dev->set_multicast_list = &uli526x_set_filter_mode;
 	dev->ethtool_ops = &netdev_ethtool_ops;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = &uli526x_poll;
+#endif
 	spin_lock_init(&db->lock);
...
@@ -681,8 +687,9 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
 	db->cr5_data = inl(ioaddr + DCR5);
 	outl(db->cr5_data, ioaddr + DCR5);
 	if ( !(db->cr5_data & 0x180c1) ) {
+		spin_unlock_irqrestore(&db->lock, flags);
+		/* Restore CR7 to enable interrupt mask */
 		outl(db->cr7_data, ioaddr + DCR7);
-		spin_unlock_irqrestore(&db->lock, flags);
 		return IRQ_HANDLED;
 	}
...
@@ -715,6 +722,13 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void uli526x_poll(struct net_device *dev)
+{
+	/* ISR grabs the irqsave lock, so this should be safe */
+	uli526x_interrupt(dev->irq, dev);
+}
+#endif
+
 /*
  *	Free TX resource after TX complete
...
drivers/net/ucc_geth.c
...
@@ -237,7 +237,7 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
 	skb->dev = ugeth->dev;
 
 	out_be32(&((struct qe_bd __iomem *)bd)->buf,
-		 dma_map_single(NULL,
+		 dma_map_single(&ugeth->dev->dev,
 				skb->data,
 				ugeth->ug_info->uf_info.max_rx_buf_length +
 				UCC_GETH_RX_DATA_BUF_ALIGNMENT,
...
@@ -2158,7 +2158,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
 			continue;
 		for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
 			if (ugeth->tx_skbuff[i][j]) {
-				dma_unmap_single(NULL,
+				dma_unmap_single(&ugeth->dev->dev,
 						 in_be32(&((struct qe_bd __iomem *)bd)->buf),
 						 (in_be32((u32 __iomem *)bd) &
 						  BD_LENGTH_MASK),
...
@@ -2186,7 +2186,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
 		bd = ugeth->p_rx_bd_ring[i];
 		for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
 			if (ugeth->rx_skbuff[i][j]) {
-				dma_unmap_single(NULL,
+				dma_unmap_single(&ugeth->dev->dev,
 						 in_be32(&((struct qe_bd __iomem *)bd)->buf),
 						 ugeth->ug_info->
 						 uf_info.max_rx_buf_length +
...
@@ -3406,7 +3406,8 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* set up the buffer descriptor */
 	out_be32(&((struct qe_bd __iomem *)bd)->buf,
-		      dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
+		      dma_map_single(&ugeth->dev->dev, skb->data,
+			      skb->len, DMA_TO_DEVICE));
 
 	/* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
...
drivers/net/usb/asix.c
...
@@ -1440,6 +1440,10 @@ static const struct usb_device_id products [] = {
 	// Belkin F5D5055
 	USB_DEVICE(0x050d, 0x5055),
 	.driver_info = (unsigned long) &ax88178_info,
+}, {
+	// Apple USB Ethernet Adapter
+	USB_DEVICE(0x05ac, 0x1402),
+	.driver_info = (unsigned long) &ax88772_info,
 },
 	{ },		// END
 };
...
drivers/net/usb/rndis_host.c
...
@@ -194,7 +194,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
 			dev_dbg(&info->control->dev,
 				"rndis response error, code %d\n", retval);
 		}
-		msleep(2);
+		msleep(20);
 	}
 	dev_dbg(&info->control->dev, "rndis response timeout\n");
 	return -ETIMEDOUT;
...
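The rndis_host change is a single constant: the response-polling loop now sleeps 20 ms per pass instead of 2 ms, so the loop's iteration count times the sleep sets the effective wait before the -ETIMEDOUT path is taken. A userspace sketch of that kind of bounded poll, with device_ready() as a made-up stand-in for checking the RNDIS response buffer:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for "has the response arrived yet?" */
static bool device_ready(int attempt)
{
	return attempt >= 3;	/* pretend it shows up on the 4th poll */
}

static int wait_for_response(void)
{
	const struct timespec delay = { 0, 20 * 1000 * 1000 };	/* 20 ms */

	for (int attempt = 0; attempt < 100; attempt++) {	/* ~2 s total */
		if (device_ready(attempt))
			return 0;
		nanosleep(&delay, NULL);
	}
	return -1;	/* analogous to -ETIMEDOUT */
}

int main(void)
{
	printf("wait_for_response() = %d\n", wait_for_response());
	return 0;
}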
drivers/net/virtio_net.c
...
@@ -470,8 +470,7 @@ static void virtnet_remove(struct virtio_device *vdev)
 		kfree_skb(skb);
 		vi->num--;
 	}
-	while ((skb = __skb_dequeue(&vi->send)) != NULL)
-		kfree_skb(skb);
+	__skb_queue_purge(&vi->send);
 
 	BUG_ON(vi->num != 0);
...
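Here, and in the xen-netfront hunks further down, an open-coded "dequeue until empty, freeing each skb" loop is replaced by the existing __skb_queue_purge() helper, which performs the same drain in one call. A userspace analogue of wrapping that drain in a helper (the list type below is invented):

#include <stdlib.h>

struct node {
	struct node *next;
};

struct queue {
	struct node *head;
};

static struct node *dequeue(struct queue *q)
{
	struct node *n = q->head;

	if (n)
		q->head = n->next;
	return n;
}

/* One call drains and frees everything, like __skb_queue_purge(). */
static void queue_purge(struct queue *q)
{
	struct node *n;

	while ((n = dequeue(q)) != NULL)
		free(n);
}

int main(void)
{
	struct queue q = { NULL };

	for (int i = 0; i < 3; i++) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			return 1;
		n->next = q.head;
		q.head = n;
	}
	queue_purge(&q);
	return 0;
}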
drivers/net/wan/hdlc.c
...
@@ -43,8 +43,7 @@ static const char* version = "HDLC support module revision 1.22";
 
 #undef DEBUG_LINK
 
-static struct hdlc_proto *first_proto = NULL;
-
+static struct hdlc_proto *first_proto;
 
 static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
 {
...
@@ -314,21 +313,25 @@ void detach_hdlc_protocol(struct net_device *dev)
 
 void register_hdlc_protocol(struct hdlc_proto *proto)
 {
+	rtnl_lock();
 	proto->next = first_proto;
 	first_proto = proto;
+	rtnl_unlock();
 }
 
 void unregister_hdlc_protocol(struct hdlc_proto *proto)
 {
-	struct hdlc_proto **p = &first_proto;
-	while (*p) {
-		if (*p == proto) {
-			*p = proto->next;
-			return;
-		}
+	struct hdlc_proto **p;
+
+	rtnl_lock();
+	p = &first_proto;
+	while (*p != proto) {
+		BUG_ON(!*p);
 		p = &((*p)->next);
 	}
+	*p = proto->next;
+	rtnl_unlock();
 }
...
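The hdlc.c hunks take the RTNL around protocol-list insertion and removal, and turn "unregistering something that was never registered" into a hard BUG_ON() instead of a silent return. A userspace analogue of the same registry shape (compile with -pthread), with a pthread mutex standing in for rtnl_lock()/rtnl_unlock() and assert() standing in for BUG_ON(); names are invented:

#include <assert.h>
#include <pthread.h>

struct proto {
	const char *name;
	struct proto *next;
};

static struct proto *first_proto;
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

static void register_proto(struct proto *proto)
{
	pthread_mutex_lock(&registry_lock);
	proto->next = first_proto;
	first_proto = proto;
	pthread_mutex_unlock(&registry_lock);
}

static void unregister_proto(struct proto *proto)
{
	struct proto **p;

	pthread_mutex_lock(&registry_lock);
	p = &first_proto;
	while (*p != proto) {
		assert(*p);		/* BUG_ON(!*p): must be registered */
		p = &((*p)->next);
	}
	*p = proto->next;
	pthread_mutex_unlock(&registry_lock);
}

int main(void)
{
	struct proto a = { "cisco", NULL }, b = { "ppp", NULL };

	register_proto(&a);
	register_proto(&b);
	unregister_proto(&a);
	unregister_proto(&b);
	return 0;
}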
drivers/net/wan/hdlc_cisco.c
...
@@ -56,6 +56,7 @@ struct cisco_state {
 	cisco_proto settings;
 
 	struct timer_list timer;
+	spinlock_t lock;
 	unsigned long last_poll;
 	int up;
 	int request_sent;
...
@@ -158,6 +159,7 @@ static int cisco_rx(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
 	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct cisco_state *st = state(hdlc);
 	struct hdlc_header *data = (struct hdlc_header*)skb->data;
 	struct cisco_packet *cisco_data;
 	struct in_device *in_dev;
...
@@ -220,11 +222,12 @@ static int cisco_rx(struct sk_buff *skb)
 			goto rx_error;
 
 		case CISCO_KEEPALIVE_REQ:
-			state(hdlc)->rxseq = ntohl(cisco_data->par1);
-			if (state(hdlc)->request_sent &&
-			    ntohl(cisco_data->par2) == state(hdlc)->txseq) {
-				state(hdlc)->last_poll = jiffies;
-				if (!state(hdlc)->up) {
+			spin_lock(&st->lock);
+			st->rxseq = ntohl(cisco_data->par1);
+			if (st->request_sent &&
+			    ntohl(cisco_data->par2) == st->txseq) {
+				st->last_poll = jiffies;
+				if (!st->up) {
 					u32 sec, min, hrs, days;
 
 					sec = ntohl(cisco_data->time) / 1000;
 					min = sec / 60; sec -= min * 60;
...
@@ -232,12 +235,12 @@ static int cisco_rx(struct sk_buff *skb)
 					days = hrs / 24; hrs -= days * 24;
 					printk(KERN_INFO "%s: Link up (peer "
 					       "uptime %ud%uh%um%us)\n",
 					       dev->name, days, hrs,
 					       min, sec);
 					netif_dormant_off(dev);
-					state(hdlc)->up = 1;
+					st->up = 1;
 				}
 			}
+			spin_unlock(&st->lock);
 
 			dev_kfree_skb_any(skb);
 			return NET_RX_SUCCESS;
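The link-up message in the hunks above decomposes the peer's uptime, carried in the keepalive packet in milliseconds, into days, hours, minutes and seconds by repeated division and subtraction (the hours step falls in the unshown context between the two hunks). The same arithmetic as a standalone check, with an example value picked to give 1d 2h 3m 4s:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ms = 93784000;		/* 93784 s = 1d 2h 3m 4s */
	uint32_t sec, min, hrs, days;

	sec = ms / 1000;
	min = sec / 60; sec -= min * 60;
	hrs = min / 60; min -= hrs * 60;
	days = hrs / 24; hrs -= days * 24;
	printf("uptime %ud%uh%um%us\n", days, hrs, min, sec);
	return 0;
}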
...
@@ -261,24 +264,25 @@ static void cisco_timer(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
 	hdlc_device *hdlc = dev_to_hdlc(dev);
+	struct cisco_state *st = state(hdlc);
 
-	if (state(hdlc)->up &&
-	    time_after(jiffies, state(hdlc)->last_poll +
-		       state(hdlc)->settings.timeout * HZ)) {
-		state(hdlc)->up = 0;
+	spin_lock(&st->lock);
+	if (st->up &&
+	    time_after(jiffies, st->last_poll +
+		       st->settings.timeout * HZ)) {
+		st->up = 0;
 		printk(KERN_INFO "%s: Link down\n", dev->name);
 		netif_dormant_on(dev);
 	}
 
 	cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ,
-			     htonl(++state(hdlc)->txseq),
-			     htonl(state(hdlc)->rxseq));
-	state(hdlc)->request_sent = 1;
+			     htonl(++st->txseq),
+			     htonl(st->rxseq));
+	st->request_sent = 1;
+	spin_unlock(&st->lock);
 
-	state(hdlc)->timer.expires = jiffies + state(hdlc)->settings.interval * HZ;
-	state(hdlc)->timer.function = cisco_timer;
-	state(hdlc)->timer.data = arg;
-	add_timer(&state(hdlc)->timer);
+	st->timer.expires = jiffies + st->settings.interval * HZ;
+	st->timer.function = cisco_timer;
+	st->timer.data = arg;
+	add_timer(&st->timer);
 }
...
@@ -286,15 +290,20 @@ static void cisco_timer(unsigned long arg)
 static void cisco_start(struct net_device *dev)
 {
 	hdlc_device *hdlc = dev_to_hdlc(dev);
-	state(hdlc)->up = 0;
-	state(hdlc)->request_sent = 0;
-	state(hdlc)->txseq = state(hdlc)->rxseq = 0;
-
-	init_timer(&state(hdlc)->timer);
-	state(hdlc)->timer.expires = jiffies + HZ; /*First poll after 1s*/
-	state(hdlc)->timer.function = cisco_timer;
-	state(hdlc)->timer.data = (unsigned long)dev;
-	add_timer(&state(hdlc)->timer);
+	struct cisco_state *st = state(hdlc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&st->lock, flags);
+	st->up = 0;
+	st->request_sent = 0;
+	st->txseq = st->rxseq = 0;
+	spin_unlock_irqrestore(&st->lock, flags);
+
+	init_timer(&st->timer);
+	st->timer.expires = jiffies + HZ; /* First poll after 1 s */
+	st->timer.function = cisco_timer;
+	st->timer.data = (unsigned long)dev;
+	add_timer(&st->timer);
 }
...
@@ -302,10 +311,16 @@ static void cisco_start(struct net_device *dev)
 static void cisco_stop(struct net_device *dev)
 {
 	hdlc_device *hdlc = dev_to_hdlc(dev);
-	del_timer_sync(&state(hdlc)->timer);
+	struct cisco_state *st = state(hdlc);
+	unsigned long flags;
+
+	del_timer_sync(&st->timer);
+
+	spin_lock_irqsave(&st->lock, flags);
 	netif_dormant_on(dev);
-	state(hdlc)->up = 0;
-	state(hdlc)->request_sent = 0;
+	st->up = 0;
+	st->request_sent = 0;
+	spin_unlock_irqrestore(&st->lock, flags);
 }
...
@@ -367,6 +382,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
 			return result;
 
 		memcpy(&state(hdlc)->settings, &new_settings, size);
+		spin_lock_init(&state(hdlc)->lock);
 		dev->hard_start_xmit = hdlc->xmit;
 		dev->header_ops = &cisco_header_ops;
 		dev->type = ARPHRD_CISCO;
...
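Taken together, the hdlc_cisco.c hunks add a per-device spinlock so that the keepalive counters and the up/request_sent flags are only touched under st->lock, whether the access comes from the receive path (cisco_rx), the periodic timer (cisco_timer), or start/stop. A userspace analogue of that serialization (compile with -pthread); the two threads and the mutex stand in for the softirq and timer contexts and the spinlock, and every name here is invented:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct keepalive_state {
	pthread_mutex_t lock;
	unsigned txseq, rxseq;
	int up;
};

static struct keepalive_state st = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 0 };

/* Models the timer path: bump txseq and "send" a request. */
static void *timer_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 5; i++) {
		pthread_mutex_lock(&st.lock);
		st.txseq++;
		printf("request %u sent\n", st.txseq);
		pthread_mutex_unlock(&st.lock);
		usleep(10000);
	}
	return NULL;
}

/* Models the receive path: record the peer's sequence and mark link up. */
static void *rx_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 5; i++) {
		pthread_mutex_lock(&st.lock);
		st.rxseq++;
		if (!st.up) {
			st.up = 1;
			printf("link up\n");
		}
		pthread_mutex_unlock(&st.lock);
		usleep(10000);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, timer_thread, NULL);
	pthread_create(&b, NULL, rx_thread, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}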
drivers/net/xen-netfront.c
...
@@ -946,8 +946,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 		work_done++;
 	}
 
-	while ((skb = __skb_dequeue(&errq)))
-		kfree_skb(skb);
+	__skb_queue_purge(&errq);
 
 	work_done -= handle_incoming_queue(dev, &rxq);
...
@@ -1079,8 +1078,7 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
 		}
 	}
 
-	while ((skb = __skb_dequeue(&free_list)) != NULL)
-		dev_kfree_skb(skb);
+	__skb_queue_purge(&free_list);
 
 	spin_unlock_bh(&np->rx_lock);
 }
...