openanolis / cloud-kernel
Commit 4e3ceac6

Authored May 20, 2006 by Jeff Garzik

    Merge branch 'upstream-fixes' into upstream

Parents: badc48e6, bb02aacc

Showing 8 changed files with 131 additions and 302 deletions (+131 -302)
drivers/net/forcedeth.c                          +84  -228
drivers/net/pcmcia/axnet_cs.c                     +2   -11
drivers/net/skge.c                                +3    -5
drivers/net/sky2.c                               +32   -22
drivers/net/sky2.h                                +2    -0
drivers/net/tulip/winbond-840.c                   +2    -2
drivers/net/via-rhine.c                           +3   -31
drivers/net/wireless/bcm43xx/bcm43xx_main.c       +3    -3
drivers/net/forcedeth.c

@@ -106,7 +106,6 @@
  *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
  *	0.52: 20 Jan 2006: Add MSI/MSIX support.
  *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
- *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -118,7 +117,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.54"
+#define FORCEDETH_VERSION		"0.53"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -711,72 +710,6 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 	}
 }
 
-static int using_multi_irqs(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
-		return 0;
-	else
-		return 1;
-}
-
-static void nv_enable_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
-	} else {
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
-}
-
-static void nv_disable_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
-	} else {
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
-}
-
-/* In MSIX mode, a write to irqmask behaves as XOR */
-static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
-{
-	u8 __iomem *base = get_hwbase(dev);
-
-	writel(mask, base + NvRegIrqMask);
-}
-
-static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
-
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		writel(mask, base + NvRegIrqMask);
-	} else {
-		if (np->msi_flags & NV_MSI_ENABLED)
-			writel(0, base + NvRegMSIIrqMask);
-		writel(0, base + NvRegIrqMask);
-	}
-}
-
 #define MII_READ	(-1)
 /* mii_rw: read/write a register on the PHY.
  *
@@ -1086,25 +1019,24 @@ static void nv_do_rx_refill(unsigned long data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
 	if (nv_alloc_rx(dev)) {
-		spin_lock_irq(&np->lock);
+		spin_lock(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock_irq(&np->lock);
+		spin_unlock(&np->lock);
 	}
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		enable_irq(dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
@@ -1736,7 +1668,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 	 * guessed, there is probably a simpler approach.
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
-	nv_disable_irq(dev);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
 	spin_lock_bh(&dev->xmit_lock);
 	spin_lock(&np->lock);
 	/* stop engines */
@@ -1769,7 +1709,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		nv_enable_irq(dev);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+			enable_irq(dev->irq);
+		} else {
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
 	}
 	return 0;
 }
@@ -2160,16 +2108,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock_irq(&np->lock);
+		spin_lock(&np->lock);
 		nv_tx_done(dev);
-		spin_unlock_irq(&np->lock);
+		spin_unlock(&np->lock);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 						dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2179,7 +2127,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
@@ -2209,14 +2157,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 		}
 
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2226,7 +2174,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
@@ -2255,14 +2203,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			nv_link_irq(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			nv_linkchange(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2270,7 +2218,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
dev
->
name
,
events
);
}
if
(
i
>
max_interrupt_work
)
{
spin_lock
_irq
(
&
np
->
lock
);
spin_lock
(
&
np
->
lock
);
/* disable interrupts on the nic */
writel
(
NVREG_IRQ_OTHER
,
base
+
NvRegIrqMask
);
pci_push
(
base
);
...
...
@@ -2280,7 +2228,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
@@ -2303,11 +2251,10 @@ static void nv_do_nic_poll(unsigned long data)
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
 
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
 		mask = np->irqmask;
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2330,12 +2277,11 @@ static void nv_do_nic_poll(unsigned long data)
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
 
-	if (!using_multi_irqs(dev)) {
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
 		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
+		enable_irq(dev->irq);
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
 			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
@@ -2682,113 +2628,6 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
 
-static int nv_request_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
-	int ret = 1;
-	int i;
-
-	if (np->msi_flags & NV_MSI_X_CAPABLE) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			np->msi_x_entry[i].entry = i;
-		}
-		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
-			np->msi_flags |= NV_MSI_X_ENABLED;
-			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
-				/* Request irq for rx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_err;
-				}
-				/* Request irq for tx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_free_rx;
-				}
-				/* Request irq for link and timer handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_free_tx;
-				}
-				/* map interrupts to their respective vector */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
-			} else {
-				/* Request irq for all interrupts */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_err;
-				}
-
-				/* map interrupts to vector 0 */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-			}
-		}
-	}
-	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
-			np->msi_flags |= NV_MSI_ENABLED;
-			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-				pci_disable_msi(np->pci_dev);
-				np->msi_flags &= ~NV_MSI_ENABLED;
-				goto out_err;
-			}
-
-			/* map interrupts to vector 0 */
-			writel(0, base + NvRegMSIMap0);
-			writel(0, base + NvRegMSIMap1);
-			/* enable msi vector 0 */
-			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
-		}
-	}
-	if (ret != 0) {
-		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
-			goto out_err;
-	}
-
-	return 0;
-out_free_tx:
-	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
-out_free_rx:
-	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
-out_err:
-	return 1;
-}
-
-static void nv_free_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	int i;
-
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			free_irq(np->msi_x_entry[i].vector, dev);
-		}
-		pci_disable_msix(np->pci_dev);
-		np->msi_flags &= ~NV_MSI_X_ENABLED;
-	} else {
-		free_irq(np->pci_dev->irq, dev);
-		if (np->msi_flags & NV_MSI_ENABLED) {
-			pci_disable_msi(np->pci_dev);
-			np->msi_flags &= ~NV_MSI_ENABLED;
-		}
-	}
-}
-
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2881,16 +2720,12 @@ static int nv_open(struct net_device *dev)
 	udelay(10);
 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
 
-	nv_disable_hw_interrupts(dev, np->irqmask);
+	writel(0, base + NvRegIrqMask);
 	pci_push(base);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	if (nv_request_irq(dev)) {
-		goto out_drain;
-	}
-
 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
 			np->msi_x_entry[i].entry = i;
@@ -2964,7 +2799,7 @@ static int nv_open(struct net_device *dev)
 	}
 
 	/* ask for interrupts */
-	nv_enable_hw_interrupts(dev, np->irqmask);
+	writel(np->irqmask, base + NvRegIrqMask);
 
 	spin_lock_irq(&np->lock);
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -3008,6 +2843,7 @@ static int nv_close(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base;
+	int i;
 
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
@@ -3025,13 +2861,31 @@ static int nv_close(struct net_device *dev)
 	/* disable interrupts on the nic or we will lock up */
 	base = get_hwbase(dev);
-	nv_disable_hw_interrupts(dev, np->irqmask);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(np->irqmask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
 	pci_push(base);
 	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
-	nv_free_irq(dev);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
 
 	drain_ring(dev);
@@ -3120,18 +2974,20 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
 		/* packet format 3: supports 40-bit addressing */
 		np->desc_ver = DESC_VER_3;
+		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
 			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
 			       pci_name(pci_dev));
 		} else {
 			dev->features |= NETIF_F_HIGHDMA;
 			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
 		}
-		if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
-			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
-			       pci_name(pci_dev));
-			goto out_relreg;
-		} else {
-			dev->features |= NETIF_F_HIGHDMA;
-			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
-		}
-		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
+		if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+			       pci_name(pci_dev));
+		}
 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
 		/* packet format 2: supports jumbo frames */
 		np->desc_ver = DESC_VER_2;
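The forcedeth hunks above revert the 0.54 multi-IRQ rework: the using_multi_irqs(), nv_enable_irq()/nv_disable_irq(), nv_*_hw_interrupts() and nv_request_irq()/nv_free_irq() helpers disappear, and the open-coded MSI-X predicate returns at every call site. A minimal user-space sketch of that reinstated predicate, assuming illustrative flag values (the real constants are defined in drivers/net/forcedeth.c):

#include <assert.h>

#define NV_MSI_X_ENABLED      0x0200  /* assumed value, for illustration only */
#define NV_MSI_X_VECTORS_MASK 0x000f  /* assumed value, for illustration only */

/* Model of the open-coded check: true when the driver services all
 * interrupt sources through a single IRQ line. */
static int uses_single_irq(unsigned int msi_flags)
{
        return !(msi_flags & NV_MSI_X_ENABLED) ||
               ((msi_flags & NV_MSI_X_ENABLED) &&
                ((msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1));
}

int main(void)
{
        assert(uses_single_irq(0));                       /* legacy INTx or MSI */
        assert(uses_single_irq(NV_MSI_X_ENABLED | 0x1));  /* one MSI-X vector */
        assert(!uses_single_irq(NV_MSI_X_ENABLED | 0x3)); /* rx/tx/other vectors */
        return 0;
}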
drivers/net/pcmcia/axnet_cs.c

@@ -1691,17 +1691,6 @@ static void do_set_multicast_list(struct net_device *dev)
 		memset(ei_local->mcfilter, 0xFF, 8);
 	}
 
-	/*
-	 * DP8390 manuals don't specify any magic sequence for altering
-	 * the multicast regs on an already running card. To be safe, we
-	 * ensure multicast mode is off prior to loading up the new hash
-	 * table. If this proves to be not enough, we can always resort
-	 * to stopping the NIC, loading the table and then restarting.
-	 */
-
-	if (netif_running(dev))
-		outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
-
 	outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
 	for(i = 0; i < 8; i++) {

@@ -1715,6 +1704,8 @@ static void do_set_multicast_list(struct net_device *dev)
 		outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
 	else
 		outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
+
+	outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
 }
 
 /*
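The axnet_cs change drops the "switch multicast off while reloading the hash table" precaution and instead issues a NODMA+PAGE0+START command once the new filter is written. The filter itself is the classic DP8390-style 64-bit multicast hash table seen in the memset above; a small user-space model of how such a table answers accept/reject queries (bucket numbering is illustrative, the hardware derives the bucket from a CRC of the destination address):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* 64-bit hash filter: bit n of the 8-byte table accepts hash bucket n. */
static uint8_t mcfilter[8];

static void filter_accept_all_multicast(void)
{
        memset(mcfilter, 0xFF, sizeof(mcfilter)); /* as in the hunk above */
}

static int filter_accepts(unsigned bucket)        /* bucket in 0..63 */
{
        return (mcfilter[bucket >> 3] >> (bucket & 7)) & 1;
}

int main(void)
{
        filter_accept_all_multicast();
        printf("bucket 42 accepted: %d\n", filter_accepts(42));
        return 0;
}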
drivers/net/skge.c

@@ -78,8 +78,7 @@ static const struct pci_device_id skge_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
-	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },	/* DGE-530T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
 	{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },

@@ -402,7 +401,7 @@ static int skge_set_ring_param(struct net_device *dev,
 	int err;
 
 	if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
-	    p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
+	    p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE)
 		return -EINVAL;
 
 	skge->rx_ring.count = p->rx_pending;

@@ -2717,8 +2716,7 @@ static int skge_poll(struct net_device *dev, int *budget)
 		if (control & BMU_OWN)
 			break;
 
-		skb = skge_rx_get(skge, e, control, rd->status,
-				  le16_to_cpu(rd->csum2));
+		skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
 		if (likely(skb)) {
 			dev->last_rx = jiffies;
 			netif_receive_skb(skb);
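The skge_set_ring_param() fix tightens the lower bound on the TX ring: a maximally fragmented packet needs one descriptor per page fragment plus one for the linear header, so a ring smaller than MAX_SKB_FRAGS + 1 could never transmit such a packet. A standalone sketch of the check, with stand-in constants (the kernel derives MAX_SKB_FRAGS from the page size; the ring limits here are assumptions for illustration):

#include <stdio.h>

#define MAX_SKB_FRAGS    18   /* illustrative; real value depends on PAGE_SIZE */
#define MAX_RX_RING_SIZE 4096 /* illustrative */
#define MAX_TX_RING_SIZE 1024 /* illustrative */

/* Mirrors the tightened validation; skge returns -EINVAL on failure. */
static int ring_params_valid(unsigned rx_pending, unsigned tx_pending)
{
        if (rx_pending == 0 || rx_pending > MAX_RX_RING_SIZE ||
            tx_pending < MAX_SKB_FRAGS + 1 || tx_pending > MAX_TX_RING_SIZE)
                return 0;
        return 1;
}

int main(void)
{
        printf("tx=8:  %s\n", ring_params_valid(512, 8)  ? "ok" : "rejected");
        printf("tx=64: %s\n", ring_params_valid(512, 64) ? "ok" : "rejected");
        return 0;
}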
drivers/net/sky2.c

@@ -51,7 +51,7 @@
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.3"
+#define DRV_VERSION		"1.4"
 #define PFX			DRV_NAME " "
 
 /*

@@ -105,6 +105,7 @@ MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)
 static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },

@@ -235,6 +236,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 	}
 
 	if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+		sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
 		sky2_pci_write32(hw, PCI_DEV_REG3, 0);
 		reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
 		reg1 &= P_ASPM_CONTROL_MSK;

@@ -306,7 +308,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 	u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
 
 	if (sky2->autoneg == AUTONEG_ENABLE &&
-	    (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
+	    !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
 		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
 
 		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |

@@ -1020,19 +1022,26 @@ static int sky2_up(struct net_device *dev)
 	struct sky2_hw *hw = sky2->hw;
 	unsigned port = sky2->port;
 	u32 ramsize, rxspace, imask;
-	int err;
+	int cap, err = -ENOMEM;
 	struct net_device *otherdev = hw->dev[sky2->port^1];
 
-	/* Block bringing up both ports at the same time on a dual port card.
-	 * There is an unfixed bug where receiver gets confused and picks up
-	 * packets out of order. Until this is fixed, prevent data corruption.
+	/*
+	 * On dual port PCI-X card, there is an problem where status
+	 * can be received out of order due to split transactions
 	 */
-	if (otherdev && netif_running(otherdev)) {
-		printk(KERN_INFO PFX "dual port support is disabled.\n");
-		return -EBUSY;
-	}
-
-	err = -ENOMEM;
+	if (otherdev && netif_running(otherdev) &&
+	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
+		struct sky2_port *osky2 = netdev_priv(otherdev);
+		u16 cmd;
+
+		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
+		cmd &= ~PCI_X_CMD_MAX_SPLIT;
+		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
+
+		sky2->rx_csum = 0;
+		osky2->rx_csum = 0;
+	}
 
 	if (netif_msg_ifup(sky2))
 		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);

@@ -1910,6 +1919,12 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
 	}
 }
 
+/* Is status ring empty or is there more to do? */
+static inline int sky2_more_work(const struct sky2_hw *hw)
+{
+	return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
+}
+
 /* Process status response ring */
 static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 {

@@ -2182,19 +2197,19 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	if (status & Y2_IS_CHK_TXA2)
 		sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
 
-	if (status & Y2_IS_STAT_BMU)
-		sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-
 	work_done = sky2_status_intr(hw, work_limit);
 	*budget -= work_done;
 	dev0->quota -= work_done;
 
-	if (work_done >= work_limit)
+	if (status & Y2_IS_STAT_BMU)
+		sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
+	if (sky2_more_work(hw))
 		return 1;
 
 	netif_rx_complete(dev0);
 
-	status = sky2_read32(hw, B0_Y2_SP_LISR);
+	sky2_read32(hw, B0_Y2_SP_LISR);
 	return 0;
 }

@@ -3078,12 +3093,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
 	sky2->duplex = -1;
 	sky2->speed = -1;
 	sky2->advertising = sky2_supported_modes(hw);
-
-	/* Receive checksum disabled for Yukon XL
-	 * because of observed problems with incorrect
-	 * values when multiple packets are received in one interrupt
-	 */
-	sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
+	sky2->rx_csum = 1;
 
 	spin_lock_init(&sky2->phy_lock);
 	sky2->tx_pending = TX_DEF_PENDING;
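The sky2_poll() rework reorders the exit path: the status-BMU interrupt is now cleared after sky2_status_intr() has drained the ring, and the decision to stay in polling mode uses sky2_more_work() (driver consumer index versus the hardware's STAT_PUT_IDX) instead of the budget, closing the window where a status entry posted after the last check could be lost. A rough user-space model of that control flow, with plain integers standing in for the hardware indices:

#include <stdio.h>

static unsigned st_idx, put_idx = 3;  /* consumer index / hw producer index */

static int more_work(void) { return st_idx != put_idx; }

static int status_intr(int to_do)     /* drain up to to_do ring entries */
{
        int done = 0;
        while (done < to_do && st_idx != put_idx) { st_idx++; done++; }
        return done;
}

/* Mirrors the hunk: process first, clear the IRQ, then re-check the ring. */
static int poll(int budget)
{
        int work_done = status_intr(budget);

        printf("processed %d, clearing STAT_CTRL irq\n", work_done);

        if (more_work())
                return 1;   /* stay in polling mode */
        return 0;           /* netif_rx_complete + re-arm via ISR read */
}

int main(void)
{
        while (poll(2))
                ;
        return 0;
}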
drivers/net/sky2.h

@@ -214,6 +214,8 @@ enum csr_regs {
 enum {
 	Y2_VMAIN_AVAIL	= 1<<17,/* VMAIN available (YUKON-2 only) */
 	Y2_VAUX_AVAIL	= 1<<16,/* VAUX available (YUKON-2 only) */
+	Y2_HW_WOL_ON	= 1<<15,/* HW WOL On  (Yukon-EC Ultra A1 only) */
+	Y2_HW_WOL_OFF	= 1<<14,/* HW WOL On  (Yukon-EC Ultra A1 only) */
 	Y2_ASF_ENABLE	= 1<<13,/* ASF Unit Enable (YUKON-2 only) */
 	Y2_ASF_DISABLE	= 1<<12,/* ASF Unit Disable (YUKON-2 only) */
 	Y2_CLK_RUN_ENA	= 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
drivers/net/tulip/winbond-840.c

@@ -850,7 +850,7 @@ static void init_rxtx_rings(struct net_device *dev)
 			break;
 		skb->dev = dev;			/* Mark as being used by this device. */
 		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
-					skb->len,PCI_DMA_FROMDEVICE);
+					np->rx_buf_sz,PCI_DMA_FROMDEVICE);
 
 		np->rx_ring[i].buffer1 = np->rx_addr[i];
 		np->rx_ring[i].status = DescOwn;

@@ -1316,7 +1316,7 @@ static int netdev_rx(struct net_device *dev)
 			skb->dev = dev;			/* Mark as being used by this device. */
 			np->rx_addr[entry] = pci_map_single(np->pci_dev,
 							skb->data,
-							skb->len, PCI_DMA_FROMDEVICE);
+							np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
 		}
 		wmb();
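The winbond-840 fix changes the DMA mapping length for receive buffers from skb->len to np->rx_buf_sz. A freshly allocated sk_buff has a data length of zero until skb_put() is called, so the old code effectively mapped zero bytes of the RX buffer even though the hardware would later DMA a full frame into it. A minimal model of the difference, using a mock skb:

#include <stdio.h>
#include <stddef.h>

struct mock_skb { unsigned char data[1536]; size_t len; }; /* len is 0 when fresh */

static size_t dma_map_len_old(const struct mock_skb *skb)
{
        return skb->len;          /* 0 for a freshly allocated skb: the bug */
}

static size_t dma_map_len_new(size_t rx_buf_sz)
{
        return rx_buf_sz;         /* the whole receive buffer: the fix */
}

int main(void)
{
        struct mock_skb skb = { .len = 0 };
        printf("old mapping length: %zu\n", dma_map_len_old(&skb));
        printf("new mapping length: %zu\n", dma_map_len_new(sizeof(skb.data)));
        return 0;
}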
drivers/net/via-rhine.c

@@ -491,8 +491,6 @@ struct rhine_private {
 	u8 tx_thresh, rx_thresh;
 
 	struct mii_if_info mii_if;
-	struct work_struct tx_timeout_task;
-	struct work_struct check_media_task;
 	void __iomem *base;
 };

@@ -500,8 +498,6 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int  rhine_open(struct net_device *dev);
 static void rhine_tx_timeout(struct net_device *dev);
-static void rhine_tx_timeout_task(struct net_device *dev);
-static void rhine_check_media_task(struct net_device *dev);
 static int  rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static void rhine_tx(struct net_device *dev);

@@ -856,12 +852,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
-	INIT_WORK(&rp->tx_timeout_task,
-		  (void (*)(void *))rhine_tx_timeout_task, dev);
-
-	INIT_WORK(&rp->check_media_task,
-		  (void (*)(void *))rhine_check_media_task, dev);
-
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)

@@ -1108,11 +1098,6 @@ static void rhine_set_carrier(struct mii_if_info *mii)
 		       netif_carrier_ok(mii->dev));
 }
 
-static void rhine_check_media_task(struct net_device *dev)
-{
-	rhine_check_media(dev, 0);
-}
-
 static void init_registers(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);

@@ -1166,8 +1151,8 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
 	if (quirks & rqRhineI) {
 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
 
-		/* Do not call from ISR! */
-		msleep(1);
+		/* Can be called from ISR. Evil. */
+		mdelay(1);
 
 		/* 0x80 must be set immediately before turning it off */
 		iowrite8(0x80, ioaddr + MIICmd);

@@ -1255,16 +1240,6 @@ static int rhine_open(struct net_device *dev)
 }
 
 static void rhine_tx_timeout(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
-
-	/*
-	 * Move bulk of work outside of interrupt context
-	 */
-	schedule_work(&rp->tx_timeout_task);
-}
-
-static void rhine_tx_timeout_task(struct net_device *dev)
-{
-	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;

@@ -1677,7 +1652,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
 	spin_lock(&rp->lock);
 
 	if (intr_status & IntrLinkChange)
-		schedule_work(&rp->check_media_task);
+		rhine_check_media(dev, 0);
 	if (intr_status & IntrStatsMax) {
 		rp->stats.rx_crc_errors	+= ioread16(ioaddr + RxCRCErrs);
 		rp->stats.rx_missed_errors	+= ioread16(ioaddr + RxMissed);

@@ -1927,9 +1902,6 @@ static int rhine_close(struct net_device *dev)
 	spin_unlock_irq(&rp->lock);
 
 	free_irq(rp->pdev->irq, dev);
-
-	flush_scheduled_work();
-
 	free_rbufs(dev);
 	free_tbufs(dev);
 	free_ring(dev);
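The via-rhine changes undo the deferral of link-media handling to process context: rhine_error() again calls rhine_check_media() straight from the interrupt handler, so rhine_disable_linkmon() must busy-wait with mdelay(1) instead of sleeping in msleep(1), since sleeping in IRQ context is forbidden. A portable sketch of the busy-wait that mdelay() performs, assuming a POSIX monotonic clock is available:

#include <time.h>

/* Spin for roughly `ms` milliseconds without putting the caller to sleep. */
static void busy_delay_ms(long ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while ((now.tv_sec - start.tv_sec) * 1000L +
                 (now.tv_nsec - start.tv_nsec) / 1000000L < ms);
}

int main(void)
{
        busy_delay_ms(1);
        return 0;
}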
drivers/net/wireless/bcm43xx/bcm43xx_main.c

@@ -3271,6 +3271,9 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
 	bcm43xx_sysfs_register(bcm);
 	//FIXME: check for bcm43xx_sysfs_register failure. This function is a bit messy regarding unwinding, though...
 
+	/*FIXME: This should be handled by softmac instead. */
+	schedule_work(&bcm->softmac->associnfo.work);
+
 	assert(err == 0);
 out:
 	return err;

@@ -3931,9 +3934,6 @@ static int bcm43xx_resume(struct pci_dev *pdev)
 	netif_device_attach(net_dev);
 
-	/*FIXME: This should be handled by softmac instead. */
-	schedule_work(&bcm->softmac->associnfo.work);
-
 	dprintk(KERN_INFO PFX "Device resumed.\n");
 
 	return 0;