openeuler / raspberrypi-kernel
Commit b4c21639
Authored Jul 15, 2008 by David S. Miller
niu: Add TX multiqueue support.
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 92831bc3
Showing 1 changed file with 25 additions and 20 deletions.
drivers/net/niu.c
@@ -3236,10 +3236,14 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 
 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 {
+        struct netdev_queue *txq;
         u16 pkt_cnt, tmp;
-        int cons;
+        int cons, index;
         u64 cs;
 
+        index = (rp - np->tx_rings);
+        txq = netdev_get_tx_queue(np->dev, index);
+
         cs = rp->tx_cs;
         if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
                 goto out;
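The added lines recover the ring's index by pointer arithmetic (rp - np->tx_rings) and use it to look up the matching netdev TX queue with netdev_get_tx_queue(). A minimal, self-contained userspace C sketch of the pointer-difference part only (illustrative struct, not kernel code):

#include <stdio.h>

/* Stand-in for the driver's per-ring state; the fields are illustrative. */
struct tx_ring_info {
        int tx_channel;
};

int main(void)
{
        struct tx_ring_info tx_rings[4];
        struct tx_ring_info *rp = &tx_rings[2];

        /* Subtracting the array base from an element pointer yields the
         * element's index; the patch uses the same arithmetic to pick the
         * netdev TX queue that pairs with this ring. */
        long index = rp - tx_rings;

        printf("ring index = %ld\n", index);    /* prints 2 */
        return 0;
}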
@@ -3262,13 +3266,13 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
         smp_mb();
 
 out:
-        if (unlikely(netif_queue_stopped(np->dev) &&
+        if (unlikely(netif_tx_queue_stopped(txq) &&
                      (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
-                netif_tx_lock(np->dev);
-                if (netif_queue_stopped(np->dev) &&
+                __netif_tx_lock(txq, smp_processor_id());
+                if (netif_tx_queue_stopped(txq) &&
                     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
-                        netif_wake_queue(np->dev);
-                netif_tx_unlock(np->dev);
+                        netif_tx_wake_queue(txq);
+                __netif_tx_unlock(txq);
         }
 }
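The wake path keeps the driver's existing double-check shape, now applied per queue: a cheap unlocked test, then a re-test under the TX lock before the queue is actually woken, so a queue that another context has just stopped is not woken spuriously. A rough userspace sketch of that pattern with a pthread mutex (hypothetical txq_state type and maybe_wake() helper, not the kernel locking API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define WAKEUP_THRESH 16

/* Hypothetical stand-in for one TX queue's flow-control state. */
struct txq_state {
        pthread_mutex_t lock;
        bool stopped;
        int avail;              /* free descriptors */
};

static void maybe_wake(struct txq_state *q)
{
        /* Cheap unlocked test first... */
        if (q->stopped && q->avail > WAKEUP_THRESH) {
                pthread_mutex_lock(&q->lock);
                /* ...then re-check under the lock so we never wake a queue
                 * that a concurrent sender has just stopped again. */
                if (q->stopped && q->avail > WAKEUP_THRESH) {
                        q->stopped = false;
                        printf("queue woken\n");
                }
                pthread_mutex_unlock(&q->lock);
        }
}

int main(void)
{
        struct txq_state q = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .stopped = true,
                .avail = 64,
        };

        maybe_wake(&q);
        return 0;
}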
@@ -4061,6 +4065,8 @@ static int niu_alloc_channels(struct niu *np)
         np->num_rx_rings = parent->rxchan_per_port[port];
         np->num_tx_rings = parent->txchan_per_port[port];
 
+        np->dev->real_num_tx_queues = np->num_tx_rings;
+
         np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
                                GFP_KERNEL);
         err = -ENOMEM;
@@ -5686,7 +5692,7 @@ static int niu_open(struct net_device *dev)
                 goto out_free_irq;
         }
 
-        netif_start_queue(dev);
+        netif_tx_start_all_queues(dev);
 
         if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
                 netif_carrier_on(dev);
@@ -5710,7 +5716,7 @@ static void niu_full_shutdown(struct niu *np, struct net_device *dev)
         cancel_work_sync(&np->reset_task);
 
         niu_disable_napi(np);
-        netif_stop_queue(dev);
+        netif_tx_stop_all_queues(dev);
 
         del_timer_sync(&np->timer);
@@ -5971,7 +5977,7 @@ static void niu_netif_start(struct niu *np)
          * so long as all callers are assured to have free tx slots
          * (such as after niu_init_hw).
          */
-        netif_wake_queue(np->dev);
+        netif_tx_wake_all_queues(np->dev);
 
         niu_enable_napi(np);
@@ -6097,15 +6103,11 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
         return ret;
 }
 
-static struct tx_ring_info *tx_ring_select(struct niu *np, struct sk_buff *skb)
-{
-        return &np->tx_rings[0];
-}
-
 static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct niu *np = netdev_priv(dev);
         unsigned long align, headroom;
+        struct netdev_queue *txq;
         struct tx_ring_info *rp;
         struct tx_pkt_hdr *tp;
         unsigned int len, nfg;
@@ -6113,10 +6115,12 @@ static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
         int prod, i, tlen;
         u64 mapping, mrk;
 
-        rp = tx_ring_select(np, skb);
+        i = skb_get_queue_mapping(skb);
+        rp = &np->tx_rings[i];
+        txq = netdev_get_tx_queue(dev, i);
 
         if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
-                netif_stop_queue(dev);
+                netif_tx_stop_queue(txq);
                 dev_err(np->device, PFX "%s: BUG! Tx ring full when "
                         "queue awake!\n", dev->name);
                 rp->tx_errors++;
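With tx_ring_select() gone (it always returned ring 0), the transmit path now honours the queue index the networking core stored in the skb, so different flows can land on different rings. A toy C sketch of folding a flow hash onto a fixed number of rings (purely illustrative; the real mapping is chosen by the stack before niu_start_xmit() runs):

#include <stdio.h>

#define NUM_TX_RINGS 4

/* Toy queue selector: fold a per-flow hash onto the available rings. */
static unsigned int select_ring(unsigned int flow_hash)
{
        return flow_hash % NUM_TX_RINGS;
}

int main(void)
{
        unsigned int flows[] = { 0x12345678u, 0xdeadbeefu, 0x0badcafeu };

        for (unsigned int i = 0; i < 3; i++)
                printf("flow %08x -> ring %u\n", flows[i], select_ring(flows[i]));
        return 0;
}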
@@ -6215,9 +6219,9 @@ static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
         nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
 
         if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
-                netif_stop_queue(dev);
+                netif_tx_stop_queue(txq);
                 if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
-                        netif_wake_queue(dev);
+                        netif_tx_wake_queue(txq);
         }
 
         dev->trans_start = jiffies;
@@ -6275,7 +6279,7 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
         spin_unlock_irq(&np->lock);
 
         if (!err) {
-                netif_start_queue(dev);
+                netif_tx_start_all_queues(dev);
 
                 if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
                         netif_carrier_on(dev);
@@ -8532,9 +8536,10 @@ static struct net_device * __devinit niu_alloc_and_init(
                                              struct of_device *op,
                                              const struct niu_ops *ops,
                                              u8 port)
 {
-        struct net_device *dev = alloc_etherdev(sizeof(struct niu));
+        struct net_device *dev;
         struct niu *np;
 
+        dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
         if (!dev) {
                 dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
                 return NULL;
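The device is now sized for the hardware's full TX channel count up front (alloc_etherdev_mq(..., NIU_NUM_TXCHAN)), while the earlier niu_alloc_channels() hunk narrows real_num_tx_queues to the channels this port actually owns. A small C sketch of that allocate-the-maximum / advertise-the-actual split (hypothetical toy_netdev type and sizes, not the kernel allocator):

#include <stdio.h>
#include <stdlib.h>

#define MAX_TXCHAN 24           /* illustrative upper bound */

/* Hypothetical device with its per-queue state allocated in one block. */
struct toy_netdev {
        unsigned int num_tx_queues;       /* queues allocated */
        unsigned int real_num_tx_queues;  /* queues actually exposed */
        int queue_state[];                /* flexible array of per-queue state */
};

static struct toy_netdev *toy_alloc_mq(unsigned int queue_count)
{
        struct toy_netdev *dev = calloc(1, sizeof(*dev) + queue_count * sizeof(int));

        if (!dev)
                return NULL;
        dev->num_tx_queues = queue_count;
        dev->real_num_tx_queues = queue_count;  /* default: use them all */
        return dev;
}

int main(void)
{
        struct toy_netdev *dev = toy_alloc_mq(MAX_TXCHAN);

        if (!dev)
                return 1;
        /* Once the port's channel share is known, advertise only that many. */
        dev->real_num_tx_queues = 8;
        printf("allocated %u queues, exposing %u\n",
               dev->num_tx_queues, dev->real_num_tx_queues);
        free(dev);
        return 0;
}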