openanolis / cloud-kernel
Commit a13c1327, authored Dec 21, 2010 by David S. Miller
Merge branch 'for-davem' of
git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next-2.6
Parents: 34a52f36, c04bfc6b
Showing 4 changed files with 35 additions and 115 deletions (+35, -115).
drivers/net/sfc/efx.c          +14  -10
drivers/net/sfc/efx.h           +0   -2
drivers/net/sfc/net_driver.h    +2  -11
drivers/net/sfc/tx.c           +19  -92
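Taken together, the sfc changes merged below delete the driver's private nested TX stop/wake machinery (the per-channel tx_stop_count and tx_stop_lock, plus the per-queue stopped counter) and instead drive the networking core's per-queue state directly through the new tx_queue->core_txq pointer. To make the deleted pattern concrete, here is a minimal userspace C model of counted stop/wake nesting, in which the queue wakes only when the last stopper releases it; queue_stop() and queue_wake() are hypothetical stand-ins for the removed efx_stop_queue()/efx_wake_queue(), not kernel API.

/* Minimal userspace model of the counted stop/wake nesting being removed.
 * queue_stop()/queue_wake() are hypothetical stand-ins for the deleted
 * efx_stop_queue()/efx_wake_queue(); build with: cc -std=c11 demo.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	atomic_int stop_count;	/* like channel->tx_stop_count */
	atomic_bool stopped;	/* like the core netif queue state */
};

static void queue_stop(struct queue *q)
{
	atomic_fetch_add(&q->stop_count, 1);
	atomic_store(&q->stopped, true);	/* netif_tx_stop_queue() */
}

static void queue_wake(struct queue *q)
{
	/* Only the stopper that drops the count to zero wakes the queue,
	 * so nested stops from other sources cannot wake it early. */
	if (atomic_fetch_sub(&q->stop_count, 1) == 1)
		atomic_store(&q->stopped, false);	/* netif_tx_wake_queue() */
}

int main(void)
{
	/* The driver initialised tx_stop_count to 1, treating the queue
	 * as stopped until the start-of-day wake. */
	struct queue q = { .stop_count = 1, .stopped = true };

	queue_wake(&q);		/* start-of-day release */
	queue_stop(&q);
	queue_stop(&q);		/* nested stop */
	queue_wake(&q);
	printf("stopped after one wake:   %d\n", atomic_load(&q.stopped)); /* 1 */
	queue_wake(&q);
	printf("stopped after both wakes: %d\n", atomic_load(&q.stopped)); /* 0 */
	return 0;
}

Since the core has tracked stop state per netdev_queue for multiqueue devices, the diffs below can retire this private counter and let netif_tx_{stop,start,wake}_queue() act on tx_queue->core_txq directly.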
drivers/net/sfc/efx.c
@@ -461,9 +461,6 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
 		}
 	}
 
-	spin_lock_init(&channel->tx_stop_lock);
-	atomic_set(&channel->tx_stop_count, 1);
-
 	rx_queue = &channel->rx_queue;
 	rx_queue->efx = efx;
 	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
@@ -1406,11 +1403,11 @@ static void efx_start_all(struct efx_nic *efx)
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	efx_for_each_channel(channel, efx) {
-		if (efx_dev_registered(efx))
-			efx_wake_queue(channel);
+	if (efx_dev_registered(efx))
+		netif_tx_wake_all_queues(efx->net_dev);
+
+	efx_for_each_channel(channel, efx)
 		efx_start_channel(channel);
-	}
 
 	if (efx->legacy_irq)
 		efx->legacy_irq_enabled = true;
@@ -1498,9 +1495,7 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	if (efx_dev_registered(efx)) {
-		struct efx_channel *channel;
-		efx_for_each_channel(channel, efx)
-			efx_stop_queue(channel);
+		netif_tx_stop_all_queues(efx->net_dev);
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1896,6 +1891,7 @@ static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
 static int efx_register_netdev(struct efx_nic *efx)
 {
 	struct net_device *net_dev = efx->net_dev;
+	struct efx_channel *channel;
 	int rc;
 
 	net_dev->watchdog_timeo = 5 * HZ;
@@ -1918,6 +1914,14 @@ static int efx_register_netdev(struct efx_nic *efx)
 	if (rc)
 		goto fail_locked;
 
+	efx_for_each_channel(channel, efx) {
+		struct efx_tx_queue *tx_queue;
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			tx_queue->core_txq = netdev_get_tx_queue(
+				efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
+		}
+	}
+
 	/* Always start with carrier off; PHY events will detect the link */
 	netif_carrier_off(efx->net_dev);
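Worth noting in efx_register_netdev() above: each hardware TX queue caches its core netdev_queue in tx_queue->core_txq, and the index mapping divides by EFX_TXQ_TYPES because, per the comment removed from tx.c below, two hardware queues feed each core queue (each channel carries tx_queue[2]). A throwaway sketch of just that index arithmetic, assuming EFX_TXQ_TYPES is 2 as in this driver generation:

/* Sketch of the hardware-to-core TX queue index mapping used in
 * efx_register_netdev(); assumes EFX_TXQ_TYPES == 2 as in this driver. */
#include <stdio.h>

#define EFX_TXQ_TYPES 2

int main(void)
{
	for (unsigned hw = 0; hw < 8; hw++)
		printf("hardware TX queue %u -> core queue %u\n",
		       hw, hw / EFX_TXQ_TYPES);
	return 0;
}

With the back-pointer cached at registration time, the fast paths in tx.c no longer need to recompute netdev_get_tx_queue() on every stop or wake.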
drivers/net/sfc/efx.h
@@ -36,8 +36,6 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_channel *channel);
-extern void efx_wake_queue(struct efx_channel *channel);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
drivers/net/sfc/net_driver.h
@@ -136,6 +136,7 @@ struct efx_tx_buffer {
  * @efx: The associated Efx NIC
  * @queue: DMA queue number
  * @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
@@ -148,8 +149,6 @@ struct efx_tx_buffer {
  *	variable indicates that the queue is empty.  This is to
  *	avoid cache-line ping-pong between the xmit path and the
  *	completion path.
- * @stopped: Stopped count.
- *	Set if this TX queue is currently stopping its port.
  * @insert_count: Current insert pointer
  *	This is the number of buffers that have been added to the
  *	software ring.
@@ -179,7 +178,7 @@ struct efx_tx_queue {
 	struct efx_nic *efx ____cacheline_aligned_in_smp;
 	unsigned queue;
 	struct efx_channel *channel;
-	struct efx_nic *nic;
+	struct netdev_queue *core_txq;
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
@@ -188,7 +187,6 @@ struct efx_tx_queue {
 	/* Members used mainly on the completion path */
 	unsigned int read_count ____cacheline_aligned_in_smp;
 	unsigned int old_write_count;
-	int stopped;
 
 	/* Members used only on the xmit path */
 	unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -321,7 +319,6 @@ enum efx_rx_alloc_method {
  * @irq_moderation: IRQ moderation value (in hardware ticks)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
- * @reset_work: Scheduled reset work thread
  * @work_pending: Is work pending via NAPI?
  * @eventq: Event queue buffer
  * @eventq_mask: Event queue pointer mask
@@ -342,8 +339,6 @@ enum efx_rx_alloc_method {
 * @n_rx_overlength: Count of RX_OVERLENGTH errors
 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
 * @rx_queue: RX queue for this channel
- * @tx_stop_count: Core TX queue stop count
- * @tx_stop_lock: Core TX queue stop lock
 * @tx_queue: TX queues for this channel
 */
 struct efx_channel {
@@ -382,10 +377,6 @@ struct efx_channel {
 	bool rx_pkt_csummed;
 
 	struct efx_rx_queue rx_queue;
-
-	atomic_t tx_stop_count;
-	spinlock_t tx_stop_lock;
-
 	struct efx_tx_queue tx_queue[2];
 };
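The kernel-doc retained above is explicit that read_count is kept apart from the xmit-path members to avoid cache-line ping-pong between the completion and xmit paths, which is why both read_count and insert_count carry ____cacheline_aligned_in_smp in struct efx_tx_queue. A userspace illustration of the same layout idea, assuming 64-byte cache lines; the struct name is invented for the example:

/* Userspace illustration of keeping producer and consumer counters on
 * separate cache lines, as ____cacheline_aligned_in_smp does above.
 * A 64-byte line size is assumed; the struct name is invented. */
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64

struct ring_counters {
	/* completion path (consumer) owns this line */
	alignas(CACHELINE) unsigned int read_count;
	unsigned int old_write_count;
	/* xmit path (producer) owns this line; its stores no longer
	 * invalidate the consumer's cached copy of read_count */
	alignas(CACHELINE) unsigned int insert_count;
	unsigned int write_count;
};

int main(void)
{
	printf("read_count at offset %zu, insert_count at offset %zu\n",
	       offsetof(struct ring_counters, read_count),
	       offsetof(struct ring_counters, insert_count));
	return 0;
}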
drivers/net/sfc/tx.c
@@ -30,50 +30,6 @@
  */
 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
-/* We need to be able to nest calls to netif_tx_stop_queue(), partly
- * because of the 2 hardware queues associated with each core queue,
- * but also so that we can inhibit TX for reasons other than a full
- * hardware queue. */
-void efx_stop_queue(struct efx_channel *channel)
-{
-	struct efx_nic *efx = channel->efx;
-	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
-	if (!tx_queue)
-		return;
-
-	spin_lock_bh(&channel->tx_stop_lock);
-	netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
-
-	atomic_inc(&channel->tx_stop_count);
-	netif_tx_stop_queue(
-		netdev_get_tx_queue(efx->net_dev,
-				    tx_queue->queue / EFX_TXQ_TYPES));
-
-	spin_unlock_bh(&channel->tx_stop_lock);
-}
-
-/* Decrement core TX queue stop count and wake it if the count is 0 */
-void efx_wake_queue(struct efx_channel *channel)
-{
-	struct efx_nic *efx = channel->efx;
-	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
-	if (!tx_queue)
-		return;
-
-	local_bh_disable();
-	if (atomic_dec_and_lock(&channel->tx_stop_count,
-				&channel->tx_stop_lock)) {
-		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
-		netif_tx_wake_queue(
-			netdev_get_tx_queue(efx->net_dev,
-					    tx_queue->queue / EFX_TXQ_TYPES));
-		spin_unlock(&channel->tx_stop_lock);
-	}
-	local_bh_enable();
-}
-
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       struct efx_tx_buffer *buffer)
 {
@@ -234,9 +190,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 			 * checked.  Update the xmit path's
 			 * copy of read_count.
 			 */
-			++tx_queue->stopped;
+			netif_tx_stop_queue(tx_queue->core_txq);
 			/* This memory barrier protects the
-			 * change of stopped from the access
+			 * change of queue state from the access
 			 * of read_count. */
 			smp_mb();
 			tx_queue->old_read_count =
@@ -244,10 +200,12 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
 			q_space = efx->txq_entries - 1 - fill_level;
-			if (unlikely(q_space-- <= 0))
-				goto stop;
+			if (unlikely(q_space-- <= 0)) {
+				rc = NETDEV_TX_BUSY;
+				goto unwind;
+			}
 			smp_mb();
-			--tx_queue->stopped;
+			netif_tx_start_queue(tx_queue->core_txq);
 		}
 
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -307,13 +265,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
 	/* Mark the packet as transmitted, and free the SKB ourselves */
 	dev_kfree_skb_any(skb);
-	goto unwind;
-
- stop:
-	rc = NETDEV_TX_BUSY;
-
-	if (tx_queue->stopped == 1)
-		efx_stop_queue(tx_queue->channel);
 
 unwind:
 	/* Work backwards until we hit the original insert pointer value */
@@ -400,32 +351,21 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
-	struct netdev_queue *queue;
 
 	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
 	efx_dequeue_buffers(tx_queue, index);
 
 	/* See if we need to restart the netif queue.  This barrier
-	 * separates the update of read_count from the test of
-	 * stopped. */
+	 * separates the update of read_count from the test of the
+	 * queue state. */
 	smp_mb();
-	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
+	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+	    likely(efx->port_enabled)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
-
-			/* Do this under netif_tx_lock(), to avoid racing
-			 * with efx_xmit(). */
-			queue = netdev_get_tx_queue(
-				efx->net_dev,
-				tx_queue->queue / EFX_TXQ_TYPES);
-			__netif_tx_lock(queue, smp_processor_id());
-			if (tx_queue->stopped) {
-				tx_queue->stopped = 0;
-				efx_wake_queue(tx_queue->channel);
-			}
-			__netif_tx_unlock(queue);
+			netif_tx_wake_queue(tx_queue->core_txq);
 		}
 	}
@@ -487,7 +427,6 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 	tx_queue->read_count = 0;
 	tx_queue->old_read_count = 0;
 	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
-	BUG_ON(tx_queue->stopped);
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
@@ -523,12 +462,6 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 	/* Free up TSO header cache */
 	efx_fini_tso(tx_queue);
-
-	/* Release queue's stop on port, if any */
-	if (tx_queue->stopped) {
-		tx_queue->stopped = 0;
-		efx_wake_queue(tx_queue->channel);
-	}
 }
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
@@ -770,9 +703,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			 * since the xmit path last checked.  Update
 			 * the xmit path's copy of read_count.
 			 */
-			++tx_queue->stopped;
+			netif_tx_stop_queue(tx_queue->core_txq);
 			/* This memory barrier protects the change of
-			 * stopped from the access of read_count. */
+			 * queue state from the access of read_count. */
 			smp_mb();
 			tx_queue->old_read_count =
 				ACCESS_ONCE(tx_queue->read_count);
@@ -784,7 +717,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			return 1;
 		}
 		smp_mb();
-		--tx_queue->stopped;
+		netif_tx_start_queue(tx_queue->core_txq);
 	}
 
 	insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -1124,8 +1057,10 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	while (1) {
 		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
-		if (unlikely(rc))
-			goto stop;
+		if (unlikely(rc)) {
+			rc2 = NETDEV_TX_BUSY;
+			goto unwind;
+		}
 
 		/* Move onto the next fragment? */
 		if (state.in_len == 0) {
@@ -1154,14 +1089,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	netif_err(efx, tx_err, efx->net_dev,
 		  "Out of memory for TSO headers, or PCI mapping error\n");
 	dev_kfree_skb_any(skb);
-	goto unwind;
-
- stop:
-	rc2 = NETDEV_TX_BUSY;
-
-	/* Stop the queue if it wasn't stopped before. */
-	if (tx_queue->stopped == 1)
-		efx_stop_queue(tx_queue->channel);
 
 unwind:
 	/* Free the DMA mapping we were in the process of writing out */
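Both xmit paths above (efx_enqueue_skb() and the TSO variant in efx_tx_queue_insert()) now share one protocol with the completion path: stop the core queue, issue smp_mb(), then re-read read_count and restart the queue if completions freed space in the window; efx_xmit_done() mirrors it by updating read_count, issuing smp_mb(), and only then testing netif_tx_queue_stopped(). The barrier pairing closes the race where a completion lands just as the xmit path stops the queue: at least one side must observe the other's write. A compressed single-threaded userspace model of that handshake, with C11 seq_cst fences standing in for smp_mb() and all names invented:

/* Single-threaded userspace model of the stop/recheck/wake handshake
 * above; C11 fences stand in for smp_mb() and all names are invented. */
#include <stdatomic.h>
#include <stdio.h>

#define RING_ENTRIES 16u			/* like efx->txq_entries */
#define WAKE_THRESHOLD (RING_ENTRIES / 2)	/* like EFX_TXQ_THRESHOLD() */

static atomic_uint read_count;		/* advanced by the completion path */
static atomic_uint insert_count;	/* advanced by the xmit path */
static atomic_bool queue_stopped;

/* xmit path, on finding the ring apparently full */
static int xmit_ring_full(void)
{
	atomic_store(&queue_stopped, 1);		/* netif_tx_stop_queue() */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	/* re-read the consumer index: completions may have freed space
	 * between our fill-level check and the stop */
	unsigned fill = atomic_load(&insert_count) - atomic_load(&read_count);
	if (fill < RING_ENTRIES - 1) {
		atomic_store(&queue_stopped, 0);	/* netif_tx_start_queue() */
		return 0;				/* keep transmitting */
	}
	return 1;					/* stay stopped: NETDEV_TX_BUSY */
}

/* completion path, after reclaiming descriptors */
static void completions_done(unsigned n)
{
	atomic_fetch_add(&read_count, n);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	unsigned fill = atomic_load(&insert_count) - atomic_load(&read_count);
	if (atomic_load(&queue_stopped) && fill < WAKE_THRESHOLD)
		atomic_store(&queue_stopped, 0);	/* netif_tx_wake_queue() */
}

int main(void)
{
	atomic_store(&insert_count, RING_ENTRIES);	/* ring looks full */
	printf("xmit stops queue: %d\n", xmit_ring_full());		       /* 1 */
	completions_done(4);
	printf("still stopped below threshold? %d\n",
	       atomic_load(&queue_stopped));				       /* 1 */
	completions_done(8);
	printf("stopped after crossing threshold? %d\n",
	       atomic_load(&queue_stopped));				       /* 0 */
	return 0;
}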