Commit f878b995
Authored Feb 15, 2011 by David S. Miller

    Merge branch 'for-davem' of
    git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next-2.6

Parents: 29e1846a 94b274bf
Showing 10 changed files with 204 additions and 65 deletions (+204, -65)
drivers/net/sfc/efx.c		+9	-22
drivers/net/sfc/efx.h		+2	-0
drivers/net/sfc/ethtool.c	+3	-3
drivers/net/sfc/net_driver.h	+49	-15
drivers/net/sfc/nic.c		+38	-13
drivers/net/sfc/regs.h		+6	-0
drivers/net/sfc/selftest.c	+1	-1
drivers/net/sfc/tx.c		+87	-3
net/core/dev.c			+2	-1
net/sched/sch_mqprio.c		+7	-7
drivers/net/sfc/efx.c

@@ -673,7 +673,7 @@ static void efx_fini_channels(struct efx_nic *efx)
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_fini_rx_queue(rx_queue);
-	efx_for_each_channel_tx_queue(tx_queue, channel)
+	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 		efx_fini_tx_queue(tx_queue);
 	efx_fini_eventq(channel);
 }

@@ -689,7 +689,7 @@ static void efx_remove_channel(struct efx_channel *channel)
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_remove_rx_queue(rx_queue);
-	efx_for_each_channel_tx_queue(tx_queue, channel)
+	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 		efx_remove_tx_queue(tx_queue);
 	efx_remove_eventq(channel);
 }

@@ -1271,21 +1271,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 static void efx_set_channels(struct efx_nic *efx)
 {
-	struct efx_channel *channel;
-	struct efx_tx_queue *tx_queue;
-
 	efx->tx_channel_offset =
 		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-
-	/* Channel pointers were set in efx_init_struct() but we now
-	 * need to clear them for TX queues in any RX-only channels. */
-	efx_for_each_channel(channel, efx) {
-		if (channel->channel - efx->tx_channel_offset >=
-		    efx->n_tx_channels) {
-			efx_for_each_channel_tx_queue(tx_queue, channel)
-				tx_queue->channel = NULL;
-		}
-	}
 }
 
 static int efx_probe_nic(struct efx_nic *efx)

@@ -1531,9 +1518,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
 	efx->irq_rx_adaptive = rx_adaptive;
 	efx->irq_rx_moderation = rx_ticks;
 	efx_for_each_channel(channel, efx) {
-		if (efx_channel_get_rx_queue(channel))
+		if (efx_channel_has_rx_queue(channel))
 			channel->irq_moderation = rx_ticks;
-		else if (efx_channel_get_tx_queue(channel, 0))
+		else if (efx_channel_has_tx_queues(channel))
 			channel->irq_moderation = tx_ticks;
 	}
 }

@@ -1849,6 +1836,7 @@ static const struct net_device_ops efx_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = efx_netpoll,
 #endif
+	.ndo_setup_tc		= efx_setup_tc,
 };
 
 static void efx_update_name(struct efx_nic *efx)

@@ -1910,10 +1898,8 @@ static int efx_register_netdev(struct efx_nic *efx)
 	efx_for_each_channel(channel, efx) {
 		struct efx_tx_queue *tx_queue;
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			tx_queue->core_txq = netdev_get_tx_queue(
-				efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
-		}
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_init_tx_queue_core_txq(tx_queue);
 	}
 
 	/* Always start with carrier off; PHY events will detect the link */

@@ -2401,7 +2387,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	int i, rc;
 
 	/* Allocate and initialise a struct net_device and struct efx_nic */
-	net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+				     EFX_MAX_RX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
 	net_dev->features |= (type->offload_features | NETIF_F_SG |
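A note on the last hunk: alloc_etherdev_mqs() takes separate TX and RX queue counts, where the older alloc_etherdev_mq() used a single count for both. A minimal sketch of the pattern, assuming the kernel API of this era; the private struct, function name, and queue counts here are hypothetical, not from the patch:

	#include <linux/etherdevice.h>

	struct my_priv { int dummy; };	/* hypothetical private struct */

	static struct net_device *my_alloc_netdev(void)
	{
		/* Reserve the maximum TX and RX queue counts up front; the
		 * counts actually in use can be trimmed later with
		 * netif_set_real_num_tx_queues() and
		 * netif_set_real_num_rx_queues(). */
		return alloc_etherdev_mqs(sizeof(struct my_priv), 8, 4);
	}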
drivers/net/sfc/efx.h

@@ -29,6 +29,7 @@
 extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
 extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
 extern netdev_tx_t

@@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
drivers/net/sfc/ethtool.c

@@ -631,7 +631,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
 	/* Find lowest IRQ moderation across all used TX queues */
 	coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
 	efx_for_each_channel(channel, efx) {
-		if (!efx_channel_get_tx_queue(channel, 0))
+		if (!efx_channel_has_tx_queues(channel))
 			continue;
 		if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
 			if (channel->channel < efx->n_rx_channels)

@@ -676,8 +676,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 	/* If the channel is shared only allow RX parameters to be set */
 	efx_for_each_channel(channel, efx) {
-		if (efx_channel_get_rx_queue(channel) &&
-		    efx_channel_get_tx_queue(channel, 0) &&
+		if (efx_channel_has_rx_queue(channel) &&
+		    efx_channel_has_tx_queues(channel) &&
 		    tx_usecs) {
 			netif_err(efx, drv, efx->net_dev, "Channel is shared. "
 				  "Only RX coalescing may be set\n");
drivers/net/sfc/net_driver.h

@@ -63,10 +63,12 @@
 /* Checksum generation is a per-queue option in hardware, so each
  * queue visible to the networking core is backed by two hardware TX
  * queues. */
-#define EFX_MAX_CORE_TX_QUEUES	EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD	1
-#define EFX_TXQ_TYPES		2
-#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC		2
+#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
+#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
+#define EFX_TXQ_TYPES		4
+#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
 /**
  * struct efx_special_buffer - An Efx special buffer

@@ -140,6 +142,7 @@ struct efx_tx_buffer {
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
  * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *	This is the number of buffers that have been removed from both rings.

@@ -182,6 +185,7 @@ struct efx_tx_queue {
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
+	bool initialised;
 
 	enum efx_flush_state flushed;
 
 	/* Members used mainly on the completion path */

@@ -377,7 +381,7 @@ struct efx_channel {
 	bool rx_pkt_csummed;
 
 	struct efx_rx_queue rx_queue;
-	struct efx_tx_queue tx_queue[2];
+	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
 };
 
 enum efx_led_mode {

@@ -938,18 +942,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
 	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
 }
 
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+	return channel->channel - channel->efx->tx_channel_offset <
+		channel->efx->n_tx_channels;
+}
+
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 {
-	struct efx_tx_queue *tx_queue = channel->tx_queue;
-	EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
-	return tx_queue->channel ? tx_queue + type : NULL;
+	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+			    type >= EFX_TXQ_TYPES);
+	return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+	return !(tx_queue->efx->net_dev->num_tc < 2 &&
+		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
 }
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
-	for (_tx_queue = efx_channel_get_tx_queue(channel, 0);		\
-	     _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
-	     _tx_queue++)
+	if (!efx_channel_has_tx_queues(_channel))			\
+		;							\
+	else								\
+		for (_tx_queue = (_channel)->tx_queue;			\
+		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+			     efx_tx_queue_used(_tx_queue);		\
+		     _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
+	for (_tx_queue = (_channel)->tx_queue;				\
+	     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;		\
+	     _tx_queue++)
 
 static inline struct efx_rx_queue *

@@ -959,18 +985,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
 	return &efx->channel[index]->rx_queue;
 }
 
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+	return channel->channel < channel->efx->n_rx_channels;
+}
+
 static inline struct efx_rx_queue *
 efx_channel_get_rx_queue(struct efx_channel *channel)
 {
-	return channel->channel < channel->efx->n_rx_channels ?
-		&channel->rx_queue : NULL;
+	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+	return &channel->rx_queue;
 }
 
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
-	for (_rx_queue = efx_channel_get_rx_queue(channel);		\
-	     _rx_queue;							\
-	     _rx_queue = NULL)
+	if (!efx_channel_has_rx_queue(_channel))			\
+		;							\
+	else								\
+		for (_rx_queue = &(_channel)->rx_queue;			\
+		     _rx_queue;						\
+		     _rx_queue = NULL)
 
 static inline struct efx_channel *
 efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
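The defines in the first hunk pack the queue type into the low bits of the hardware queue number: checksum offload in bit 0, high priority in bit 1, so each channel now owns EFX_TXQ_TYPES = 4 hardware queues. As an illustration, here is a standalone sketch (plain userspace C, not driver code) that decodes queue numbers the same way the driver does:

	/* Standalone sketch: models the queue numbering implied by the new
	 * defines.  A hardware TX queue number is channel * EFX_TXQ_TYPES
	 * plus a type, where the type ORs together the two flag values. */
	#include <stdio.h>

	#define EFX_TXQ_TYPE_OFFLOAD 1	/* flag: checksum offload enabled */
	#define EFX_TXQ_TYPE_HIGHPRI 2	/* flag: high-priority traffic class */
	#define EFX_TXQ_TYPES        4

	int main(void)
	{
		unsigned queue;

		/* Walk the queues of two example channels, decoding each. */
		for (queue = 0; queue < 2 * EFX_TXQ_TYPES; queue++)
			printf("hw queue %u -> channel %u, offload=%u, highpri=%u\n",
			       queue, queue / EFX_TXQ_TYPES,
			       !!(queue & EFX_TXQ_TYPE_OFFLOAD),
			       !!(queue & EFX_TXQ_TYPE_HIGHPRI));
		return 0;
	}

This encoding is also why the reworked efx_for_each_channel_tx_queue() can simply stop at the first queue for which efx_tx_queue_used() is false: with fewer than two traffic classes the HIGHPRI queues (bit 1 set) sit after the two normal-priority ones in each channel's array, while efx_for_each_possible_channel_tx_queue() still walks all four slots for teardown and flushing.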
drivers/net/sfc/nic.c

@@ -445,8 +445,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
-	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
+	efx_oword_t reg;
 
 	tx_queue->flushed = FLUSH_NONE;

@@ -454,7 +454,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 	efx_init_special_buffer(efx, &tx_queue->txd);
 
 	/* Push TX descriptor ring to card */
-	EFX_POPULATE_OWORD_10(tx_desc_ptr,
+	EFX_POPULATE_OWORD_10(reg,
 			      FRF_AZ_TX_DESCQ_EN, 1,
 			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
 			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,

@@ -470,17 +470,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
 		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
-				    !csum);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);
 	}
 
-	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
-			 tx_queue->queue);
+	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
+			 tx_queue->queue);
 
 	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
-		efx_oword_t reg;
-
 		/* Only 128 bits in this register */
 		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

@@ -491,6 +489,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 			set_bit_le(tx_queue->queue, (void *)&reg);
 		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
 	}
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_1(reg,
+				     FRF_BZ_TX_PACE,
+				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     FFE_BZ_TX_PACE_OFF :
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+				 tx_queue->queue);
+	}
 }
 
 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)

@@ -1238,8 +1246,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	/* Flush all tx queues in parallel */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel)
-			efx_flush_tx_queue(tx_queue);
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised)
+				efx_flush_tx_queue(tx_queue);
+		}
 	}
 
 	/* The hardware supports four concurrent rx flushes, each of which may

@@ -1262,8 +1272,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 				++rx_pending;
 			}
 		}
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				++tx_pending;
 		}
 	}

@@ -1278,8 +1289,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	/* Mark the queues as all flushed. We're going to return failure
 	 * leading to a reset, or fake up success anyway */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				netif_err(efx, hw, efx->net_dev,
 					  "tx queue %d flush command timed out\n",
 					  tx_queue->queue);

@@ -1682,6 +1694,19 @@ void efx_nic_init_common(struct efx_nic *efx)
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
 		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
 	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_4(temp,
+				     /* Default values */
+				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+				     FRF_BZ_TX_PACE_SB_AF, 0xb,
+				     FRF_BZ_TX_PACE_FB_BASE, 0,
+				     /* Allow large pace values in the
+				      * fast bin. */
+				     FRF_BZ_TX_PACE_BIN_TH,
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+	}
 }
 
 /* Register dump */
drivers/net/sfc/regs.h

@@ -2907,6 +2907,12 @@
 #define	FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
 #define	FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
 
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define	FFE_BZ_TX_PACE_OFF 0
+#define	FFE_BZ_TX_PACE_RESERVED 21
+
 /* DRIVER_EV */
 /* Sub-fields of an RX flush completion event */
 #define	FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
drivers/net/sfc/selftest.c

@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 			goto out;
 		}
 
-		/* Test both types of TX queue */
+		/* Test all enabled types of TX queue */
 		efx_for_each_channel_tx_queue(tx_queue, channel) {
 			state->offload_csum = (tx_queue->queue &
 					       EFX_TXQ_TYPE_OFFLOAD);
drivers/net/sfc/tx.c

@@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_tx_queue *tx_queue;
+	unsigned index, type;
 
 	if (unlikely(efx->port_inhibited))
 		return NETDEV_TX_BUSY;
 
-	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
-				    skb->ip_summed == CHECKSUM_PARTIAL ?
-				    EFX_TXQ_TYPE_OFFLOAD : 0);
+	index = skb_get_queue_mapping(skb);
+	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+	if (index >= efx->n_tx_channels) {
+		index -= efx->n_tx_channels;
+		type |= EFX_TXQ_TYPE_HIGHPRI;
+	}
+	tx_queue = efx_get_tx_queue(efx, index, type);
 
 	return efx_enqueue_skb(tx_queue, skb);
 }
 
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+
+	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
+	tx_queue->core_txq = netdev_get_tx_queue(
+		efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES +
+			      ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+			       efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+	unsigned tc;
+	int rc;
+
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+		return -EINVAL;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+	}
+
+	if (num_tc > net_dev->num_tc) {
+		/* Initialise high-priority queues as necessary */
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_possible_channel_tx_queue(tx_queue,
+							       channel) {
+				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+					continue;
+				if (!tx_queue->buffer) {
+					rc = efx_probe_tx_queue(tx_queue);
+					if (rc)
+						return rc;
+				}
+				if (!tx_queue->initialised)
+					efx_init_tx_queue(tx_queue);
+				efx_init_tx_queue_core_txq(tx_queue);
+			}
+		}
+	} else {
+		/* Reduce number of classes before number of queues */
+		net_dev->num_tc = num_tc;
+	}
+
+	rc = netif_set_real_num_tx_queues(net_dev,
+					  max_t(int, num_tc, 1) *
+					  efx->n_tx_channels);
+	if (rc)
+		return rc;
+
+	/* Do not destroy high-priority queues when they become
+	 * unused. We would have to flush them first, and it is
+	 * fairly difficult to flush a subset of TX queues. Leave
+	 * it to efx_fini_channels().
+	 */
+
+	net_dev->num_tc = num_tc;
+	return 0;
+}
+
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	unsigned fill_level;

@@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
+
+	tx_queue->initialised = true;
 }
 
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)

@@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->initialised)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "shutting down TX queue %d\n", tx_queue->queue);
 
+	tx_queue->initialised = false;
+
 	/* Flush TX queue, remove descriptor ring */
 	efx_nic_fini_tx(tx_queue);

@@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->buffer)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "destroying TX queue %d\n", tx_queue->queue);
 	efx_nic_remove_tx(tx_queue);
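efx_init_tx_queue_core_txq() is commented as the inverse of the lookup in efx_hard_start_xmit(): the stack's core queue index treats the second n_tx_channels queues as the HIGHPRI class, and the driver maps back from the hardware queue number. A standalone round-trip check (plain userspace C, not driver code; n_tx_channels is a made-up value and tx_channel_offset is assumed to be zero, i.e. TX and RX share channels):

	#include <assert.h>
	#include <stdio.h>

	#define EFX_TXQ_TYPE_OFFLOAD 1
	#define EFX_TXQ_TYPE_HIGHPRI 2
	#define EFX_TXQ_TYPES        4

	int main(void)
	{
		const unsigned n_tx_channels = 3;	/* made-up example value */
		unsigned core, type;

		for (core = 0; core < 2 * n_tx_channels; core++) {
			for (type = 0; type <= EFX_TXQ_TYPE_OFFLOAD; type++) {
				/* Forward lookup, as in efx_hard_start_xmit():
				 * the second block of core queues selects the
				 * HIGHPRI flag. */
				unsigned index = core, t = type, hw_queue;

				if (index >= n_tx_channels) {
					index -= n_tx_channels;
					t |= EFX_TXQ_TYPE_HIGHPRI;
				}
				hw_queue = index * EFX_TXQ_TYPES + t;

				/* Inverse, as in efx_init_tx_queue_core_txq() */
				assert(hw_queue / EFX_TXQ_TYPES +
				       ((hw_queue & EFX_TXQ_TYPE_HIGHPRI) ?
					n_tx_channels : 0) == core);
			}
		}
		printf("core <-> hardware queue mapping round-trips\n");
		return 0;
	}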
net/core/dev.c

@@ -1648,7 +1648,8 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 	if (txq < 1 || txq > dev->num_tx_queues)
 		return -EINVAL;
 
-	if (dev->reg_state == NETREG_REGISTERED) {
+	if (dev->reg_state == NETREG_REGISTERED ||
+	    dev->reg_state == NETREG_UNREGISTERING) {
 		ASSERT_RTNL();
 
 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
net/sched/sch_mqprio.c

@@ -29,18 +29,18 @@ static void mqprio_destroy(struct Qdisc *sch)
 	struct mqprio_sched *priv = qdisc_priv(sch);
 	unsigned int ntx;
 
-	if (!priv->qdiscs)
-		return;
-
-	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
-		qdisc_destroy(priv->qdiscs[ntx]);
+	if (priv->qdiscs) {
+		for (ntx = 0;
+		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
+		     ntx++)
+			qdisc_destroy(priv->qdiscs[ntx]);
+		kfree(priv->qdiscs);
+	}
 
 	if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
 		dev->netdev_ops->ndo_setup_tc(dev, 0);
 	else
 		netdev_set_num_tc(dev, 0);
-
-	kfree(priv->qdiscs);
 }
 
 static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
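mqprio reaches the driver through ndo_setup_tc() (efx_setup_tc() above), which fills net_dev->tc_to_txq so that traffic class tc owns the core queues from tc * n_tx_channels up to (tc + 1) * n_tx_channels - 1. A standalone sketch (plain userspace C, not kernel code) of how such a table steers a (tc, flow hash) pair onto a core queue; the struct name, channel count, and hash here are made-up for illustration:

	#include <stdio.h>

	struct tc_to_txq_entry {	/* hypothetical stand-in for the
					 * kernel's per-TC queue-range entry */
		unsigned offset;
		unsigned count;
	};

	int main(void)
	{
		const unsigned n_tx_channels = 4;	/* made-up example value */
		const unsigned hash = 5;		/* made-up flow hash */
		struct tc_to_txq_entry map[2];
		unsigned tc;

		/* Same layout efx_setup_tc() programs for each class. */
		for (tc = 0; tc < 2; tc++) {
			map[tc].offset = tc * n_tx_channels;
			map[tc].count = n_tx_channels;
		}
		for (tc = 0; tc < 2; tc++)
			printf("tc %u -> core queues %u..%u, this flow uses %u\n",
			       tc, map[tc].offset,
			       map[tc].offset + map[tc].count - 1,
			       map[tc].offset + hash % map[tc].count);
		return 0;
	}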