openeuler / raspberrypi-kernel
Commit 81a430ac
Authored March 17, 2012 by David S. Miller

Merge branch 'master' of
git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Parents: c2ec3ff6 729739b7

Showing 6 changed files with 772 additions and 628 deletions (+772 / -628)
drivers/net/ethernet/intel/igb/e1000_defines.h      +2    -0
drivers/net/ethernet/intel/igb/igb_main.c           +35   -2
drivers/net/ethernet/intel/ixgbe/ixgbe.h            +33   -27
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c    +15   -8
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c       +12   -5
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c       +675  -586
drivers/net/ethernet/intel/igb/e1000_defines.h  (view file @ 81a430ac)

@@ -134,6 +134,8 @@
 #define E1000_RCTL_SZ_256       0x00030000 /* rx buffer size 256 */
 #define E1000_RCTL_VFE          0x00040000 /* vlan filter enable */
 #define E1000_RCTL_CFIEN        0x00080000 /* canonical form enable */
+#define E1000_RCTL_DPF          0x00400000 /* Discard Pause Frames */
+#define E1000_RCTL_PMCF         0x00800000 /* pass MAC control frames */
 #define E1000_RCTL_SECRC        0x04000000 /* Strip Ethernet CRC */
 /*
drivers/net/ethernet/intel/igb/igb_main.c  (view file @ 81a430ac)
@@ -1769,10 +1769,21 @@ static int igb_set_features(struct net_device *netdev,
 	netdev_features_t features)
 {
 	netdev_features_t changed = netdev->features ^ features;
+	struct igb_adapter *adapter = netdev_priv(netdev);
 
 	if (changed & NETIF_F_HW_VLAN_RX)
 		igb_vlan_mode(netdev, features);
 
+	if (!(changed & NETIF_F_RXALL))
+		return 0;
+
+	netdev->features = features;
+
+	if (netif_running(netdev))
+		igb_reinit_locked(adapter);
+	else
+		igb_reset(adapter);
+
 	return 0;
 }
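Note: the `netdev->features ^ features` XOR yields exactly the set of feature bits being toggled, which is why the function can return early when NETIF_F_RXALL is not among them. A minimal userspace sketch of the same pattern (the feature bit positions below are placeholders, not the kernel's NETIF_F_* values):

/* Minimal sketch of the "changed = old ^ new" feature-diff pattern used by
 * igb_set_features().  Bit positions are placeholders, not real NETIF_F_* values. */
#include <stdio.h>
#include <stdint.h>

#define FEAT_HW_VLAN_RX (1u << 0)   /* placeholder */
#define FEAT_RXALL      (1u << 1)   /* placeholder */

static void set_features(uint32_t *current, uint32_t requested)
{
	uint32_t changed = *current ^ requested;   /* bits that actually toggle */

	if (changed & FEAT_HW_VLAN_RX)
		printf("would reprogram VLAN mode\n");

	if (!(changed & FEAT_RXALL))
		return;                            /* nothing else to do */

	*current = requested;
	printf("would reinit/reset the adapter for RXALL\n");
}

int main(void)
{
	uint32_t features = FEAT_HW_VLAN_RX;

	set_features(&features, FEAT_HW_VLAN_RX | FEAT_RXALL);
	return 0;
}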
@@ -1954,6 +1965,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= netdev->features;
+	netdev->hw_features |= NETIF_F_RXALL;
 
 	/* set this bit last since it cannot be part of hw_features */
 	netdev->features |= NETIF_F_HW_VLAN_FILTER;
@@ -1964,6 +1976,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 				 NETIF_F_IPV6_CSUM |
 				 NETIF_F_SG;
 
+	netdev->priv_flags |= IFF_SUPP_NOFCS;
+
 	if (pci_using_dac) {
 		netdev->features |= NETIF_F_HIGHDMA;
 		netdev->vlan_features |= NETIF_F_HIGHDMA;
@@ -3003,6 +3017,22 @@ void igb_setup_rctl(struct igb_adapter *adapter)
 		wr32(E1000_QDE, ALL_QUEUES);
 	}
 
+	/* This is useful for sniffing bad packets. */
+	if (adapter->netdev->features & NETIF_F_RXALL) {
+		/* UPE and MPE will be handled by normal PROMISC logic
+		 * in e1000e_set_rx_mode */
+		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
+			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
+			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
+			  E1000_RCTL_DPF | /* Allow filtered pause */
+			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
+
+		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
+		 * and that breaks VLANs.
+		 */
+	}
+
 	wr32(E1000_RCTL, rctl);
 }
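With NETIF_F_RXALL enabled, the branch above sets the store-bad-packets, broadcast-accept, and MAC-control bits and clears the VLAN/pause filtering bits in RCTL. A hedged sketch of the resulting bit arithmetic, using only the E1000_RCTL_* values visible in the e1000_defines.h hunk above (E1000_RCTL_SBP and E1000_RCTL_BAM are not shown in this commit and are left out):

/* Hedged sketch of the RXALL RCTL bit manipulation, using only the values
 * that appear in the e1000_defines.h hunk above. */
#include <stdio.h>
#include <stdint.h>

#define E1000_RCTL_VFE    0x00040000 /* vlan filter enable */
#define E1000_RCTL_CFIEN  0x00080000 /* canonical form enable */
#define E1000_RCTL_DPF    0x00400000 /* Discard Pause Frames */
#define E1000_RCTL_PMCF   0x00800000 /* pass MAC control frames */

int main(void)
{
	/* assumed starting state: VLAN filtering on, pause frames discarded */
	uint32_t rctl = E1000_RCTL_VFE | E1000_RCTL_DPF;

	rctl |= E1000_RCTL_PMCF;                              /* pass MAC control frames */
	rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_DPF | E1000_RCTL_CFIEN);

	printf("rctl = 0x%08x\n", rctl);                      /* prints rctl = 0x00800000 */
	return 0;
}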
@@ -4293,6 +4323,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
 	/* write last descriptor with RS and EOP bits */
 	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
+	if (unlikely(skb->no_fcs))
+		cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
 	tx_desc->read.cmd_type_len = cmd_type;
 
 	/* set the timestamp */
@@ -6098,8 +6130,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 			goto next_desc;
 		}
 
-		if (igb_test_staterr(rx_desc,
-				     E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+		if (unlikely((igb_test_staterr(rx_desc,
+					       E1000_RXDEXT_ERR_FRAME_ERR_MASK))
+			     && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
 			dev_kfree_skb_any(skb);
 			goto next_desc;
 		}
drivers/net/ethernet/intel/ixgbe/ixgbe.h  (view file @ 81a430ac)
@@ -72,12 +72,6 @@
 /* Supported Rx Buffer Sizes */
 #define IXGBE_RXBUFFER_512    512    /* Used for packet split */
-#define IXGBE_RXBUFFER_2K    2048
-#define IXGBE_RXBUFFER_3K    3072
-#define IXGBE_RXBUFFER_4K    4096
-#define IXGBE_RXBUFFER_7K    7168
-#define IXGBE_RXBUFFER_8K    8192
-#define IXGBE_RXBUFFER_15K  15360
 #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
 
 /*
@@ -102,7 +96,6 @@
 #define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 5)
 #define IXGBE_TX_FLAGS_FSO		(u32)(1 << 6)
 #define IXGBE_TX_FLAGS_TXSW		(u32)(1 << 7)
-#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE	(u32)(1 << 8)
 #define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -156,19 +149,18 @@ struct vf_macvlans {
 struct ixgbe_tx_buffer {
 	union ixgbe_adv_tx_desc *next_to_watch;
 	unsigned long time_stamp;
-	dma_addr_t dma;
-	u32 length;
-	u32 tx_flags;
 	struct sk_buff *skb;
-	u32 bytecount;
-	u16 gso_segs;
+	unsigned int bytecount;
+	unsigned short gso_segs;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+	u32 tx_flags;
 };
 
 struct ixgbe_rx_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
 	struct page *page;
-	dma_addr_t page_dma;
 	unsigned int page_offset;
 };
@@ -180,7 +172,6 @@ struct ixgbe_queue_stats {
 struct ixgbe_tx_queue_stats {
 	u64 restart_queue;
 	u64 tx_busy;
-	u64 completed;
 	u64 tx_done_old;
 };
@@ -193,21 +184,15 @@ struct ixgbe_rx_queue_stats {
 	u64 csum_err;
 };
 
-enum ixbge_ring_state_t {
+enum ixgbe_ring_state_t {
 	__IXGBE_TX_FDIR_INIT_DONE,
 	__IXGBE_TX_DETECT_HANG,
 	__IXGBE_HANG_CHECK_ARMED,
-	__IXGBE_RX_PS_ENABLED,
 	__IXGBE_RX_RSC_ENABLED,
 	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
+	__IXGBE_RX_FCOE_BUFSZ,
 };
 
-#define ring_is_ps_enabled(ring) \
-	test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
-	set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
-	clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
 #define check_for_tx_hang(ring) \
 	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 #define set_check_for_tx_hang(ring) \
@@ -233,7 +218,6 @@ struct ixgbe_ring {
 	u8 __iomem *tail;
 
 	u16 count;			/* amount of descriptors */
-	u16 rx_buf_len;
 
 	u8 queue_index; /* needed for multiqueue queue management */
 	u8 reg_idx;			/* holds the special value that gets
@@ -241,8 +225,13 @@ struct ixgbe_ring {
 					 * associated with this ring, which is
 					 * different for DCB and RSS modes
 					 */
-	u8 atr_sample_rate;
-	u8 atr_count;
+	union {
+		struct {
+			u8 atr_sample_rate;
+			u8 atr_count;
+		};
+		u16 next_to_alloc;
+	};
 
 	u16 next_to_use;
 	u16 next_to_clean;
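The ring now overlays the two flow-director bytes (atr_sample_rate, atr_count) with a 16-bit next_to_alloc used by the page-based Rx path, so the new field costs no extra space. A small standalone sketch of the same anonymous struct-in-union layout (field names reused purely for illustration):

/* Standalone illustration of the anonymous union/struct overlay used in
 * struct ixgbe_ring above: two u8 Tx-side fields share storage with one
 * u16 Rx-side field. */
#include <stdio.h>
#include <stdint.h>

struct demo_ring {
	union {
		struct {
			uint8_t atr_sample_rate;
			uint8_t atr_count;
		};
		uint16_t next_to_alloc;
	};
	uint16_t next_to_use;
	uint16_t next_to_clean;
};

int main(void)
{
	struct demo_ring ring = { .next_to_alloc = 0x0201 };

	/* the overlay means the union adds only two bytes to the struct */
	printf("sizeof(demo_ring) = %zu\n", sizeof(ring));
	printf("atr_sample_rate = %u, atr_count = %u\n",
	       ring.atr_sample_rate, ring.atr_count); /* 1 and 2 on little-endian */
	return 0;
}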
@@ -287,6 +276,22 @@ struct ixgbe_ring_feature {
 	int mask;
 } ____cacheline_internodealigned_in_smp;
 
+/*
+ * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
+ * this is twice the size of a half page we need to double the page order
+ * for FCoE enabled Rx queues.
+ */
+#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
+static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+{
+	return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+}
+#else
+#define ixgbe_rx_pg_order(_ring) 0
+#endif
+#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
+#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
+
 struct ixgbe_ring_container {
 	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
 	unsigned int total_bytes;	/* total bytes processed this int */
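Plugging in a 4 KiB PAGE_SIZE shows how the page-order trick satisfies the 2200-byte FCoE requirement: a normal ring gets order-0 pages and 2048-byte buffers, an FCoE ring gets order-1 pages and 4096-byte buffers. A hedged standalone rework of the arithmetic (PAGE_SIZE and the FCoE ring-state bit are simulated here):

/* Worked example of ixgbe_rx_pg_order/ixgbe_rx_pg_size/ixgbe_rx_bufsz for a
 * 4 KiB page, outside the kernel.  'fcoe' stands in for the
 * __IXGBE_RX_FCOE_BUFSZ ring state bit. */
#include <stdio.h>

#define PAGE_SIZE 4096u  /* assumed x86-style page size */

static unsigned int rx_pg_order(int fcoe) { return fcoe ? 1 : 0; }
static unsigned int rx_pg_size(int fcoe)  { return PAGE_SIZE << rx_pg_order(fcoe); }
static unsigned int rx_bufsz(int fcoe)    { return (PAGE_SIZE / 2) << rx_pg_order(fcoe); }

int main(void)
{
	/* normal ring: order 0, 4096-byte pages split into 2048-byte buffers */
	printf("normal: order=%u pg_size=%u bufsz=%u\n",
	       rx_pg_order(0), rx_pg_size(0), rx_bufsz(0));

	/* FCoE ring: order 1 doubles both, so bufsz=4096 > 2200 as required */
	printf("fcoe:   order=%u pg_size=%u bufsz=%u\n",
	       rx_pg_order(1), rx_pg_size(1), rx_bufsz(1));
	return 0;
}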
@@ -554,7 +559,7 @@ struct ixgbe_cb {
 	};
 	dma_addr_t dma;
 	u16 append_cnt;
-	bool delay_unmap;
+	bool page_released;
 };
 
 #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
@@ -625,7 +630,8 @@ extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 extern void ixgbe_do_reset(struct net_device *netdev);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
+		     struct ixgbe_tx_buffer *first,
 		     u32 tx_flags, u8 *hdr_len);
 extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c  (view file @ 81a430ac)
@@ -35,6 +35,7 @@
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
 #include <linux/vmalloc.h>
+#include <linux/highmem.h>
 #include <linux/uaccess.h>
 #include "ixgbe.h"
@@ -1615,7 +1616,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 	rx_ring->dev = &adapter->pdev->dev;
 	rx_ring->netdev = adapter->netdev;
 	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
-	rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K;
 
 	err = ixgbe_setup_rx_resources(rx_ring);
 	if (err) {
@@ -1718,13 +1718,15 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
 	frame_size >>= 1;
 
-	data = rx_buffer->skb->data;
+	data = kmap(rx_buffer->page) + rx_buffer->page_offset;
 
 	if (data[3] != 0xFF ||
 	    data[frame_size + 10] != 0xBE ||
 	    data[frame_size + 12] != 0xAF)
 		match = false;
 
+	kunmap(rx_buffer->page);
+
 	return match;
 }
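The loopback self-test writes a recognizable pattern into each test frame and then checks three bytes of it after the frame has looped back. Since the frame-construction helper is not part of this diff, the sketch below simulates it with marker offsets taken from the check above:

/* Standalone simulation of the loopback-frame check: build a buffer with the
 * marker bytes the ixgbe self-test looks for, then run the same predicate.
 * The frame layout is assumed from the check itself; the real frame builder
 * is not shown in this commit. */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool check_lbtest_frame(const unsigned char *data, unsigned int frame_size)
{
	frame_size >>= 1;

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		return false;
	return true;
}

int main(void)
{
	unsigned char frame[1024];
	unsigned int frame_size = 1024;

	memset(frame, 0, sizeof(frame));
	frame[3] = 0xFF;                     /* marker near the start */
	frame[frame_size / 2 + 10] = 0xBE;   /* markers around the midpoint */
	frame[frame_size / 2 + 12] = 0xAF;

	printf("match = %d\n", check_lbtest_frame(frame, frame_size)); /* prints 1 */
	return 0;
}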
@@ -1746,17 +1748,22 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
 		/* check Rx buffer */
 		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
 
-		/* unmap Rx buffer, will be remapped by alloc_rx_buffers */
-		dma_unmap_single(rx_ring->dev,
-				 rx_buffer->dma,
-				 rx_ring->rx_buf_len,
-				 DMA_FROM_DEVICE);
-		rx_buffer->dma = 0;
+		/* sync Rx buffer for CPU read */
+		dma_sync_single_for_cpu(rx_ring->dev,
+					rx_buffer->dma,
+					ixgbe_rx_bufsz(rx_ring),
+					DMA_FROM_DEVICE);
 
 		/* verify contents of skb */
 		if (ixgbe_check_lbtest_frame(rx_buffer, size))
 			count++;
 
+		/* sync Rx buffer for device write */
+		dma_sync_single_for_device(rx_ring->dev,
+					   rx_buffer->dma,
+					   ixgbe_rx_bufsz(rx_ring),
+					   DMA_FROM_DEVICE);
+
 		/* unmap buffer on Tx side */
 		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
 		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c  (view file @ 81a430ac)
@@ -447,7 +447,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 /**
  * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
  * @tx_ring: tx desc ring
- * @skb: associated skb
+ * @first: first tx_buffer structure containing skb, tx_flags, and protocol
  * @tx_flags: tx flags
  * @hdr_len: hdr_len to be returned
  *
@@ -455,9 +455,11 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
  *
  * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
  */
-int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+int ixgbe_fso(struct ixgbe_ring *tx_ring,
+	      struct ixgbe_tx_buffer *first,
 	      u32 tx_flags, u8 *hdr_len)
 {
+	struct sk_buff *skb = first->skb;
 	struct fc_frame_header *fh;
 	u32 vlan_macip_lens;
 	u32 fcoe_sof_eof = 0;
@@ -530,9 +532,14 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 	*hdr_len = sizeof(struct fcoe_crc_eof);
 
 	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
-	if (skb_is_gso(skb))
-		*hdr_len += (skb_transport_offset(skb) +
-			     sizeof(struct fc_frame_header));
+	if (skb_is_gso(skb)) {
+		*hdr_len += skb_transport_offset(skb) +
+			    sizeof(struct fc_frame_header);
+		/* update gso_segs and bytecount */
+		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
+					       skb_shinfo(skb)->gso_size);
+		first->bytecount += (first->gso_segs - 1) * *hdr_len;
+	}
 
 	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
 	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
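Because the header is repeated in front of every segment the hardware produces, the driver counts it once per extra segment: gso_segs = DIV_ROUND_UP(len - hdr_len, gso_size) and bytecount grows by (gso_segs - 1) * hdr_len. A small worked example with invented numbers:

/* Worked example of the FSO gso_segs/bytecount bookkeeping above.  The skb
 * length, header length and MSS are made up for illustration; only the
 * arithmetic mirrors the driver. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int skb_len   = 14000;  /* assumed payload handed to the ring */
	unsigned int hdr_len   = 90;     /* assumed FCoE + FC header length    */
	unsigned int gso_size  = 2048;   /* assumed per-segment payload (MSS)  */
	unsigned int bytecount = skb_len;

	unsigned int gso_segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);

	/* every segment after the first repeats the header on the wire */
	bytecount += (gso_segs - 1) * hdr_len;

	printf("gso_segs = %u, bytecount = %u\n", gso_segs, bytecount);
	/* prints gso_segs = 7, bytecount = 14540 */
	return 0;
}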
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  (view file @ 81a430ac)

This diff is collapsed in this view (675 additions and 586 deletions not shown).