Commit dccb235e authored by David S. Miller

Merge branch 'be2net-fixes'

Ajit Khaparde says:

====================
be2net patches

Please consider applying to net-next
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -72,6 +72,9 @@
 #define BE_MAX_MTU		(BE_MAX_JUMBO_FRAME_SIZE -	\
				 (ETH_HLEN + ETH_FCS_LEN))
 
+/* Accommodate for QnQ configurations where VLAN insertion is enabled in HW */
+#define BE_MAX_GSO_SIZE		(65535 - 2 * VLAN_HLEN)
+
 #define BE_NUM_VLANS_SUPPORTED	64
 #define BE_MAX_EQD		128u
 #define BE_MAX_TX_FRAG_COUNT	30
@@ -124,27 +127,27 @@ struct be_dma_mem {
 };
 
 struct be_queue_info {
+	u32 len;
+	u32 entry_size;	/* Size of an element in the queue */
+	u32 tail, head;
+	atomic_t used;	/* Number of valid elements in the queue */
+	u32 id;
 	struct be_dma_mem dma_mem;
-	u16 len;
-	u16 entry_size;	/* Size of an element in the queue */
-	u16 id;
-	u16 tail, head;
 	bool created;
-	atomic_t used;	/* Number of valid elements in the queue */
 };
 
-static inline u32 MODULO(u16 val, u16 limit)
+static inline u32 MODULO(u32 val, u32 limit)
 {
 	BUG_ON(limit & (limit - 1));
 	return val & (limit - 1);
 }
 
-static inline void index_adv(u16 *index, u16 val, u16 limit)
+static inline void index_adv(u32 *index, u32 val, u32 limit)
 {
 	*index = MODULO((*index + val), limit);
 }
 
-static inline void index_inc(u16 *index, u16 limit)
+static inline void index_inc(u32 *index, u32 limit)
 {
 	*index = MODULO((*index + 1), limit);
 }
@@ -169,7 +172,7 @@ static inline void queue_head_inc(struct be_queue_info *q)
 	index_inc(&q->head, q->len);
 }
 
-static inline void index_dec(u16 *index, u16 limit)
+static inline void index_dec(u32 *index, u32 limit)
 {
 	*index = MODULO((*index - 1), limit);
 }
......
@@ -596,7 +596,7 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
 	int status;
 	struct be_mcc_wrb *wrb;
 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
-	u16 index = mcc_obj->q.head;
+	u32 index = mcc_obj->q.head;
 	struct be_cmd_resp_hdr *resp;
 
 	index_dec(&index, mcc_obj->q.len);
......
@@ -849,9 +849,9 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
 }
 
 /* Grab a WRB header for xmit */
-static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
+static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
 {
-	u16 head = txo->q.head;
+	u32 head = txo->q.head;
 
 	queue_head_inc(&txo->q);
 	return head;
@@ -895,7 +895,7 @@ static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
  * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
  */
 static void be_xmit_restore(struct be_adapter *adapter,
-			    struct be_tx_obj *txo, u16 head, bool map_single,
+			    struct be_tx_obj *txo, u32 head, bool map_single,
 			    u32 copied)
 {
 	struct device *dev;
@@ -930,7 +930,7 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
 	struct device *dev = &adapter->pdev->dev;
 	struct be_queue_info *txq = &txo->q;
 	bool map_single = false;
-	u16 head = txq->head;
+	u32 head = txq->head;
 	dma_addr_t busaddr;
 	int len;
@@ -1123,6 +1123,8 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
 					   struct sk_buff *skb,
 					   struct be_wrb_params *wrb_params)
 {
+	int err;
+
 	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
 	 * packets that are 32b or less may cause a transmit stall
 	 * on that port. The workaround is to pad such packets
@@ -1139,6 +1141,13 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
 			return NULL;
 	}
 
+	/* The stack can send us skbs with length greater than
+	 * what the HW can handle. Trim the extra bytes.
+	 */
+	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
+	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
+	WARN_ON(err);
+
 	return skb;
 }
@@ -1990,7 +1999,7 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
 	struct be_adapter *adapter = rxo->adapter;
 	struct be_rx_page_info *rx_page_info;
 	struct be_queue_info *rxq = &rxo->q;
-	u16 frag_idx = rxq->tail;
+	u32 frag_idx = rxq->tail;
 
 	rx_page_info = &rxo->page_info_tbl[frag_idx];
 	BUG_ON(!rx_page_info->page);
@@ -2401,10 +2410,11 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
 {
 	struct sk_buff **sent_skbs = txo->sent_skb_list;
 	struct be_queue_info *txq = &txo->q;
-	u16 frag_index, num_wrbs = 0;
 	struct sk_buff *skb = NULL;
 	bool unmap_skb_hdr = false;
 	struct be_eth_wrb *wrb;
+	u16 num_wrbs = 0;
+	u32 frag_index;
 
 	do {
 		if (sent_skbs[txq->tail]) {
@@ -2516,10 +2526,11 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
 static void be_tx_compl_clean(struct be_adapter *adapter)
 {
-	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
 	struct device *dev = &adapter->pdev->dev;
+	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
 	struct be_tx_compl_info *txcp;
 	struct be_queue_info *txq;
+	u32 end_idx, notified_idx;
 	struct be_tx_obj *txo;
 	int i, pending_txqs;
@@ -4848,7 +4859,7 @@ static void be_netdev_init(struct net_device *netdev)
 	netdev->flags |= IFF_MULTICAST;
 
-	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
+	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
 
 	netdev->netdev_ops = &be_netdev_ops;
......