提交 2760f5a3 编写于 作者: D David S. Miller

Merge branch 'aquantia-fixes'

Igor Russkikh says:

====================
aquantia: Atlantic driver bugfixes and improvements

This series contains bugfixes for aQuantia Atlantic driver.

Changes in v2:
Review comments applied:
- min_mtu set removed
- extra mtu range check is removed
- err codes handling improved
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
...@@ -51,6 +51,10 @@ ...@@ -51,6 +51,10 @@
#define AQ_CFG_SKB_FRAGS_MAX 32U #define AQ_CFG_SKB_FRAGS_MAX 32U
/* Number of descriptors available in one ring to resume this ring queue
*/
#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)
#define AQ_CFG_NAPI_WEIGHT 64U #define AQ_CFG_NAPI_WEIGHT 64U
#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U #define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
......
...@@ -119,6 +119,35 @@ int aq_nic_cfg_start(struct aq_nic_s *self) ...@@ -119,6 +119,35 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
return 0; return 0;
} }
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
if (err)
return err;
if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps)
pr_info("%s: link change old %d new %d\n",
AQ_CFG_DRV_NAME, self->link_status.mbps,
self->aq_hw->aq_link_status.mbps);
self->link_status = self->aq_hw->aq_link_status;
if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
aq_utils_obj_set(&self->header.flags,
AQ_NIC_FLAG_STARTED);
aq_utils_obj_clear(&self->header.flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
netif_tx_wake_all_queues(self->ndev);
}
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
netif_carrier_off(self->ndev);
netif_tx_disable(self->ndev);
aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
}
return 0;
}
static void aq_nic_service_timer_cb(unsigned long param) static void aq_nic_service_timer_cb(unsigned long param)
{ {
struct aq_nic_s *self = (struct aq_nic_s *)param; struct aq_nic_s *self = (struct aq_nic_s *)param;
...@@ -131,26 +160,13 @@ static void aq_nic_service_timer_cb(unsigned long param) ...@@ -131,26 +160,13 @@ static void aq_nic_service_timer_cb(unsigned long param)
if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit; goto err_exit;
err = self->aq_hw_ops.hw_get_link_status(self->aq_hw); err = aq_nic_update_link_status(self);
if (err < 0) if (err)
goto err_exit; goto err_exit;
self->link_status = self->aq_hw->aq_link_status;
self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
self->aq_nic_cfg.is_interrupt_moderation); self->aq_nic_cfg.is_interrupt_moderation);
if (self->link_status.mbps) {
aq_utils_obj_set(&self->header.flags,
AQ_NIC_FLAG_STARTED);
aq_utils_obj_clear(&self->header.flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
} else {
netif_carrier_off(self->ndev);
aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
}
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
for (i = AQ_DIMOF(self->aq_vec); i--;) { for (i = AQ_DIMOF(self->aq_vec); i--;) {
...@@ -214,7 +230,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, ...@@ -214,7 +230,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
SET_NETDEV_DEV(ndev, dev); SET_NETDEV_DEV(ndev, dev);
ndev->if_port = port; ndev->if_port = port;
ndev->min_mtu = ETH_MIN_MTU;
self->ndev = ndev; self->ndev = ndev;
self->aq_pci_func = aq_pci_func; self->aq_pci_func = aq_pci_func;
...@@ -241,7 +256,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, ...@@ -241,7 +256,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
int aq_nic_ndev_register(struct aq_nic_s *self) int aq_nic_ndev_register(struct aq_nic_s *self)
{ {
int err = 0; int err = 0;
unsigned int i = 0U;
if (!self->ndev) { if (!self->ndev) {
err = -EINVAL; err = -EINVAL;
...@@ -263,8 +277,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self) ...@@ -263,8 +277,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
netif_carrier_off(self->ndev); netif_carrier_off(self->ndev);
for (i = AQ_CFG_VECS_MAX; i--;) netif_tx_disable(self->ndev);
aq_nic_ndev_queue_stop(self, i);
err = register_netdev(self->ndev); err = register_netdev(self->ndev);
if (err < 0) if (err < 0)
...@@ -283,6 +296,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self) ...@@ -283,6 +296,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->features = aq_hw_caps->hw_features; self->ndev->features = aq_hw_caps->hw_features;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
return 0; return 0;
} }
...@@ -318,12 +332,8 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) ...@@ -318,12 +332,8 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
err = -EINVAL; err = -EINVAL;
goto err_exit; goto err_exit;
} }
if (netif_running(ndev)) { if (netif_running(ndev))
unsigned int i; netif_tx_disable(ndev);
for (i = AQ_CFG_VECS_MAX; i--;)
netif_stop_subqueue(ndev, i);
}
for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
self->aq_vecs++) { self->aq_vecs++) {
...@@ -383,16 +393,6 @@ int aq_nic_init(struct aq_nic_s *self) ...@@ -383,16 +393,6 @@ int aq_nic_init(struct aq_nic_s *self)
return err; return err;
} }
void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
{
netif_start_subqueue(self->ndev, idx);
}
void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
{
netif_stop_subqueue(self->ndev, idx);
}
int aq_nic_start(struct aq_nic_s *self) int aq_nic_start(struct aq_nic_s *self)
{ {
struct aq_vec_s *aq_vec = NULL; struct aq_vec_s *aq_vec = NULL;
...@@ -451,10 +451,6 @@ int aq_nic_start(struct aq_nic_s *self) ...@@ -451,10 +451,6 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit; goto err_exit;
} }
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_nic_ndev_queue_start(self, i);
err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
if (err < 0) if (err < 0)
goto err_exit; goto err_exit;
...@@ -463,6 +459,8 @@ int aq_nic_start(struct aq_nic_s *self) ...@@ -463,6 +459,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0) if (err < 0)
goto err_exit; goto err_exit;
netif_tx_start_all_queues(self->ndev);
err_exit: err_exit:
return err; return err;
} }
...@@ -475,6 +473,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, ...@@ -475,6 +473,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int frag_count = 0U; unsigned int frag_count = 0U;
unsigned int dx = ring->sw_tail; unsigned int dx = ring->sw_tail;
struct aq_ring_buff_s *first = NULL;
struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
if (unlikely(skb_is_gso(skb))) { if (unlikely(skb_is_gso(skb))) {
...@@ -485,6 +484,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, ...@@ -485,6 +484,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
dx_buff->len_l4 = tcp_hdrlen(skb); dx_buff->len_l4 = tcp_hdrlen(skb);
dx_buff->mss = skb_shinfo(skb)->gso_size; dx_buff->mss = skb_shinfo(skb)->gso_size;
dx_buff->is_txc = 1U; dx_buff->is_txc = 1U;
dx_buff->eop_index = 0xffffU;
dx_buff->is_ipv6 = dx_buff->is_ipv6 =
(ip_hdr(skb)->version == 6) ? 1U : 0U; (ip_hdr(skb)->version == 6) ? 1U : 0U;
...@@ -504,6 +504,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, ...@@ -504,6 +504,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
goto exit; goto exit;
first = dx_buff;
dx_buff->len_pkt = skb->len; dx_buff->len_pkt = skb->len;
dx_buff->is_sop = 1U; dx_buff->is_sop = 1U;
dx_buff->is_mapped = 1U; dx_buff->is_mapped = 1U;
...@@ -532,40 +533,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, ...@@ -532,40 +533,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
for (; nr_frags--; ++frag_count) { for (; nr_frags--; ++frag_count) {
unsigned int frag_len = 0U; unsigned int frag_len = 0U;
unsigned int buff_offset = 0U;
unsigned int buff_size = 0U;
dma_addr_t frag_pa; dma_addr_t frag_pa;
skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
frag_len = skb_frag_size(frag); frag_len = skb_frag_size(frag);
frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
frag_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa))) while (frag_len) {
goto mapping_error; if (frag_len > AQ_CFG_TX_FRAME_MAX)
buff_size = AQ_CFG_TX_FRAME_MAX;
else
buff_size = frag_len;
frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
frag,
buff_offset,
buff_size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
frag_pa)))
goto mapping_error;
while (frag_len > AQ_CFG_TX_FRAME_MAX) {
dx = aq_ring_next_dx(ring, dx); dx = aq_ring_next_dx(ring, dx);
dx_buff = &ring->buff_ring[dx]; dx_buff = &ring->buff_ring[dx];
dx_buff->flags = 0U; dx_buff->flags = 0U;
dx_buff->len = AQ_CFG_TX_FRAME_MAX; dx_buff->len = buff_size;
dx_buff->pa = frag_pa; dx_buff->pa = frag_pa;
dx_buff->is_mapped = 1U; dx_buff->is_mapped = 1U;
dx_buff->eop_index = 0xffffU;
frag_len -= buff_size;
buff_offset += buff_size;
frag_len -= AQ_CFG_TX_FRAME_MAX;
frag_pa += AQ_CFG_TX_FRAME_MAX;
++ret; ++ret;
} }
dx = aq_ring_next_dx(ring, dx);
dx_buff = &ring->buff_ring[dx];
dx_buff->flags = 0U;
dx_buff->len = frag_len;
dx_buff->pa = frag_pa;
dx_buff->is_mapped = 1U;
++ret;
} }
first->eop_index = dx;
dx_buff->is_eop = 1U; dx_buff->is_eop = 1U;
dx_buff->skb = skb; dx_buff->skb = skb;
goto exit; goto exit;
...@@ -602,7 +609,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) ...@@ -602,7 +609,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
unsigned int tc = 0U; unsigned int tc = 0U;
int err = NETDEV_TX_OK; int err = NETDEV_TX_OK;
bool is_nic_in_bad_state;
frags = skb_shinfo(skb)->nr_frags + 1; frags = skb_shinfo(skb)->nr_frags + 1;
...@@ -613,13 +619,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) ...@@ -613,13 +619,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
goto err_exit; goto err_exit;
} }
is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags, aq_ring_update_queue_state(ring);
AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
(aq_ring_avail_dx(ring) <
AQ_CFG_SKB_FRAGS_MAX);
if (is_nic_in_bad_state) { /* Above status update may stop the queue. Check this. */
aq_nic_ndev_queue_stop(self, ring->idx); if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
err = NETDEV_TX_BUSY; err = NETDEV_TX_BUSY;
goto err_exit; goto err_exit;
} }
...@@ -631,9 +634,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) ...@@ -631,9 +634,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
ring, ring,
frags); frags);
if (err >= 0) { if (err >= 0) {
if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
aq_nic_ndev_queue_stop(self, ring->idx);
++ring->stats.tx.packets; ++ring->stats.tx.packets;
ring->stats.tx.bytes += skb->len; ring->stats.tx.bytes += skb->len;
} }
...@@ -693,16 +693,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) ...@@ -693,16 +693,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{ {
int err = 0;
if (new_mtu > self->aq_hw_caps.mtu) {
err = -EINVAL;
goto err_exit;
}
self->aq_nic_cfg.mtu = new_mtu; self->aq_nic_cfg.mtu = new_mtu;
err_exit: return 0;
return err;
} }
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
...@@ -905,9 +898,7 @@ int aq_nic_stop(struct aq_nic_s *self) ...@@ -905,9 +898,7 @@ int aq_nic_stop(struct aq_nic_s *self)
struct aq_vec_s *aq_vec = NULL; struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U; unsigned int i = 0U;
for (i = 0U, aq_vec = self->aq_vec[0]; netif_tx_disable(self->ndev);
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_nic_ndev_queue_stop(self, i);
del_timer_sync(&self->service_timer); del_timer_sync(&self->service_timer);
......
...@@ -83,8 +83,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self); ...@@ -83,8 +83,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
int aq_nic_init(struct aq_nic_s *self); int aq_nic_init(struct aq_nic_s *self);
int aq_nic_cfg_start(struct aq_nic_s *self); int aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self); int aq_nic_ndev_register(struct aq_nic_s *self);
void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
void aq_nic_ndev_free(struct aq_nic_s *self); void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self); int aq_nic_start(struct aq_nic_s *self);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
......
...@@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self) ...@@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self)
return 0; return 0;
} }
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
unsigned int t)
{
return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
aq_ring_queue_stop(ring);
else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
aq_ring_queue_wake(ring);
}
void aq_ring_queue_wake(struct aq_ring_s *ring)
{
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
if (__netif_subqueue_stopped(ndev, ring->idx)) {
netif_wake_subqueue(ndev, ring->idx);
ring->stats.tx.queue_restarts++;
}
}
void aq_ring_queue_stop(struct aq_ring_s *ring)
{
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
if (!__netif_subqueue_stopped(ndev, ring->idx))
netif_stop_subqueue(ndev, ring->idx);
}
void aq_ring_tx_clean(struct aq_ring_s *self) void aq_ring_tx_clean(struct aq_ring_s *self)
{ {
struct device *dev = aq_nic_get_dev(self->aq_nic); struct device *dev = aq_nic_get_dev(self->aq_nic);
...@@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self) ...@@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
if (likely(buff->is_mapped)) { if (likely(buff->is_mapped)) {
if (unlikely(buff->is_sop)) if (unlikely(buff->is_sop)) {
if (!buff->is_eop &&
buff->eop_index != 0xffffU &&
(!aq_ring_dx_in_range(self->sw_head,
buff->eop_index,
self->hw_head)))
break;
dma_unmap_single(dev, buff->pa, buff->len, dma_unmap_single(dev, buff->pa, buff->len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
else } else {
dma_unmap_page(dev, buff->pa, buff->len, dma_unmap_page(dev, buff->pa, buff->len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
}
} }
if (unlikely(buff->is_eop)) if (unlikely(buff->is_eop))
dev_kfree_skb_any(buff->skb); dev_kfree_skb_any(buff->skb);
}
}
static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, buff->pa = 0U;
unsigned int t) buff->eop_index = 0xffffU;
{ }
return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
} }
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
......
...@@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s { ...@@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s {
}; };
union { union {
struct { struct {
u32 len:16; u16 len;
u32 is_ip_cso:1; u32 is_ip_cso:1;
u32 is_udp_cso:1; u32 is_udp_cso:1;
u32 is_tcp_cso:1; u32 is_tcp_cso:1;
...@@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s { ...@@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s {
u32 is_cleaned:1; u32 is_cleaned:1;
u32 is_error:1; u32 is_error:1;
u32 rsvd3:6; u32 rsvd3:6;
u16 eop_index;
u16 rsvd4;
}; };
u32 flags; u64 flags;
}; };
}; };
...@@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s { ...@@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s {
u64 errors; u64 errors;
u64 packets; u64 packets;
u64 bytes; u64 bytes;
u64 queue_restarts;
}; };
union aq_ring_stats_s { union aq_ring_stats_s {
...@@ -147,6 +150,9 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, ...@@ -147,6 +150,9 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
int aq_ring_init(struct aq_ring_s *self); int aq_ring_init(struct aq_ring_s *self);
void aq_ring_rx_deinit(struct aq_ring_s *self); void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self); void aq_ring_free(struct aq_ring_s *self);
void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
void aq_ring_tx_clean(struct aq_ring_s *self); void aq_ring_tx_clean(struct aq_ring_s *self);
int aq_ring_rx_clean(struct aq_ring_s *self, int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi, struct napi_struct *napi,
......
...@@ -59,12 +59,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) ...@@ -59,12 +59,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
if (ring[AQ_VEC_TX_ID].sw_head != if (ring[AQ_VEC_TX_ID].sw_head !=
ring[AQ_VEC_TX_ID].hw_head) { ring[AQ_VEC_TX_ID].hw_head) {
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) >
AQ_CFG_SKB_FRAGS_MAX) {
aq_nic_ndev_queue_start(self->aq_nic,
ring[AQ_VEC_TX_ID].idx);
}
was_tx_cleaned = true; was_tx_cleaned = true;
} }
...@@ -364,6 +359,7 @@ void aq_vec_add_stats(struct aq_vec_s *self, ...@@ -364,6 +359,7 @@ void aq_vec_add_stats(struct aq_vec_s *self,
stats_tx->packets += tx->packets; stats_tx->packets += tx->packets;
stats_tx->bytes += tx->bytes; stats_tx->bytes += tx->bytes;
stats_tx->errors += tx->errors; stats_tx->errors += tx->errors;
stats_tx->queue_restarts += tx->queue_restarts;
} }
} }
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
#include "../aq_common.h" #include "../aq_common.h"
#define HW_ATL_B0_MTU_JUMBO (16000U) #define HW_ATL_B0_MTU_JUMBO 16352U
#define HW_ATL_B0_MTU 1514U #define HW_ATL_B0_MTU 1514U
#define HW_ATL_B0_TX_RINGS 4U #define HW_ATL_B0_TX_RINGS 4U
......
...@@ -351,8 +351,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) ...@@ -351,8 +351,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
break; break;
default: default:
link_status->mbps = 0U; return -EBUSY;
break;
} }
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册