提交 98f45da2 编写于 作者: J Jon Mason 提交者: David S. Miller

vxge: NETIF_F_LLTX removal

NETIF_F_LLTX and its usage of local transmit locks are deprecated in
favor of using the netdev queue's transmit lock.  Remove the local
lock and all references to it, and use the netdev queue transmit lock
in the transmit completion handler.
Signed-off-by: Jon Mason <jon.mason@exar.com>
Signed-off-by: Ramkrishna Vepa <ramkrishna.vepa@exar.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 7adf7d1b
...@@ -87,7 +87,6 @@ static inline int is_vxge_card_up(struct vxgedev *vdev) ...@@ -87,7 +87,6 @@ static inline int is_vxge_card_up(struct vxgedev *vdev)
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{ {
unsigned long flags = 0;
struct sk_buff **skb_ptr = NULL; struct sk_buff **skb_ptr = NULL;
struct sk_buff **temp; struct sk_buff **temp;
#define NR_SKB_COMPLETED 128 #define NR_SKB_COMPLETED 128
...@@ -98,15 +97,16 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) ...@@ -98,15 +97,16 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
more = 0; more = 0;
skb_ptr = completed; skb_ptr = completed;
if (spin_trylock_irqsave(&fifo->tx_lock, flags)) { if (__netif_tx_trylock(fifo->txq)) {
vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr, vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
NR_SKB_COMPLETED, &more); NR_SKB_COMPLETED, &more);
spin_unlock_irqrestore(&fifo->tx_lock, flags); __netif_tx_unlock(fifo->txq);
} }
/* free SKBs */ /* free SKBs */
for (temp = completed; temp != skb_ptr; temp++) for (temp = completed; temp != skb_ptr; temp++)
dev_kfree_skb_irq(*temp); dev_kfree_skb_irq(*temp);
} while (more) ; } while (more);
} }
static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev) static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
...@@ -130,53 +130,6 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev) ...@@ -130,53 +130,6 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
} }
} }
/*
* MultiQ manipulation helper functions
*/
static inline int vxge_netif_queue_stopped(struct vxge_fifo *fifo)
{
struct net_device *dev = fifo->ndev;
struct netdev_queue *txq = NULL;
int vpath_no = fifo->driver_id;
int ret = 0;
if (fifo->tx_steering_type)
txq = netdev_get_tx_queue(dev, vpath_no);
else
txq = netdev_get_tx_queue(dev, 0);
ret = netif_tx_queue_stopped(txq);
return ret;
}
void vxge_stop_tx_queue(struct vxge_fifo *fifo)
{
struct net_device *dev = fifo->ndev;
struct netdev_queue *txq = NULL;
if (fifo->tx_steering_type)
txq = netdev_get_tx_queue(dev, fifo->driver_id);
else
txq = netdev_get_tx_queue(dev, 0);
netif_tx_stop_queue(txq);
}
void vxge_wake_tx_queue(struct vxge_fifo *fifo)
{
struct net_device *dev = fifo->ndev;
struct netdev_queue *txq = NULL;
int vpath_no = fifo->driver_id;
if (fifo->tx_steering_type)
txq = netdev_get_tx_queue(dev, vpath_no);
else
txq = netdev_get_tx_queue(dev, 0);
if (netif_tx_queue_stopped(txq))
netif_tx_wake_queue(txq);
}
/* /*
* vxge_callback_link_up * vxge_callback_link_up
* *
...@@ -650,7 +603,8 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, ...@@ -650,7 +603,8 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
&dtr, &t_code) == VXGE_HW_OK); &dtr, &t_code) == VXGE_HW_OK);
*skb_ptr = done_skb; *skb_ptr = done_skb;
vxge_wake_tx_queue(fifo); if (netif_tx_queue_stopped(fifo->txq))
netif_tx_wake_queue(fifo->txq);
vxge_debug_entryexit(VXGE_TRACE, vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d Exiting...", "%s: %s:%d Exiting...",
...@@ -659,8 +613,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, ...@@ -659,8 +613,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
} }
/* select a vpath to transmit the packet */ /* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb, static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
int *do_lock)
{ {
u16 queue_len, counter = 0; u16 queue_len, counter = 0;
if (skb->protocol == htons(ETH_P_IP)) { if (skb->protocol == htons(ETH_P_IP)) {
...@@ -679,12 +632,6 @@ static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb, ...@@ -679,12 +632,6 @@ static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
vdev->vpath_selector[queue_len - 1]; vdev->vpath_selector[queue_len - 1];
if (counter >= queue_len) if (counter >= queue_len)
counter = queue_len - 1; counter = queue_len - 1;
if (ip->protocol == IPPROTO_UDP) {
#ifdef NETIF_F_LLTX
*do_lock = 0;
#endif
}
} }
} }
return counter; return counter;
...@@ -781,8 +728,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) ...@@ -781,8 +728,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
* *
* This function is the Tx entry point of the driver. Neterion NIC supports * This function is the Tx entry point of the driver. Neterion NIC supports
* certain protocol assist features on Tx side, namely CSO, S/G, LSO. * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
* NOTE: when device cant queue the pkt, just the trans_start variable will
* not be upadted.
*/ */
static netdev_tx_t static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev) vxge_xmit(struct sk_buff *skb, struct net_device *dev)
...@@ -799,9 +744,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -799,9 +744,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
struct vxge_tx_priv *txdl_priv = NULL; struct vxge_tx_priv *txdl_priv = NULL;
struct __vxge_hw_fifo *fifo_hw; struct __vxge_hw_fifo *fifo_hw;
int offload_type; int offload_type;
unsigned long flags = 0;
int vpath_no = 0; int vpath_no = 0;
int do_spin_tx_lock = 1;
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
dev->name, __func__, __LINE__); dev->name, __func__, __LINE__);
...@@ -837,7 +780,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -837,7 +780,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
vpath_no = skb_get_queue_mapping(skb); vpath_no = skb_get_queue_mapping(skb);
else if (vdev->config.tx_steering_type == TX_PORT_STEERING) else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock); vpath_no = vxge_get_vpath_no(vdev, skb);
vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no); vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
...@@ -847,40 +790,29 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -847,40 +790,29 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
fifo = &vdev->vpaths[vpath_no].fifo; fifo = &vdev->vpaths[vpath_no].fifo;
fifo_hw = fifo->handle; fifo_hw = fifo->handle;
if (do_spin_tx_lock) if (netif_tx_queue_stopped(fifo->txq))
spin_lock_irqsave(&fifo->tx_lock, flags);
else {
if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
return NETDEV_TX_LOCKED;
}
if (vxge_netif_queue_stopped(fifo)) {
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
}
avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw); avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
if (avail == 0) { if (avail == 0) {
vxge_debug_tx(VXGE_ERR, vxge_debug_tx(VXGE_ERR,
"%s: No free TXDs available", dev->name); "%s: No free TXDs available", dev->name);
fifo->stats.txd_not_free++; fifo->stats.txd_not_free++;
vxge_stop_tx_queue(fifo); goto _exit0;
goto _exit2;
} }
/* Last TXD? Stop tx queue to avoid dropping packets. TX /* Last TXD? Stop tx queue to avoid dropping packets. TX
* completion will resume the queue. * completion will resume the queue.
*/ */
if (avail == 1) if (avail == 1)
vxge_stop_tx_queue(fifo); netif_tx_stop_queue(fifo->txq);
status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv); status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
if (unlikely(status != VXGE_HW_OK)) { if (unlikely(status != VXGE_HW_OK)) {
vxge_debug_tx(VXGE_ERR, vxge_debug_tx(VXGE_ERR,
"%s: Out of descriptors .", dev->name); "%s: Out of descriptors .", dev->name);
fifo->stats.txd_out_of_desc++; fifo->stats.txd_out_of_desc++;
vxge_stop_tx_queue(fifo); goto _exit0;
goto _exit2;
} }
vxge_debug_tx(VXGE_TRACE, vxge_debug_tx(VXGE_TRACE,
...@@ -900,9 +832,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -900,9 +832,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) { if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
vxge_hw_fifo_txdl_free(fifo_hw, dtr); vxge_hw_fifo_txdl_free(fifo_hw, dtr);
vxge_stop_tx_queue(fifo);
fifo->stats.pci_map_fail++; fifo->stats.pci_map_fail++;
goto _exit2; goto _exit0;
} }
txdl_priv = vxge_hw_fifo_txdl_private_get(dtr); txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
...@@ -925,13 +856,12 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -925,13 +856,12 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
if (!frag->size) if (!frag->size)
continue; continue;
dma_pointer = dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
(u64)pci_map_page(fifo->pdev, frag->page,
frag->page_offset, frag->size, frag->page_offset, frag->size,
PCI_DMA_TODEVICE); PCI_DMA_TODEVICE);
if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
goto _exit0; goto _exit2;
vxge_debug_tx(VXGE_TRACE, vxge_debug_tx(VXGE_TRACE,
"%s: %s:%d frag = %d dma_pointer = 0x%llx", "%s: %s:%d frag = %d dma_pointer = 0x%llx",
dev->name, __func__, __LINE__, i, dev->name, __func__, __LINE__, i,
...@@ -946,11 +876,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -946,11 +876,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
offload_type = vxge_offload_type(skb); offload_type = vxge_offload_type(skb);
if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
int mss = vxge_tcp_mss(skb); int mss = vxge_tcp_mss(skb);
if (mss) { if (mss) {
vxge_debug_tx(VXGE_TRACE, vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
"%s: %s:%d mss = %d",
dev->name, __func__, __LINE__, mss); dev->name, __func__, __LINE__, mss);
vxge_hw_fifo_txdl_mss_set(dtr, mss); vxge_hw_fifo_txdl_mss_set(dtr, mss);
} else { } else {
...@@ -968,19 +896,13 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -968,19 +896,13 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN); VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
vxge_hw_fifo_txdl_post(fifo_hw, dtr); vxge_hw_fifo_txdl_post(fifo_hw, dtr);
#ifdef NETIF_F_LLTX
dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
#endif
spin_unlock_irqrestore(&fifo->tx_lock, flags);
VXGE_COMPLETE_VPATH_TX(fifo);
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
dev->name, __func__, __LINE__); dev->name, __func__, __LINE__);
return NETDEV_TX_OK; return NETDEV_TX_OK;
_exit0: _exit2:
vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name); vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
_exit1: _exit1:
j = 0; j = 0;
frag = &skb_shinfo(skb)->frags[0]; frag = &skb_shinfo(skb)->frags[0];
...@@ -995,10 +917,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -995,10 +917,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
} }
vxge_hw_fifo_txdl_free(fifo_hw, dtr); vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit2: _exit0:
netif_tx_stop_queue(fifo->txq);
dev_kfree_skb(skb); dev_kfree_skb(skb);
spin_unlock_irqrestore(&fifo->tx_lock, flags);
VXGE_COMPLETE_VPATH_TX(fifo);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -1448,7 +1369,8 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) ...@@ -1448,7 +1369,8 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
clear_bit(vp_id, &vdev->vp_reset); clear_bit(vp_id, &vdev->vp_reset);
/* Start the vpath queue */ /* Start the vpath queue */
vxge_wake_tx_queue(&vpath->fifo); if (netif_tx_queue_stopped(vpath->fifo.txq))
netif_tx_wake_queue(vpath->fifo.txq);
return ret; return ret;
} }
...@@ -2078,6 +2000,12 @@ int vxge_open_vpaths(struct vxgedev *vdev) ...@@ -2078,6 +2000,12 @@ int vxge_open_vpaths(struct vxgedev *vdev)
vdev->config.tx_steering_type; vdev->config.tx_steering_type;
vpath->fifo.ndev = vdev->ndev; vpath->fifo.ndev = vdev->ndev;
vpath->fifo.pdev = vdev->pdev; vpath->fifo.pdev = vdev->pdev;
if (vdev->config.tx_steering_type)
vpath->fifo.txq =
netdev_get_tx_queue(vdev->ndev, i);
else
vpath->fifo.txq =
netdev_get_tx_queue(vdev->ndev, 0);
vpath->fifo.indicate_max_pkts = vpath->fifo.indicate_max_pkts =
vdev->config.fifo_indicate_max_pkts; vdev->config.fifo_indicate_max_pkts;
vpath->ring.rx_vector_no = 0; vpath->ring.rx_vector_no = 0;
...@@ -2564,7 +2492,7 @@ static void vxge_poll_vp_lockup(unsigned long data) ...@@ -2564,7 +2492,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
vxge_vpath_intr_disable(vdev, i); vxge_vpath_intr_disable(vdev, i);
/* stop the queue for this vpath */ /* stop the queue for this vpath */
vxge_stop_tx_queue(&vpath->fifo); netif_tx_stop_queue(vpath->fifo.txq);
continue; continue;
} }
} }
...@@ -2627,7 +2555,6 @@ vxge_open(struct net_device *dev) ...@@ -2627,7 +2555,6 @@ vxge_open(struct net_device *dev)
goto out1; goto out1;
} }
if (vdev->config.intr_type != MSI_X) { if (vdev->config.intr_type != MSI_X) {
netif_napi_add(dev, &vdev->napi, vxge_poll_inta, netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
vdev->config.napi_weight); vdev->config.napi_weight);
...@@ -3200,7 +3127,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev, ...@@ -3200,7 +3127,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
struct net_device *ndev; struct net_device *ndev;
enum vxge_hw_status status = VXGE_HW_OK; enum vxge_hw_status status = VXGE_HW_OK;
struct vxgedev *vdev; struct vxgedev *vdev;
int i, ret = 0, no_of_queue = 1; int ret = 0, no_of_queue = 1;
u64 stat; u64 stat;
*vdev_out = NULL; *vdev_out = NULL;
...@@ -3273,13 +3200,6 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev, ...@@ -3273,13 +3200,6 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
if (vdev->config.gro_enable) if (vdev->config.gro_enable)
ndev->features |= NETIF_F_GRO; ndev->features |= NETIF_F_GRO;
#ifdef NETIF_F_LLTX
ndev->features |= NETIF_F_LLTX;
#endif
for (i = 0; i < no_of_vpath; i++)
spin_lock_init(&vdev->vpaths[i].fifo.tx_lock);
if (register_netdev(ndev)) { if (register_netdev(ndev)) {
vxge_debug_init(vxge_hw_device_trace_level_get(hldev), vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
"%s: %s : device registration failed!", "%s: %s : device registration failed!",
...@@ -3379,6 +3299,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev, ...@@ -3379,6 +3299,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
{ {
struct net_device *dev = hldev->ndev; struct net_device *dev = hldev->ndev;
struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
struct vxge_vpath *vpath = NULL;
int vpath_idx; int vpath_idx;
vxge_debug_entryexit(vdev->level_trace, vxge_debug_entryexit(vdev->level_trace,
...@@ -3389,9 +3310,11 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev, ...@@ -3389,9 +3310,11 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
*/ */
vdev->cric_err_event = type; vdev->cric_err_event = type;
for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
if (vdev->vpaths[vpath_idx].device_id == vp_id) vpath = &vdev->vpaths[vpath_idx];
if (vpath->device_id == vp_id)
break; break;
}
if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) { if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
if (type == VXGE_HW_EVENT_SLOT_FREEZE) { if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
...@@ -3428,8 +3351,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev, ...@@ -3428,8 +3351,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
vxge_vpath_intr_disable(vdev, vpath_idx); vxge_vpath_intr_disable(vdev, vpath_idx);
/* stop the queue for this vpath */ /* stop the queue for this vpath */
vxge_stop_tx_queue(&vdev->vpaths[vpath_idx]. netif_tx_stop_queue(vpath->fifo.txq);
fifo);
} }
} }
} }
...@@ -4274,7 +4196,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) ...@@ -4274,7 +4196,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vdev->vpaths[j].is_configured = 1; vdev->vpaths[j].is_configured = 1;
vdev->vpaths[j].device_id = i; vdev->vpaths[j].device_id = i;
vdev->vpaths[j].fifo.driver_id = j;
vdev->vpaths[j].ring.driver_id = j; vdev->vpaths[j].ring.driver_id = j;
vdev->vpaths[j].vdev = vdev; vdev->vpaths[j].vdev = vdev;
vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath; vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
......
...@@ -217,17 +217,13 @@ struct vxge_fifo_stats { ...@@ -217,17 +217,13 @@ struct vxge_fifo_stats {
}; };
struct vxge_fifo { struct vxge_fifo {
struct net_device *ndev; struct net_device *ndev;
struct pci_dev *pdev; struct pci_dev *pdev;
struct __vxge_hw_fifo *handle; struct __vxge_hw_fifo *handle;
struct netdev_queue *txq;
/* The vpath id maintained in the driver -
* 0 to 'maximum_vpaths_in_function - 1'
*/
int driver_id;
int tx_steering_type; int tx_steering_type;
int indicate_max_pkts; int indicate_max_pkts;
spinlock_t tx_lock;
/* Tx stats */ /* Tx stats */
struct vxge_fifo_stats stats; struct vxge_fifo_stats stats;
...@@ -275,7 +271,6 @@ struct vxge_ring { ...@@ -275,7 +271,6 @@ struct vxge_ring {
} ____cacheline_aligned; } ____cacheline_aligned;
struct vxge_vpath { struct vxge_vpath {
struct vxge_fifo fifo; struct vxge_fifo fifo;
struct vxge_ring ring; struct vxge_ring ring;
...@@ -443,10 +438,6 @@ int vxge_open_vpaths(struct vxgedev *vdev); ...@@ -443,10 +438,6 @@ int vxge_open_vpaths(struct vxgedev *vdev);
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
void vxge_stop_tx_queue(struct vxge_fifo *fifo);
void vxge_wake_tx_queue(struct vxge_fifo *fifo);
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
struct macInfo *mac); struct macInfo *mac);
......
...@@ -2466,14 +2466,12 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring) ...@@ -2466,14 +2466,12 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
* the same. * the same.
* @fifo: Handle to the fifo object used for non offload send * @fifo: Handle to the fifo object used for non offload send
* *
* The function polls the Tx for the completed descriptors and calls * The function polls the Tx for the completed descriptors and calls
* the driver via supplied completion callback. * the driver via supplied completion callback.
* *
* Returns: VXGE_HW_OK, if the polling is completed successful. * Returns: VXGE_HW_OK, if the polling is completed successful.
* VXGE_HW_COMPLETIONS_REMAIN: There are still more completed * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
* descriptors available which are yet to be processed. * descriptors available which are yet to be processed.
*
* See also: vxge_hw_vpath_poll_tx().
*/ */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo, enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
struct sk_buff ***skb_ptr, int nr_skb, struct sk_buff ***skb_ptr, int nr_skb,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册