diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 666cf7e9cd0940e28cebce64192a2e5d39c98c2d..73e70e076e61da048bcded35cddc1c93cc8c833c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -377,20 +377,12 @@ static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
 		return;
 
 	lio = GET_LIO(netdev);
-	if (netif_is_multiqueue(netdev)) {
-		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
-		    lio->linfo.link.s.link_up &&
-		    (!octnet_iq_is_full(oct, iq_num))) {
-			netif_wake_subqueue(netdev, iq->q_index);
-			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
-						  tx_restart, 1);
-		}
-	} else if (netif_queue_stopped(netdev) &&
-		   lio->linfo.link.s.link_up &&
-		   (!octnet_iq_is_full(oct, lio->txq))) {
-		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+	if (__netif_subqueue_stopped(netdev, iq->q_index) &&
+	    lio->linfo.link.s.link_up &&
+	    (!octnet_iq_is_full(oct, iq_num))) {
+		netif_wake_subqueue(netdev, iq->q_index);
+		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
 					  tx_restart, 1);
-		netif_wake_queue(netdev);
 	}
 }
 
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 21280cb66550337159f2b536ad5fad8c82bc1d11..58b5c75fd2ee16e9f9ab3b2854367ac7280af2ab 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -513,115 +513,6 @@ static void liquidio_deinit_pci(void)
 	pci_unregister_driver(&liquidio_pci_driver);
 }
 
-/**
- * \brief Stop Tx queues
- * @param netdev network device
- */
-static inline void txqs_stop(struct net_device *netdev)
-{
-	if (netif_is_multiqueue(netdev)) {
-		int i;
-
-		for (i = 0; i < netdev->num_tx_queues; i++)
-			netif_stop_subqueue(netdev, i);
-	} else {
-		netif_stop_queue(netdev);
-	}
-}
-
-/**
- * \brief Start Tx queues
- * @param netdev network device
- */
-static inline void txqs_start(struct net_device *netdev)
-{
-	if (netif_is_multiqueue(netdev)) {
-		int i;
-
-		for (i = 0; i < netdev->num_tx_queues; i++)
-			netif_start_subqueue(netdev, i);
-	} else {
-		netif_start_queue(netdev);
-	}
-}
-
-/**
- * \brief Wake Tx queues
- * @param netdev network device
- */
-static inline void txqs_wake(struct net_device *netdev)
-{
-	struct lio *lio = GET_LIO(netdev);
-
-	if (netif_is_multiqueue(netdev)) {
-		int i;
-
-		for (i = 0; i < netdev->num_tx_queues; i++) {
-			int qno = lio->linfo.txpciq[i %
-				lio->oct_dev->num_iqs].s.q_no;
-
-			if (__netif_subqueue_stopped(netdev, i)) {
-				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
-							  tx_restart, 1);
-				netif_wake_subqueue(netdev, i);
-			}
-		}
-	} else {
-		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
-					  tx_restart, 1);
-		netif_wake_queue(netdev);
-	}
-}
-
-/**
- * \brief Stop Tx queue
- * @param netdev network device
- */
-static void stop_txq(struct net_device *netdev)
-{
-	txqs_stop(netdev);
-}
-
-/**
- * \brief Start Tx queue
- * @param netdev network device
- */
-static void start_txq(struct net_device *netdev)
-{
-	struct lio *lio = GET_LIO(netdev);
-
-	if (lio->linfo.link.s.link_up) {
-		txqs_start(netdev);
-		return;
-	}
-}
-
-/**
- * \brief Wake a queue
- * @param netdev network device
- * @param q which queue to wake
- */
-static inline void wake_q(struct net_device *netdev, int q)
-{
-	if (netif_is_multiqueue(netdev))
-		netif_wake_subqueue(netdev, q);
-	else
-		netif_wake_queue(netdev);
-}
-
-/**
- * \brief Stop a queue
- * @param netdev network device
- * @param q which queue to stop
- */
-static inline void stop_q(struct net_device *netdev, int q)
-{
-	if (netif_is_multiqueue(netdev))
-		netif_stop_subqueue(netdev, q);
-	else
-		netif_stop_queue(netdev);
-}
-
 /**
  * \brief Check Tx queue status, and take appropriate action
  * @param lio per-network private data
@@ -629,33 +520,24 @@ static inline void stop_q(struct net_device *netdev, int q)
  */
 static inline int check_txq_status(struct lio *lio)
 {
+	int numqs = lio->netdev->num_tx_queues;
 	int ret_val = 0;
+	int q, iq;
 
-	if (netif_is_multiqueue(lio->netdev)) {
-		int numqs = lio->netdev->num_tx_queues;
-		int q, iq = 0;
-
-		/* check each sub-queue state */
-		for (q = 0; q < numqs; q++) {
-			iq = lio->linfo.txpciq[q %
-				lio->oct_dev->num_iqs].s.q_no;
-			if (octnet_iq_is_full(lio->oct_dev, iq))
-				continue;
-			if (__netif_subqueue_stopped(lio->netdev, q)) {
-				wake_q(lio->netdev, q);
-				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
-							  tx_restart, 1);
-				ret_val++;
-			}
+	/* check each sub-queue state */
+	for (q = 0; q < numqs; q++) {
+		iq = lio->linfo.txpciq[q %
+			lio->oct_dev->num_iqs].s.q_no;
+		if (octnet_iq_is_full(lio->oct_dev, iq))
+			continue;
+		if (__netif_subqueue_stopped(lio->netdev, q)) {
+			netif_wake_subqueue(lio->netdev, q);
+			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
+						  tx_restart, 1);
+			ret_val++;
 		}
-	} else {
-		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
-			return 0;
-		wake_q(lio->netdev, lio->txq);
-		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
-					  tx_restart, 1);
-		ret_val = 1;
 	}
+
 	return ret_val;
 }
 
@@ -900,11 +782,11 @@ static inline void update_link_status(struct net_device *netdev,
 	if (lio->linfo.link.s.link_up) {
 		dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
 		netif_carrier_on(netdev);
-		txqs_wake(netdev);
+		wake_txqs(netdev);
 	} else {
 		dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
 		netif_carrier_off(netdev);
-		stop_txq(netdev);
+		stop_txqs(netdev);
 	}
 	if (lio->linfo.link.s.mtu != current_max_mtu) {
 		netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
@@ -1752,16 +1634,6 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
 	return 0;
 }
 
-static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
-{
-	int q = 0;
-
-	if (netif_is_multiqueue(lio->netdev))
-		q = skb->queue_mapping % lio->linfo.num_txpciq;
-
-	return q;
-}
-
 /**
  * \brief Check Tx queue state for a given network buffer
  * @param lio per-network private data
@@ -1769,22 +1641,17 @@ static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
  */
 static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
 {
-	int q = 0, iq = 0;
+	int q, iq;
 
-	if (netif_is_multiqueue(lio->netdev)) {
-		q = skb->queue_mapping;
-		iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
-	} else {
-		iq = lio->txq;
-		q = iq;
-	}
+	q = skb->queue_mapping;
+	iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
 
 	if (octnet_iq_is_full(lio->oct_dev, iq))
 		return 0;
 
 	if (__netif_subqueue_stopped(lio->netdev, q)) {
 		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
-		wake_q(lio->netdev, q);
+		netif_wake_subqueue(lio->netdev, q);
 	}
 	return 1;
 }
@@ -2224,7 +2091,7 @@ static int liquidio_open(struct net_device *netdev)
 		return -1;
 	}
 
-	start_txq(netdev);
+	start_txqs(netdev);
 
 	/* tell Octeon to start forwarding packets to host */
 	send_rx_ctrl_cmd(lio, 1);
@@ -2666,14 +2533,9 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	lio = GET_LIO(netdev);
 	oct = lio->oct_dev;
 
-	if (netif_is_multiqueue(netdev)) {
-		q_idx = skb->queue_mapping;
-		q_idx = (q_idx % (lio->linfo.num_txpciq));
-		tag = q_idx;
-		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
-	} else {
-		iq_no = lio->txq;
-	}
+	q_idx = skb_iq(lio, skb);
+	tag = q_idx;
+	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
 
 	stats = &oct->instr_queue[iq_no]->stats;
 
@@ -2704,23 +2566,14 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	ndata.q_no = iq_no;
 
-	if (netif_is_multiqueue(netdev)) {
-		if (octnet_iq_is_full(oct, ndata.q_no)) {
-			/* defer sending if queue is full */
-			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
-				   ndata.q_no);
-			stats->tx_iq_busy++;
-			return NETDEV_TX_BUSY;
-		}
-	} else {
-		if (octnet_iq_is_full(oct, lio->txq)) {
-			/* defer sending if queue is full */
-			stats->tx_iq_busy++;
-			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
-				   lio->txq);
-			return NETDEV_TX_BUSY;
-		}
+	if (octnet_iq_is_full(oct, ndata.q_no)) {
+		/* defer sending if queue is full */
+		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+			   ndata.q_no);
+		stats->tx_iq_busy++;
+		return NETDEV_TX_BUSY;
 	}
+
 	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
 	 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
 	 */
@@ -2876,7 +2729,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
 
 	if (status == IQ_SEND_STOP)
-		stop_q(netdev, q_idx);
+		netif_stop_subqueue(netdev, q_idx);
 
 	netif_trans_update(netdev);
 
@@ -2915,7 +2768,7 @@ static void liquidio_tx_timeout(struct net_device *netdev)
 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
 		   netdev->stats.tx_dropped);
 	netif_trans_update(netdev);
-	txqs_wake(netdev);
+	wake_txqs(netdev);
 }
 
 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 3342d64b7081507fa61325b36aaae23328007384..d5f5c9a693eeacc8c9b5fbaeec1adebeeb7978a8 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -284,105 +284,6 @@ static struct pci_driver liquidio_vf_pci_driver = {
 	.err_handler	= &liquidio_vf_err_handler,    /* For AER */
 };
 
-/**
- * \brief Stop Tx queues
- * @param netdev network device
- */
-static void txqs_stop(struct net_device *netdev)
-{
-	if (netif_is_multiqueue(netdev)) {
-		int i;
-
-		for (i = 0; i < netdev->num_tx_queues; i++)
-			netif_stop_subqueue(netdev, i);
-	} else {
-		netif_stop_queue(netdev);
-	}
-}
-
-/**
- * \brief Start Tx queues
- * @param netdev network device
- */
-static void txqs_start(struct net_device *netdev)
-{
-	if (netif_is_multiqueue(netdev)) {
-		int i;
-
-		for (i = 0; i < netdev->num_tx_queues; i++)
-			netif_start_subqueue(netdev, i);
-	} else {
-		netif_start_queue(netdev);
-	}
-}
-
-/**
- * \brief Wake Tx queues
- * @param netdev network device
- */
-static void txqs_wake(struct net_device *netdev)
-{
-	struct lio *lio = GET_LIO(netdev);
-
-	if (netif_is_multiqueue(netdev)) {
-		int i;
-
-		for (i = 0; i < netdev->num_tx_queues; i++) {
-			int qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs]
-				      .s.q_no;
-			if (__netif_subqueue_stopped(netdev, i)) {
-				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
-							  tx_restart, 1);
-				netif_wake_subqueue(netdev, i);
-			}
-		}
-	} else {
-		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
-					  tx_restart, 1);
-		netif_wake_queue(netdev);
-	}
-}
-
-/**
- * \brief Start Tx queue
- * @param netdev network device
- */
-static void start_txq(struct net_device *netdev)
-{
-	struct lio *lio = GET_LIO(netdev);
-
-	if (lio->linfo.link.s.link_up) {
-		txqs_start(netdev);
-		return;
-	}
-}
-
-/**
- * \brief Wake a queue
- * @param netdev network device
- * @param q which queue to wake
- */
-static void wake_q(struct net_device *netdev, int q)
-{
-	if (netif_is_multiqueue(netdev))
-		netif_wake_subqueue(netdev, q);
-	else
-		netif_wake_queue(netdev);
-}
-
-/**
- * \brief Stop a queue
- * @param netdev network device
- * @param q which queue to stop
- */
-static void stop_q(struct net_device *netdev, int q)
-{
-	if (netif_is_multiqueue(netdev))
-		netif_stop_subqueue(netdev, q);
-	else
-		netif_stop_queue(netdev);
-}
-
 /**
  * Remove the node at the head of the list. The list would be empty at
  * the end of this call if there are no more nodes in the list.
@@ -614,10 +515,10 @@ static void update_link_status(struct net_device *netdev,
 
 	if (lio->linfo.link.s.link_up) {
 		netif_carrier_on(netdev);
-		txqs_wake(netdev);
+		wake_txqs(netdev);
 	} else {
 		netif_carrier_off(netdev);
-		txqs_stop(netdev);
+		stop_txqs(netdev);
 	}
 
 	if (lio->linfo.link.s.mtu != current_max_mtu) {
@@ -1052,16 +953,6 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
 	return 0;
 }
 
-static int skb_iq(struct lio *lio, struct sk_buff *skb)
-{
-	int q = 0;
-
-	if (netif_is_multiqueue(lio->netdev))
-		q = skb->queue_mapping % lio->linfo.num_txpciq;
-
-	return q;
-}
-
 /**
  * \brief Check Tx queue state for a given network buffer
  * @param lio per-network private data
@@ -1069,22 +960,17 @@ static int skb_iq(struct lio *lio, struct sk_buff *skb)
  */
 static int check_txq_state(struct lio *lio, struct sk_buff *skb)
 {
-	int q = 0, iq = 0;
+	int q, iq;
 
-	if (netif_is_multiqueue(lio->netdev)) {
-		q = skb->queue_mapping;
-		iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no;
-	} else {
-		iq = lio->txq;
-		q = iq;
-	}
+	q = skb->queue_mapping;
+	iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no;
 
 	if (octnet_iq_is_full(lio->oct_dev, iq))
 		return 0;
 
 	if (__netif_subqueue_stopped(lio->netdev, q)) {
 		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
-		wake_q(lio->netdev, q);
+		netif_wake_subqueue(lio->netdev, q);
 	}
 
 	return 1;
@@ -1258,7 +1144,7 @@ static int liquidio_open(struct net_device *netdev)
 	lio->intf_open = 1;
 
 	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
-	start_txq(netdev);
+	start_txqs(netdev);
 
 	/* tell Octeon to start forwarding packets to host */
 	send_rx_ctrl_cmd(lio, 1);
@@ -1300,7 +1186,7 @@ static int liquidio_stop(struct net_device *netdev)
 
 	ifstate_reset(lio, LIO_IFSTATE_RUNNING);
 
-	txqs_stop(netdev);
+	stop_txqs(netdev);
 
 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
 
@@ -1718,14 +1604,9 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	lio = GET_LIO(netdev);
 	oct = lio->oct_dev;
 
-	if (netif_is_multiqueue(netdev)) {
-		q_idx = skb->queue_mapping;
-		q_idx = (q_idx % (lio->linfo.num_txpciq));
-		tag = q_idx;
-		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
-	} else {
-		iq_no = lio->txq;
-	}
+	q_idx = skb_iq(lio, skb);
+	tag = q_idx;
+	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
 
 	stats = &oct->instr_queue[iq_no]->stats;
 
@@ -1754,22 +1635,12 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	ndata.q_no = iq_no;
 
-	if (netif_is_multiqueue(netdev)) {
-		if (octnet_iq_is_full(oct, ndata.q_no)) {
-			/* defer sending if queue is full */
-			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
-				   ndata.q_no);
-			stats->tx_iq_busy++;
-			return NETDEV_TX_BUSY;
-		}
-	} else {
-		if (octnet_iq_is_full(oct, lio->txq)) {
-			/* defer sending if queue is full */
-			stats->tx_iq_busy++;
-			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
-				   ndata.q_no);
-			return NETDEV_TX_BUSY;
-		}
+	if (octnet_iq_is_full(oct, ndata.q_no)) {
+		/* defer sending if queue is full */
+		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+			   ndata.q_no);
+		stats->tx_iq_busy++;
+		return NETDEV_TX_BUSY;
 	}
 
 	ndata.datasize = skb->len;
@@ -1911,7 +1782,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	if (status == IQ_SEND_STOP) {
 		dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
 			iq_no);
-		stop_q(netdev, q_idx);
+		netif_stop_subqueue(netdev, q_idx);
 	}
 
 	netif_trans_update(netdev);
@@ -1951,7 +1822,7 @@ static void liquidio_tx_timeout(struct net_device *netdev)
 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
 		   netdev->stats.tx_dropped);
 	netif_trans_update(netdev);
-	txqs_wake(netdev);
+	wake_txqs(netdev);
 }
 
 static int
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 76803a569794b8a4b8a24bcb23c8290c2dbc1661..8782206271b631c04398b492ce289e77a8856822 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -506,4 +506,56 @@ static inline int wait_for_pending_requests(struct octeon_device *oct)
 	return 0;
 }
 
+/**
+ * \brief Stop Tx queues
+ * @param netdev network device
+ */
+static inline void stop_txqs(struct net_device *netdev)
+{
+	int i;
+
+	for (i = 0; i < netdev->num_tx_queues; i++)
+		netif_stop_subqueue(netdev, i);
+}
+
+/**
+ * \brief Wake Tx queues
+ * @param netdev network device
+ */
+static inline void wake_txqs(struct net_device *netdev)
+{
+	struct lio *lio = GET_LIO(netdev);
+	int i, qno;
+
+	for (i = 0; i < netdev->num_tx_queues; i++) {
+		qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;
+
+		if (__netif_subqueue_stopped(netdev, i)) {
+			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
+						  tx_restart, 1);
+			netif_wake_subqueue(netdev, i);
+		}
+	}
+}
+
+/**
+ * \brief Start Tx queues
+ * @param netdev network device
+ */
+static inline void start_txqs(struct net_device *netdev)
+{
+	struct lio *lio = GET_LIO(netdev);
+	int i;
+
+	if (lio->linfo.link.s.link_up) {
+		for (i = 0; i < netdev->num_tx_queues; i++)
+			netif_start_subqueue(netdev, i);
+	}
+}
+
+static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
+{
+	return skb->queue_mapping % lio->linfo.num_txpciq;
+}
+
 #endif
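
Note (not part of the patch): the helpers consolidated into octeon_network.h all rely on the same two modulo mappings. skb_iq() folds skb->queue_mapping into the range of host Tx queues (num_txpciq), and wake_txqs()/check_txq_status() fold a netdev Tx queue index into the range of device instruction queues (num_iqs). Below is a minimal userspace sketch of just that mapping; the struct, function names, and queue counts are mock stand-ins for the real lio/octeon_device state, not driver code.

/* Simplified model of the queue-index mapping used by the new helpers. */
#include <stdio.h>

struct mock_lio {
	int num_txpciq;	/* host-visible Tx queues (lio->linfo.num_txpciq) */
	int num_iqs;	/* device instruction queues (lio->oct_dev->num_iqs) */
};

/* mirrors skb_iq(): skb->queue_mapping % lio->linfo.num_txpciq */
static int mock_skb_iq(const struct mock_lio *lio, int queue_mapping)
{
	return queue_mapping % lio->num_txpciq;
}

/* mirrors the "i % lio->oct_dev->num_iqs" step in wake_txqs()/check_txq_status() */
static int mock_txq_to_iq(const struct mock_lio *lio, int txq)
{
	return txq % lio->num_iqs;
}

int main(void)
{
	struct mock_lio lio = { .num_txpciq = 8, .num_iqs = 4 };	/* made-up counts */
	int qm;

	for (qm = 0; qm < 10; qm++) {
		int txq = mock_skb_iq(&lio, qm);

		printf("queue_mapping %d -> txq %d -> iq %d\n",
		       qm, txq, mock_txq_to_iq(&lio, txq));
	}
	return 0;
}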