diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 25ac2def08cecca3ec128f662c8bc756f99c2f4f..085f9125cf42a6c1aa76bbb6744ba5b9ba919d31 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -183,6 +183,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 	work_done = nps_enet_rx_handler(ndev);
 	if (work_done < budget) {
 		u32 buf_int_enable_value = 0;
+		u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+		u32 tx_ctrl_ct =
+			(tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
 
 		napi_complete(napi);
 
@@ -192,6 +195,18 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 
 		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
 				 buf_int_enable_value);
+
+		/* If a tx interrupt fires while interrupts are masked it is
+		 * lost, because the tx interrupt is edge-triggered.
+		 * Specifically, a tx completion that arrives in the code
+		 * section above, between nps_enet_tx_handler and re-enabling
+		 * the interrupts, would leave all tx requests stuck until
+		 * the next rx interrupt. The two lines below handle this
+		 * case by re-adding ourselves to the poll list.
+		 */
+
+		if (priv->tx_skb && !tx_ctrl_ct)
+			napi_reschedule(napi);
 	}
 
 	return work_done;
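
For readers who want the pattern outside the diff context, the sketch below illustrates the same idea in a generic NAPI poll routine: sample the tx hardware state before napi_complete(), then reschedule after interrupts are re-enabled if a completion may have raced with the masked window. It is a minimal sketch, not the nps_enet code; everything other than the standard NAPI API (my_priv, my_poll, my_rx_poll, my_tx_hw_busy, my_irq_unmask) is a hypothetical placeholder.

/* Minimal sketch of the technique the patch applies, using hypothetical
 * driver helpers; only napi_complete()/napi_reschedule() are real kernel API.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {
	struct napi_struct napi;
	struct sk_buff *tx_skb;		/* skb the driver thinks is still in flight */
};

/* Hypothetical driver-specific helpers (stand-ins for register accessors). */
static int my_rx_poll(struct my_priv *priv, int budget);
static bool my_tx_hw_busy(struct my_priv *priv);	/* hardware still transmitting? */
static void my_irq_unmask(struct my_priv *priv);	/* re-enable device interrupts */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_rx_poll(priv, budget);

	if (work_done < budget) {
		/* Sample the tx state *before* completing NAPI, so a completion
		 * that fires during the window below is not silently missed.
		 */
		bool tx_busy = my_tx_hw_busy(priv);

		napi_complete(napi);
		my_irq_unmask(priv);

		/* An edge-triggered tx interrupt raised while interrupts were
		 * masked is gone for good.  If the driver still has an skb
		 * outstanding but the hardware has already finished, poll
		 * again instead of waiting for the next rx interrupt.
		 */
		if (priv->tx_skb && !tx_busy)
			napi_reschedule(napi);
	}

	return work_done;
}

The key design point mirrored from the patch is the ordering: the hardware tx status is read before napi_complete(), and the reschedule decision is made after interrupts are unmasked, so the only completions that can slip through are exactly the ones the reschedule recovers.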