Commit f283862f authored by Ajay Kumar Gupta, committed by Felipe Balbi

usb: musb: NAK timeout scheme on bulk TX endpoint

Fixes an endpoint starvation issue when more than one bulk QH is
multiplexed on the reserved bulk TX endpoint.

This patch sets the NAK timeout interval for such QHs; when a
timeout fires, the next QH is scheduled.

This scheme doesn't work for devices connected through a high-speed
to full-speed tree (transaction translator), as the musb controller
raises no NAK timeout interrupt for such devices.
Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
Parent 603fe2b2
@@ -892,6 +892,73 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 	}
 }
 
+/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
+ * the end; avoids starvation for other endpoints.
+ */
+static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
+	int is_in)
+{
+	struct dma_channel	*dma;
+	struct urb		*urb;
+	void __iomem		*mbase = musb->mregs;
+	void __iomem		*epio = ep->regs;
+	struct musb_qh		*cur_qh, *next_qh;
+	u16			rx_csr, tx_csr;
+
+	musb_ep_select(mbase, ep->epnum);
+	if (is_in) {
+		dma = is_dma_capable() ? ep->rx_channel : NULL;
+
+		/* clear nak timeout bit */
+		rx_csr = musb_readw(epio, MUSB_RXCSR);
+		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+		rx_csr &= ~MUSB_RXCSR_DATAERROR;
+		musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+		cur_qh = first_qh(&musb->in_bulk);
+	} else {
+		dma = is_dma_capable() ? ep->tx_channel : NULL;
+
+		/* clear nak timeout bit */
+		tx_csr = musb_readw(epio, MUSB_TXCSR);
+		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
+		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
+		musb_writew(epio, MUSB_TXCSR, tx_csr);
+
+		cur_qh = first_qh(&musb->out_bulk);
+	}
+	if (cur_qh) {
+		urb = next_urb(cur_qh);
+		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+			musb->dma_controller->channel_abort(dma);
+			urb->actual_length += dma->actual_len;
+			dma->actual_len = 0L;
+		}
+		musb_save_toggle(cur_qh, is_in, urb);
+		if (is_in) {
+			/* move cur_qh to end of queue */
+			list_move_tail(&cur_qh->ring, &musb->in_bulk);
+
+			/* get the next qh from musb->in_bulk */
+			next_qh = first_qh(&musb->in_bulk);
+
+			/* set rx_reinit and schedule the next qh */
+			ep->rx_reinit = 1;
+		} else {
+			/* move cur_qh to end of queue */
+			list_move_tail(&cur_qh->ring, &musb->out_bulk);
+
+			/* get the next qh from musb->out_bulk */
+			next_qh = first_qh(&musb->out_bulk);
+
+			/* set tx_reinit and schedule the next qh */
+			ep->tx_reinit = 1;
+		}
+		musb_start_urb(musb, is_in, next_qh);
+	}
+}
+
 /*
  * Service the default endpoint (ep0) as host.
@@ -1156,8 +1223,14 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 		status = -ETIMEDOUT;
 
 	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
-		dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
-
+		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
+				&& !list_is_singular(&musb->out_bulk)) {
+			dev_dbg(musb->controller,
+				"NAK timeout on TX%d ep\n", epnum);
+			musb_bulk_nak_timeout(musb, hw_ep, 0);
+		} else {
+			dev_dbg(musb->controller,
+				"TX end=%d device not responding\n", epnum);
 		/* NOTE: this code path would be a good place to PAUSE a
 		 * transfer, if there's some other (nonperiodic) tx urb
 		 * that could use this fifo. (dma complicates it...)
@@ -1170,6 +1243,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 		musb_writew(epio, MUSB_TXCSR,
 				MUSB_TXCSR_H_WZC_BITS
 				| MUSB_TXCSR_TXPKTRDY);
+		}
 		return;
 	}
@@ -1390,50 +1464,6 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 #endif
 
-/* Schedule next QH from musb->in_bulk and move the current qh to
- * the end; avoids starvation for other endpoints.
- */
-static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
-{
-	struct dma_channel	*dma;
-	struct urb		*urb;
-	void __iomem		*mbase = musb->mregs;
-	void __iomem		*epio = ep->regs;
-	struct musb_qh		*cur_qh, *next_qh;
-	u16			rx_csr;
-
-	musb_ep_select(mbase, ep->epnum);
-	dma = is_dma_capable() ? ep->rx_channel : NULL;
-
-	/* clear nak timeout bit */
-	rx_csr = musb_readw(epio, MUSB_RXCSR);
-	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
-	rx_csr &= ~MUSB_RXCSR_DATAERROR;
-	musb_writew(epio, MUSB_RXCSR, rx_csr);
-
-	cur_qh = first_qh(&musb->in_bulk);
-	if (cur_qh) {
-		urb = next_urb(cur_qh);
-		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
-			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
-			musb->dma_controller->channel_abort(dma);
-			urb->actual_length += dma->actual_len;
-			dma->actual_len = 0L;
-		}
-		musb_save_toggle(cur_qh, 1, urb);
-
-		/* move cur_qh to end of queue */
-		list_move_tail(&cur_qh->ring, &musb->in_bulk);
-
-		/* get the next qh from musb->in_bulk */
-		next_qh = first_qh(&musb->in_bulk);
-
-		/* set rx_reinit and schedule the next qh */
-		ep->rx_reinit = 1;
-		musb_start_urb(musb, 1, next_qh);
-	}
-}
-
 /*
  * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
  * and high-bandwidth IN transfer cases.
@@ -1510,7 +1540,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 			if (usb_pipebulk(urb->pipe)
 					&& qh->mux == 1
 					&& !list_is_singular(&musb->in_bulk)) {
-				musb_bulk_rx_nak_timeout(musb, hw_ep);
+				musb_bulk_nak_timeout(musb, hw_ep, 1);
 				return;
 			}
 			musb_ep_select(mbase, epnum);
@@ -1873,14 +1903,14 @@ static int musb_schedule(
 	else
 		head = &musb->out_bulk;
 
-	/* Enable bulk RX NAK timeout scheme when bulk requests are
+	/* Enable bulk RX/TX NAK timeout scheme when bulk requests are
 	 * multiplexed. This scheme doesn't work in high speed to full
 	 * speed scenario as NAK interrupts are not coming from a
 	 * full speed device connected to a high speed device.
 	 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
 	 * 4 (8 frame or 8ms) for FS device.
 	 */
-	if (is_in && qh->dev)
+	if (qh->dev)
 		qh->intv_reg =
 			(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
 	goto success;