/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic:  the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */

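/*
 * For illustration, the "claim" above is simply the qh pointer stored on
 * the hardware endpoint; in terms of the helpers defined later in this
 * file, roughly:
 *
 *	musb_ep_set_qh(hw_ep, is_in, qh);	 (claim hw_ep for this queue)
 *	... URBs from qh->hep->urb_list run on hw_ep ...
 *	if (list_empty(&qh->hep->urb_list))
 *		musb_ep_set_qh(hw_ep, is_in, NULL);	 (release it again)
 *
 * See musb_start_urb() and musb_advance_schedule() for the actual flow.
 */
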
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}
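
/*
 * For illustration only: glue layers holding a usb_hcd can recover the
 * controller state with this accessor, e.g.
 *
 *	struct musb *musb = hcd_to_musb(hcd);
 *
 * since hcd->hcd_priv stores a pointer to the struct musb.
 */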

static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	u16		lastcsr = 0;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		if (csr != lastcsr)
			dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		lastcsr = csr;
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}

}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh  = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}
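
/*
 * For illustration: with a shared FIFO both directions map to the same qh,
 * e.g.
 *
 *	musb_ep_set_qh(hw_ep, 1, qh);	 (sets hw_ep->in_qh, and with
 *					  hw_ep->is_shared_fifo also out_qh)
 *
 * which is why musb_ep_program() recovers its qh via "!is_out" below.
 */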

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase =  musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if (1) {	/* Always assume URB_ISO_ASAP */
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	dev_dbg(musb->controller,
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}

/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
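
/*
 * For illustration: the toggle saved here is restored when the endpoint
 * is next programmed, e.g. in musb_ep_program():
 *
 *	if (usb_gettoggle(urb->dev, qh->epnum, 1))
 *		csr |= MUSB_TXCSR_H_WR_DATATOGGLE
 *			| MUSB_TXCSR_H_DATATOGGLE;
 *
 * so DATA0/DATA1 sequencing survives a qh moving between hardware endpoints.
 */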

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
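			/* else: fall through */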

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}

static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		/*
		 * Enable Autoset according to table
		 * below
		 * bulk_split hb_mult	Autoset_Enable
		 *	0	1	Yes(Normal)
		 *	0	>1	No(High BW ISO)
		 *	1	1	Yes(HS bulk)
		 *	1	>1	Yes(FS bulk)
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/*
	 * Ensure the data reaches main memory before starting
	 * the DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			/*
			 * We could be flushing valid
			 * packets in double buffering
			 * case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered) {
				if (usb_gettoggle(urb->dev, qh->epnum, 1))
					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
						| MUSB_TXCSR_H_DATATOGGLE;
				else
					csr |= MUSB_TXCSR_CLRDATATOG;
			}

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok) {
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			} else if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
						/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
							"error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/* clear nak timeout bit */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, is_in, urb);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}
		musb_start_urb(musb, is_in, next_qh);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			dev_dbg(musb->controller, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			dev_dbg(musb->controller, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			dev_dbg(musb->controller, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
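
/*
 * For illustration, musb->ep0_stage advances roughly as:
 *
 *	MUSB_EP0_START -> MUSB_EP0_IN or MUSB_EP0_OUT	(data, if wLength)
 *	               -> MUSB_EP0_STATUS		(set in musb_h_ep0_irq)
 *	               -> MUSB_EP0_IDLE
 *
 * with musb_h_ep0_continue() returning true while DATA packets remain.
 */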

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		dev_dbg(musb->controller, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
1129 1130
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
F
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		dev_dbg(musb->controller, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif
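
/*
 * For illustration, the mode decision sketched above is made in
 * musb_tx_dma_program(), essentially:
 *
 *	if (length > pkt_size)
 *		mode = 1;	(multi-packet DMA)
 *	else
 *		mode = 0;	(one packet per interrupt)
 *
 * and musb_host_tx() later switches DMAMODE back to 0 to obtain the
 * terminal TXPKTRDY interrupt that mode 1 needs.
 */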

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			dev_dbg(musb->controller,
				"NAK timeout on TX%d ep\n", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			dev_dbg(musb->controller,
				"TX end=%d device not responding\n", epnum);
			/* NOTE:  this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) tx urb
			 * that could use this fifo.  (dma complicates it...)
			 * That's already done for bulk RX transfers.
			 *
			 * if (bulk && qh->ring.next != &musb->out_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1438
		return;
1439
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
1440
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1441 1442 1443
				offset, length)) {
			if (is_cppi_enabled() || tusb_dma_omap())
				musb_h_tx_dma_start(hw_ep);
1444
			return;
1445
		}
1446
	} else	if (tx_csr & MUSB_TXCSR_DMAENAB) {
1447
		dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
1448 1449
		return;
	}
F
1451 1452 1453 1454 1455 1456 1457 1458 1459
	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
1460
	/* Unmap the buffer so that CPU can use it */
D
1462 1463 1464 1465 1466 1467

	/*
	 * We need to map sg if the transfer_buffer is
	 * NULL.
	 */
	if (!urb->transfer_buffer)
1468
		qh->use_sg = true;
1469

1470
	if (qh->use_sg) {
1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		urb->transfer_buffer = qh->sg_miter.addr;
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, urb->transfer_buffer);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	if (qh->use_sg) {
		if (offset + length >= urb->transfer_buffer_length)
			qh->use_sg = false;
	}

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif
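
/*
 * For illustration, because mode 1 is avoided, the RX path below acks each
 * packet and issues the next IN token by hand, essentially:
 *
 *	val &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_H_AUTOREQ
 *			| MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_RXPKTRDY);
 *	musb_writew(hw_ep->regs, MUSB_RXCSR, val);
 *	if (!done) {
 *		val |= MUSB_RXCSR_H_REQPKT;
 *		musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
 *	}
 *
 * mirroring the "send next IN token (ReqPkt)" step in the diagram above.
 */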

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;
1557
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
F
	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
1574
		dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
F
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

1582
	dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
F
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1589
		dev_dbg(musb->controller, "RX end %d STALL\n", epnum);
F
		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1595
		dev_dbg(musb->controller, "end %d RX proto error\n", epnum);
F
		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1603
			dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);
1604 1605 1606 1607

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
F
1609 1610 1611
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
F
1613 1614 1615
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
1616
				musb_bulk_nak_timeout(musb, hw_ep, 1);
1617 1618
				return;
			}
F
1620 1621 1622
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);
F
			goto finish;
		} else {
1626
			dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
F
			iso_err = true;
		}
1630
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
1631
		dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
1632 1633
				epnum);
		status = -EPROTO;
F

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA)
	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done if urb buffer is full or short packet is
			 * received
			 */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			dev_dbg(musb->controller, "RX%d count %d, buffer 0x%llx len %d/%d\n",
					epnum, rx_count,
					(unsigned long long) urb->transfer_dma
					+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int d_status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					d_status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (d_status == 0) {
						d_status = -EOVERFLOW;
						urb->error_count++;
					}
					dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = d_status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ).
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, the
 *	problem is that you cannot be sure when the device will send the last
 *	packet and set RxPktRdy.  Sometimes the packet is received too soon,
 *	so that it gets lost when RxCSR is re-programmed at the end of the
 *	mode 1 transfer; sometimes it is received just a little late, so that
 *	if you try to configure for mode 0 soon after the mode 1 transfer
 *	completes, you will find RxCount 0.  You might think: why not wait
 *	for an interrupt when the packet is received?  Well, you won't get
 *	any!
 */
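
/* Summary (illustrative, not hardware documentation): mode 0 programs the
 * DMA controller for a single packet, with an explicit IN token requested
 * per packet; mode 1 lets AUTOREQ issue the IN tokens back to back, so one
 * channel_program() call can drain a multi-packet transfer that terminates
 * on a short packet.
 */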

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_DMAENAB;

			/* autoclear shouldn't be set in high bandwidth */
			if (qh->hb_mult == 1)
				val |= MUSB_RXCSR_AUTOCLEAR;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT: when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				val = musb_readw(epio, MUSB_RXCSR);
				val &= ~(MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_AUTOREQ
					| MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, val);
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			unsigned int received_len;

			/* Unmap the buffer so that CPU can use it */
			usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

			/*
			 * We need to map sg if the transfer_buffer is
			 * NULL.
			 */
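			/* SG_MITER_ATOMIC (set in sg_flags above) matters
			 * here: we run in interrupt context with the
			 * controller lock held, so the mapping must not
			 * sleep.
			 */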
			if (!urb->transfer_buffer) {
				qh->use_sg = true;
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						sg_flags);
			}

			if (qh->use_sg) {
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller, "error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					status = -EINVAL;
					done = true;
					goto finish;
				}
				urb->transfer_buffer = qh->sg_miter.addr;
				received_len = urb->actual_length;
				qh->offset = 0x0;
				done = musb_host_packet_rx(musb, urb, epnum,
						iso_err);
				/* Calculate the number of bytes received */
				received_len = urb->actual_length -
					received_len;
				qh->sg_miter.consumed = received_len;
				sg_miter_stop(&qh->sg_miter);
			} else {
				done = musb_host_packet_rx(musb, urb,
						epnum, iso_err);
			}
			dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (qh->use_sg)
			qh->use_sg = false;

		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;
	u8			toggle;
	u8			txtype;
	struct urb		*urb = next_urb(qh);

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;
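
	/* Best-fit search: among the free hardware endpoints, the loop below
	 * picks the one whose FIFO exceeds this qh's (maxpacket * hb_mult)
	 * by the smallest margin, keeping the big FIFOs free for transfers
	 * that actually need them.
	 */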

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/*
			 * Mentor controller has a bug in that if we schedule
			 * a BULK Tx transfer on an endpoint that had earlier
			 * handled ISOC then the BULK transfer has to start on
			 * a zero toggle.  If the BULK transfer starts on a 1
			 * toggle then this transfer will fail as the Mentor
			 * controller starts the Bulk transfer on a 0 toggle
			 * irrespective of the programming of the toggle bits
			 * in the TXCSR register.  Check for this condition
			 * while allocating the EP for a Tx Bulk transfer; if
			 * so, skip this EP.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX/TX NAK timeout scheme when bulk requests are
		 * multiplexed.  This scheme doesn't work in the high speed to
		 * full speed scenario, as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * The NAK timeout interval is 8 (128 uframes or 16 ms) for HS
		 * and 4 (8 frames or 8 ms) for an FS device.
		 */
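		/* (For reference: the interval register value m is
		 * logarithmic, giving a timeout of 2^(m-1) (micro)frames,
		 * so 8 -> 2^7 = 128 uframes * 125 us = 16 ms at high speed
		 * and 4 -> 2^3 = 8 frames * 1 ms = 8 ms at full speed.)
		 */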
		if (qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}

static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = usb_endpoint_maxp(epd);
	qh->type = usb_endpoint_type(epd);

	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
	 * Some musb cores don't support high bandwidth ISO transfers; and
	 * we don't (yet!) support high bandwidth interrupt transfers.
	 */
	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}
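
	/* Illustration (not from the original source): wMaxPacketSize
	 * 0x1400 on a high bandwidth ISO endpoint gives
	 * hb_mult = 1 + ((0x1400 >> 11) & 0x03) = 3 transactions per
	 * (micro)frame, each of qh->maxpacket = 0x400 = 1024 bytes.
	 */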

	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;
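	/* Example (illustrative): a bulk endpoint 1 on a full speed device
	 * gives type_reg = (USB_ENDPOINT_XFER_BULK << 4) | 1 | 0x80 = 0xa1.
	 */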

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;
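	/* With the logarithmic encoding the hardware polls every
	 * 2^(interval - 1) (micro)frames; e.g. a high speed interrupt
	 * endpoint with bInterval = 4 is polled every 2^3 = 8 uframes,
	 * i.e. once per millisecond (per the USB 2.0 bInterval rules).
	 */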

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv || !next_urb(qh)) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}


/*
 * Abort a transfer that's at the head of a hardware queue.
 * Called with the controller locked, IRQs blocked.
 * That hardware queue advances to the next transfer, unless prevented.
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			dev_dbg(musb->controller,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (ep->epnum && is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush the CPU write buffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}

static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in  = usb_pipein(urb->pipe);
	int			ret;

	dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			is_in ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
2380
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
2396
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}

static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}

static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u8		devctl;

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->state) {
	case OTG_STATE_A_SUSPEND:
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/* ID could be grounded even if there's no device
		 * on the other end of the cable.  NOTE that the
		 * A_WAIT_VRISE timers are messy with MUSB...
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				usb_otg_state_string(musb->xceiv->state));
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}

#ifndef CONFIG_MUSB_PIO_ONLY

#define MUSB_USB_DMA_ALIGN 4

struct musb_temp_buffer {
	void *kmalloc_ptr;
	void *old_xfer_buffer;
	u8 data[0];
};

static void musb_free_temp_buffer(struct urb *urb)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;

	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
		return;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
			    data);

	if (dir == DMA_FROM_DEVICE) {
		memcpy(temp->old_xfer_buffer, temp->data,
		       urb->transfer_buffer_length);
	}
	urb->transfer_buffer = temp->old_xfer_buffer;
	kfree(temp->kmalloc_ptr);

	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}

static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;
	void *kmalloc_ptr;
	size_t kmalloc_size;

	if (urb->num_sgs || urb->sg ||
	    urb->transfer_buffer_length == 0 ||
	    !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
		return 0;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	/* Allocate a buffer with enough padding for alignment */
	kmalloc_size = urb->transfer_buffer_length +
		sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;

	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
	if (!kmalloc_ptr)
		return -ENOMEM;

	/* Position our struct temp_buffer such that data is aligned */
	temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);


	temp->kmalloc_ptr = kmalloc_ptr;
	temp->old_xfer_buffer = urb->transfer_buffer;
	if (dir == DMA_TO_DEVICE)
		memcpy(temp->data, urb->transfer_buffer,
		       urb->transfer_buffer_length);
	urb->transfer_buffer = temp->data;

	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;

	return 0;
}
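
/* Note (summary of the two helpers above): the bounce buffer round trip
 * copies OUT data into the aligned buffer in musb_alloc_temp_buffer()
 * before mapping, and copies IN data back to the caller's buffer in
 * musb_free_temp_buffer() after unmapping.
 */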

static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				      gfp_t mem_flags)
{
	struct musb	*musb = hcd_to_musb(hcd);
	int ret;

	/*
	 * The DMA engine in RTL1.8 and above cannot handle
	 * DMA addresses that are not aligned to a 4 byte boundary.
	 * For such engines we implement the (un)map_urb_for_dma hooks.
	 * Do not use these hooks for RTL < 1.8.
	 */
	if (musb->hwvers < MUSB_HWVERS_1800)
		return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);

	ret = musb_alloc_temp_buffer(urb, mem_flags);
	if (ret)
		return ret;

	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
	if (ret)
		musb_free_temp_buffer(urb);

	return ret;
}

static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct musb	*musb = hcd_to_musb(hcd);

	usb_hcd_unmap_urb_for_dma(hcd, urb);

	/* Do not use this hook for RTL<1.8 (see description above) */
	if (musb->hwvers < MUSB_HWVERS_1800)
		return;

	musb_free_temp_buffer(urb);
}
#endif /* !CONFIG_MUSB_PIO_ONLY */

static const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb *),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

#ifndef CONFIG_MUSB_PIO_ONLY
	.map_urb_for_dma	= musb_map_urb_for_dma,
	.unmap_urb_for_dma	= musb_unmap_urb_for_dma,
#endif

F
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};

int musb_host_alloc(struct musb *musb)
{
	struct device	*dev = musb->controller;

	/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
	musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
	if (!musb->hcd)
		return -EINVAL;

	*musb->hcd->hcd_priv = (unsigned long) musb;
	musb->hcd->self.uses_pio_for_control = 1;
	musb->hcd->uses_new_polling = 1;
	musb->hcd->has_tt = 1;

	return 0;
}

void musb_host_cleanup(struct musb *musb)
{
	if (musb->port_mode == MUSB_PORT_MODE_GADGET)
		return;
	usb_remove_hcd(musb->hcd);
	musb->hcd = NULL;
}

void musb_host_free(struct musb *musb)
{
	usb_put_hcd(musb->hcd);
}

int musb_host_setup(struct musb *musb, int power_budget)
{
	int ret;
	struct usb_hcd *hcd = musb->hcd;

	MUSB_HST_MODE(musb);
	musb->xceiv->otg->default_a = 1;
	musb->xceiv->state = OTG_STATE_A_IDLE;

	otg_set_host(musb->xceiv->otg, &hcd->self);
	hcd->self.otg_port = 1;
	musb->xceiv->otg->host = &hcd->self;
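	/* The platform's power budget is given in 2 mA units (default 250,
	 * i.e. 500 mA); doubling converts it to the mA value usbcore
	 * expects.  (Assumes the usual musb platform data convention.)
	 */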
	hcd->power_budget = 2 * (power_budget ? : 250);

	ret = usb_add_hcd(hcd, 0, 0);
	if (ret < 0)
		return ret;

	return 0;
}

void musb_host_resume_root_hub(struct musb *musb)
{
	usb_hcd_resume_root_hub(musb->hcd);
}

void musb_host_poke_root_hub(struct musb *musb)
{
	MUSB_HST_MODE(musb);
	if (musb->hcd->status_urb)
		usb_hcd_poll_rh_status(musb->hcd);
	else
		usb_hcd_resume_root_hub(musb->hcd);
}