/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"


/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic:  the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	u16		lastcsr = 0;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		if (csr != lastcsr)
			dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		lastcsr = csr;
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}

}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

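/*
 * Set or look up the qh bound to one direction of a hardware endpoint.
 * For shared-FIFO endpoints the same qh is recorded for both directions,
 * so either lookup finds the transfer currently using the endpoint.
 */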
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh  = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase =  musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	dev_dbg(musb->controller,
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}

/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}

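/*
 * Try to hand a host TX transfer to the DMA controller.  Returns true if
 * the channel was programmed; on failure the channel is released and
 * DMAENAB/AUTOSET are cleared so the caller falls back to PIO.
 */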
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#ifdef	CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		if (qh->hb_mult == 1)
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/*
	 * Ensure the data reaches to main memory before starting
	 * DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok)
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			else if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/* clear nak timeout bit */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, is_in, urb);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}
		musb_start_urb(musb, is_in, next_qh);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			dev_dbg(musb->controller, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			dev_dbg(musb->controller, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			dev_dbg(musb->controller, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		dev_dbg(musb->controller, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
1087 1088
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
F
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		dev_dbg(musb->controller, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			dev_dbg(musb->controller,
				"NAK timeout on TX%d ep\n", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			dev_dbg(musb->controller,
				"TX end=%d device not responding\n", epnum);
			/* NOTE:  this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) tx urb
			 * that could use this fifo.  (dma complicates it...)
			 * That's already done for bulk RX transfers.
			 *
			 * if (bulk && qh->ring.next != &musb->out_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled() || tusb_dma_omap())
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else	if (tx_csr & MUSB_TXCSR_DMAENAB) {
		dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
	musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		dev_dbg(musb->controller, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		dev_dbg(musb->controller, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_nak_timeout(musb, hw_ep, 1);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
				epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else  {
		/* done if urb buffer is full or short packet is recd */
		done = (urb->actual_length + xfer_len >=
				urb->transfer_buffer_length
			|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int d_status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					d_status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (d_status == 0) {
						d_status = -EOVERFLOW;
						urb->error_count++;
					}
					dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\
1712 1713 1714 1715 1716
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = d_status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
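			/*
			 * Mentor RX DMA mode 0 moves one packet per request,
			 * with software issuing REQPKT for each packet;
			 * mode 1 streams a multi-packet buffer using AUTOREQ.
			 * Mode 1 is only attempted below (when USE_MODE1 is
			 * defined) for URBs that must not end on a short
			 * packet (URB_SHORT_NOT_OK).
			 */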
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ).
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, the
 *	problem is that you cannot be sure when the device will send the
 *	last packet and set RxPktRdy.  Sometimes the packet is received too
 *	soon, so that it gets lost when RxCSR is reprogrammed at the end of
 *	the mode 1 transfer; sometimes it arrives just a little late, so that
 *	if you try to configure for mode 0 soon after the mode 1 transfer
 *	completes, you will find rxcount 0.  You might think: why not just
 *	wait for an interrupt when the packet is received?  Well, you won't
 *	get any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_DMAENAB;

			/* autoclear shouldn't be set in high bandwidth */
			if (qh->hb_mult == 1)
				val |= MUSB_RXCSR_AUTOCLEAR;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT: if actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				val = musb_readw(epio, MUSB_RXCSR);
				val &= ~(MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_AUTOREQ
					| MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, val);
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			/* Unmap the buffer so that CPU can use it */
			usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;
	u8			toggle;
	u8			txtype;
	struct urb		*urb = next_urb(qh);

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

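	/*
	 * Best-fit search: claim the free hardware endpoint whose FIFO
	 * exceeds this qh's (maxpacket * hb_mult) by the smallest margin,
	 * leaving the larger FIFOs for transfers that need them.
	 */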
	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/*
			 * Mentor controller has a bug in that if we schedule
			 * a BULK Tx transfer on an endpoint that had earlier
			 * handled ISOC then the BULK transfer has to start on
			 * a zero toggle.  If the BULK transfer starts on a 1
			 * toggle then this transfer will fail as the mentor
			 * controller starts the Bulk transfer on a 0 toggle
			 * irrespective of the programming of the toggle bits
			 * in the TXCSR register.  Check for this condition
			 * while allocating the EP for a Tx Bulk transfer.  If
			 * so skip this EP.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX/TX NAK timeout scheme when bulk requests are
		 * multiplexed.  This scheme doesn't work in a high speed to
		 * full speed scenario, as NAK interrupts are not coming from
		 * a full speed device connected to a high speed device.
		 * The NAK timeout interval is 8 (128 uframes or 16 ms) for HS
		 * and 4 (8 frames or 8 ms) for an FS device.
		 */
		if (qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}

static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = usb_endpoint_maxp(epd);
	qh->type = usb_endpoint_type(epd);

	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
	 * Some musb cores don't support high bandwidth ISO transfers; and
	 * we don't (yet!) support high bandwidth interrupt transfers.
	 */
	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}
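	/*
	 * Example: wMaxPacketSize 0x1400 describes a high bandwidth ISO
	 * endpoint; the block above yields hb_mult = 3 (three transactions
	 * per microframe) and maxpacket = 0x400 (1024 bytes).
	 */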

	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;
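	/*
	 * This matches the MUSB TXTYPE/RXTYPE layout: operating speed in
	 * bits 7:6 (0x40 high, 0x80 full, 0xc0 low), transfer protocol in
	 * bits 5:4, and target endpoint number in bits 3:0.
	 */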

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;
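	/*
	 * With the logarithmic encodings above, the controller polls every
	 * 2^(interval - 1) (micro)frames; e.g. bInterval 4 on a high speed
	 * interrupt endpoint means 2^3 = 8 microframes, i.e. once per
	 * millisecond.
	 */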

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
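				/* bit 7 of the hub address register marks a
				 * hub with multiple transaction translators
				 */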
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}


/*
 * Abort a transfer that's at the head of a hardware queue.
 * Called with the controller locked and IRQs blocked.
 * The hardware queue then advances to the next transfer, unless prevented.
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			dev_dbg(musb->controller,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (ep->epnum && is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else  {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}

static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in  = usb_pipein(urb->pipe);
	int			ret;

	dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			is_in ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}

static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}

static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u8		devctl;

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->state) {
	case OTG_STATE_A_SUSPEND:
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/* ID could be grounded even if there's no device
		 * on the other end of the cable.  NOTE that the
		 * A_WAIT_VRISE timers are messy with MUSB...
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				otg_state_string(musb->xceiv->state));
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}

const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};