/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "musb_core.h"


/* MUSB PERIPHERAL status 3-mar-2006:
 *
 * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
 *   Minor glitches:
 *
 *     + remote wakeup to Linux hosts works, but USBCV failures were
 *       seen in one test run (operator error?)
 *     + endpoint halt tests -- in both usbtest and usbcv -- seem
 *       to break when dma is enabled ... is something wrongly
 *       clearing SENDSTALL?
 *
 * - Mass storage behaved ok when last tested.  Network traffic patterns
 *   (with lots of short transfers etc) need retesting; they turn up the
 *   worst cases of the DMA, since short packets are typical but are not
 *   required.
 *
 * - TX/IN
 *     + both pio and dma behave with network and g_zero tests
 *     + no cppi throughput issues other than no-hw-queueing
 *     + failed with FLAT_REG (DaVinci)
 *     + seems to behave with double buffering, PIO -and- CPPI
 *     + with gadgetfs + AIO, requests got lost?
 *
 * - RX/OUT
 *     + both pio and dma behave with network and g_zero tests
 *     + dma is slow in typical case (short_not_ok is clear)
 *     + double buffering ok with PIO
 *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 *     + request lossage observed with gadgetfs
 *
 * - ISO not tested ... might work, but only weakly isochronous
 *
 * - Gadget driver disabling of softconnect during bind() is ignored; so
 *   drivers can't hold off host requests until userspace is ready.
 *   (Workaround:  they can turn it off later.)
 *
 * - PORTABILITY (assumes PIO works):
 *     + DaVinci, basically works with cppi dma
 *     + OMAP 2430, ditto with mentor dma
 *     + TUSB 6010, platform-specific dma in the works
 */

/* ----------------------------------------------------------------------- */

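/*
 * A request counts as DMA-mapped only on DMA-capable hardware, and only
 * when map_dma_buffer() left it in the MUSB_MAPPED or PRE_MAPPED state.
 */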
#define is_buffer_mapped(req) (is_dma_capable() && \
					(req->map_state != UN_MAPPED))

/* Maps the buffer to dma  */

static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;

	request->map_state = UN_MAPPED;

	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;

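	/*
	 * DMA_ADDR_INVALID means the gadget driver did not map the buffer
	 * itself, so map it here; otherwise just sync the pre-mapped buffer
	 * for the device.
	 */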
	if (request->request.dma == DMA_ADDR_INVALID) {
		request->request.dma = dma_map_single(
				musb->controller,
				request->request.buf,
				request->request.length,
				request->tx
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
		request->map_state = MUSB_MAPPED;
	} else {
		dma_sync_single_for_device(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}

/* Unmap the buffer from dma and map it back to the cpu */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
	if (!is_buffer_mapped(request))
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dev_vdbg(musb->controller,
				"not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		dma_unmap_single(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else { /* PRE_MAPPED */
		dma_sync_single_for_cpu(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}

/*
 * Immediately complete a request.
 *
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&req->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
	spin_unlock(&musb->lock);
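	/*
	 * Unmap and complete the request outside the lock; ep->busy (set
	 * above) keeps a completion callback that queues a new request
	 * from restarting I/O on this endpoint reentrantly.
	 */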
	unmap_dma_buffer(req, musb);
	if (request->status == 0)
		dev_dbg(musb->controller, "%s done request %p,  %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * Caller has locked the controller, blocked IRQs, and selected this ep.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
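			/*
			 * Write FLUSHFIFO twice; a double-buffered FIFO may
			 * need the second write to be fully drained.
			 */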
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
				ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
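	/*
	 * A bulk endpoint that can be "split" owns a FIFO larger than one
	 * packet, so the whole FIFO can be filled per call; otherwise limit
	 * writes to a single wMaxPacketSize packet.
	 */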
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral tx (IN) using Mentor DMA works as follows:
	Only mode 0 is used for transfers <= wPktSize,
	mode 1 is used for larger transfers,

	One of the following happens:
	- Host sends IN token which causes an endpoint interrupt
		-> TxAvail
			-> if DMA is currently busy, exit.
			-> if queue is non-empty, txstate().

	- Request is queued by the gadget driver.
		-> if queue was previously empty, txstate()

	txstate()
		-> start
		  /\	-> setup DMA
		  |     (data is transferred to the FIFO, then sent out when
		  |	IN token(s) are received from the Host.)
		  |		-> DMA interrupt on completion
		  |		   calls TxAvail.
		  |		      -> stop DMA, ~DMAENAB,
		  |		      -> set TxPktRdy for last short pkt or zlp
		  |		      -> Complete Request
		  |		      -> Continue next request (call txstate)
		  |___________________________________|

 * Non-Mentor DMA engines can of course work differently, such as by
 * upleveling from irq-per-packet to irq-per-buffer.
 */

#endif

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		dev_dbg(musb->controller, "%s old packet still ready, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef	CONFIG_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		{
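			/*
			 * Per the notes above: mode 0 moves at most one
			 * packet per DMA request, mode 1 streams larger
			 * transfers.
			 */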
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					if (!musb_ep->hb_mult)
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
		       MUSB_TXCSR_MODE;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start.  Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma + request->actual,
				request_size);
		if (!use_dma) {
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			csr &= ~MUSB_TXCSR_DMAENAB;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* invariant: request->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				request->zero,
				request->dma + request->actual,
				request_size);
#endif
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ,  with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;

	csr = musb_readw(epio, MUSB_TXCSR);
	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		csr |=	MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |=	 MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... it has happened with CPPI though,
		 * after changing SENDSTALL (and other cases); harmless?
		 */
		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8	is_dma = 0;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			is_dma = 1;
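			/*
			 * Writing the WZC ("write zero to clear") bits as
			 * ones preserves them across this read-modify-write.
			 */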
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length))
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
			|| (is_dma && (!dma->desired_mode ||
				(request->actual &
					(musb_ep->packet_sz - 1))))
#endif
		) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			dev_dbg(musb->controller, "sending zero pkt\n");
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				dev_dbg(musb->controller, "%s idle now\n",
					musb_ep->end_point.name);
				return;
			}
		}

		txstate(musb, req);
	}
}

/* ------------------------------------------------------------ */

#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral rx (OUT) using Mentor DMA works as follows:
	- Only mode 0 is used.

	- Request is queued by the gadget class driver.
		-> if queue was previously empty, rxstate()

	- Host sends OUT token which causes an endpoint interrupt
	  /\      -> RxReady
	  |	      -> if request queued, call rxstate
	  |		/\	-> setup DMA
	  |		|	     -> DMA interrupt on completion
	  |		|		-> RxReady
	  |		|		      -> stop DMA
	  |		|		      -> ack the read
	  |		|		      -> if data recd = max expected
	  |		|				by the request, or host
	  |		|				sent a short packet,
	  |		|				complete the request,
	  |		|				and start the next one.
	  |		|_____________________________________|
	  |					 else just wait for the host
	  |					    to send the next OUT token.
	  |__________________________________________________|

 * Non-Mentor DMA engines can of course work differently.
 */

#endif

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		fifo_count = 0;
	u16			len;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	len = musb_ep->packet_sz;

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "DMA pending...\n");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled() && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		len = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently the short_not_ok flag is set only by the
		 * file_storage and f_mass_storage drivers.
		 */

		if (request->short_not_ok && len == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_buffer_mapped(req)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, end of transfer is signified either by a
	 * short packet or by filling the last byte of the buffer.  (Sending
	 * extra data in that last packet should trigger an overflow fault.)
	 * But in mode 1, we don't get a DMA completion interrupt for short
	 * packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (request->actual < request->length) {
					int transfer_size = 0;
					if (use_mode_1) {
						transfer_size = min(request->length - request->actual,
								channel->max_len);
						musb_ep->dma->desired_mode = 1;
					} else {
						transfer_size = min(request->length - request->actual,
								(unsigned)len);
						musb_ep->dma->desired_mode = 0;
					}

					use_dma = c->channel_program(
							channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size);
				}

				if (use_dma)
					return;
			}
#elif defined(CONFIG_USB_UX500_DMA)
			if ((is_buffer_mapped(req)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (len < musb_ep->packet_sz)
					transfer_size = len;
				else if (request->short_not_ok)
					transfer_size =	min(request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min(request->length -
							request->actual,
							(unsigned)len);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))

					return;
			}
#endif	/* Mentor's DMA */

			fifo_count = request->length - request->actual;
			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					len, fifo_count,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef	CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && is_buffer_mapped(req)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif
			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			if (is_buffer_mapped(req)) {
				unmap_dma_buffer(req, musb);

				/*
				 * Clear DMAENAB and AUTOCLEAR for the
				 * PIO mode transfer
				 */
				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, csr);
			}

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reached the end, or a short packet was detected */
	if (request->actual == request->length || len < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	req = next_request(musb_ep);
	if (!req)
		return;

	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		dev_dbg(musb->controller, "%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In the double-buffered case, continue to unload
			 * the fifo if an RX packet is already in it.
			 */
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem	*mbase;
	u8		epnum;
	u16		csr;
	unsigned	tmp;
	int		status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp(desc);
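	/*
	 * Bits 11 and 12 of wMaxPacketSize encode extra transactions per
	 * microframe for high-bandwidth isochronous/interrupt endpoints.
	 */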
	if (tmp & ~0x07ff) {
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
			goto fail;
		}
		musb_ep->hb_mult = (tmp >> 11) & 3;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = tmp & 0x7ff;
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {
		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		int_txe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {
		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		int_rxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, int_rxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Disable an endpoint flushing all requests queued.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
		int_txe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
		int_rxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	musb_ep->desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);

	return status;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb		*musb = musb_ep->musb;
	struct musb_request	*request = NULL;

	request = kzalloc(sizeof *request, gfp_flags);
	if (!request) {
		dev_dbg(musb->controller, "not enough memory\n");
		return NULL;
	}

	request->request.dma = DMA_ADDR_INVALID;
	request->epnum = musb_ep->current_epnum;
	request->ep = musb_ep;

	return &request->request;
}

/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	kfree(to_musb_request(req));
}

static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
		req->tx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->epnum);

	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status = 0;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}

static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*req = to_musb_request(request);
	struct musb_request	*r;
	unsigned long		flags;
	int			status = 0;
	struct musb		*musb = musb_ep->musb;

	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
		return -EINVAL;

	spin_lock_irqsave(&musb->lock, flags);

	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == req)
			break;
	}
	if (r != req) {
		dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
 * data but will queue requests.
 *
 * exported to ep0 code
 */
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	u8			epnum = musb_ep->current_epnum;
	struct musb		*musb = musb_ep->musb;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	void __iomem		*mbase;
	unsigned long		flags;
	u16			csr;
	struct musb_request	*request;
	int			status = 0;

	if (!ep)
		return -EINVAL;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
		status = -EINVAL;
		goto done;
	}

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);
	if (value) {
		if (request) {
			dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
			    ep->name);
			status = -EAGAIN;
			goto done;
		}
		/* Cannot portably stall with non-empty FIFO */
		if (musb_ep->is_in) {
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
				dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
				status = -EAGAIN;
				goto done;
			}
		}
	} else
		musb_ep->wedged = 0;

	/* set/clear the stall and toggle bits */
	dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_P_WZC_BITS
			| MUSB_TXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_TXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_TXCSR_P_SENDSTALL
				| MUSB_TXCSR_P_SENTSTALL);
		csr &= ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_P_WZC_BITS
			| MUSB_RXCSR_FLUSHFIFO
			| MUSB_RXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_RXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_RXCSR_P_SENDSTALL
				| MUSB_RXCSR_P_SENTSTALL);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && request) {
		dev_dbg(musb->controller, "restarting the request\n");
		musb_ep_restart(musb, request);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Set the halt feature, with subsequent clear-halt requests ignored (wedge)
 */
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);

	if (!ep)
		return -EINVAL;

	musb_ep->wedged = 1;

	return usb_ep_set_halt(ep);
}

static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	void __iomem		*epio = musb_ep->hw_ep->regs;
	int			retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb		*musb = musb_ep->musb;
		int			epnum = musb_ep->current_epnum;
		void __iomem		*mbase = musb->mregs;
		unsigned long		flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}

static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr, int_txe;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	int_txe = musb_readw(mbase, MUSB_INTRTXE);
	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes the
			 * controller interrupt the current FIFO loading,
			 * but not flush the already loaded ones.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, int_txe);
	spin_unlock_irqrestore(&musb->lock, flags);
}

static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.set_wedge	= musb_gadget_set_wedge,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};

/* ----------------------------------------------------------------------- */

static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return (int)musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
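		/*
		 * Poll until the controller latches the session request,
		 * then wait (bounded) for the SRP pulse to complete.
		 */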
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		spin_unlock_irqrestore(&musb->lock, flags);
		otg_start_srp(musb->xceiv);
		spin_lock_irqsave(&musb->lock, flags);

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		dev_dbg(musb->controller, "Unhandled wake: %s\n",
			otg_state_string(musb->xceiv->state));
		goto done;
	}

	status = 0;

	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	dev_dbg(musb->controller, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct musb	*musb = gadget_to_musb(gadget);

	musb->is_self_powered = !!is_selfpowered;
	return 0;
}

static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MUSB_POWER);
	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	dev_dbg(musb->controller, "gadget D+ pullup %s\n",
		is_on ? "on" : "off");
	musb_writeb(musb->mregs, MUSB_POWER, power);
}

#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	dev_dbg(musb->controller, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif

static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct musb	*musb = gadget_to_musb(gadget);

	if (!musb->xceiv->set_power)
		return -EOPNOTSUPP;
	return otg_set_power(musb->xceiv, mA);
}

static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb	*musb = gadget_to_musb(gadget);
	unsigned long	flags;

	is_on = !!is_on;

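	/* The controller may be runtime-suspended; wake it up before
	 * touching the POWER register.
	 */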
	pm_runtime_get_sync(musb->controller);

F
Felipe Balbi 已提交
1720 1721 1722 1723 1724 1725 1726 1727 1728
	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		musb_pullup(musb, is_on);
	}
	spin_unlock_irqrestore(&musb->lock, flags);

	pm_runtime_put(musb->controller);

	return 0;
}
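
/*
 * A minimal usage sketch (assuming the standard usb_gadget API of this
 * kernel generation, nothing musb-specific): gadget drivers reach the
 * pullup hook above through the UDC core, e.g.
 *
 *	usb_gadget_connect(&musb->g);	 // -> ops->pullup(g, 1)
 *	usb_gadget_disconnect(&musb->g); // -> ops->pullup(g, 0)
 */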

static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int musb_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver);

static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	/* .vbus_session		= musb_gadget_vbus_session, */
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
	.udc_start		= musb_gadget_start,
	.udc_stop		= musb_gadget_stop,
};

/* ----------------------------------------------------------------------- */

/* Registration */

/* Only this registration code "knows" the rule (from USB standards)
 * about there being only one external upstream port.  It assumes
 * all peripheral ports are external...
 */

static void musb_gadget_release(struct device *dev)
{
	/* kref_put(WHAT) */
	dev_dbg(dev, "%s\n", __func__);
}


static void __init
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	sprintf(ep->name, "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
		ep->end_point.maxpacket = 64;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
		else
			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}
}

/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 */
static inline void __init musb_g_init_endpoints(struct musb *musb)
{
	u8			epnum;
	struct musb_hw_ep	*hw_ep;
	unsigned		count = 0;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
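		/* a shared fifo exposes one ep object, usable in either
		 * direction (one at a time); split fifos expose separate
		 * in/out objects, each sized from its own fifo config
		 */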
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
			count++;
		} else {
			if (hw_ep->max_packet_sz_tx) {
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
				count++;
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
				count++;
			}
		}
	}
}

/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 */
int __init musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race:  if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */

	musb->g.ops = &musb_gadget_operations;
	musb->g.max_speed = USB_SPEED_HIGH;
	musb->g.speed = USB_SPEED_UNKNOWN;

	/* this "gadget" abstracts/virtualizes the controller */
	dev_set_name(&musb->g.dev, "gadget");
	musb->g.dev.parent = musb->controller;
	musb->g.dev.dma_mask = musb->controller->dma_mask;
	musb->g.dev.release = musb_gadget_release;
	musb->g.name = musb_driver_name;

	if (is_otg_enabled(musb))
		musb->g.is_otg = 1;

	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	status = device_register(&musb->g.dev);
	if (status != 0) {
		put_device(&musb->g.dev);
		return status;
	}
	status = usb_add_gadget_udc(musb->controller, &musb->g);
	if (status)
		goto err;

	return 0;
err:
	musb->g.dev.parent = NULL;
	device_unregister(&musb->g.dev);
	return status;
}

void musb_gadget_cleanup(struct musb *musb)
{
	usb_del_gadget_udc(&musb->g);
	if (musb->g.dev.parent)
		device_unregister(&musb->g.dev);
}

/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct musb		*musb = gadget_to_musb(g);
	unsigned long		flags;
	int			retval = -EINVAL;

	if (driver->speed < USB_SPEED_HIGH)
		goto err0;

	pm_runtime_get_sync(musb->controller);

	dev_dbg(musb->controller, "registering driver %s\n", driver->function);

	musb->softconnect = 0;
	musb->gadget_driver = driver;

	spin_lock_irqsave(&musb->lock, flags);
	musb->is_active = 1;

	otg_set_peripheral(musb->xceiv, &musb->g);
	musb->xceiv->state = OTG_STATE_B_IDLE;

	/*
	 * FIXME this ignores the softconnect flag.  Drivers are
	 * allowed to hold the peripheral inactive until, for example,
	 * userspace hooks up printer hardware or DSP codecs, so
	 * hosts only see fully functional devices.
	 */

	if (!is_otg_enabled(musb))
		musb_start(musb);

	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb)) {
		struct usb_hcd	*hcd = musb_to_hcd(musb);

		dev_dbg(musb->controller, "OTG startup...\n");

		/* REVISIT:  funcall to other code, which also
		 * handles power budgeting ... this way also
		 * ensures HdrcStart is indirectly called.
		 */
		retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
		if (retval < 0) {
			dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
			goto err2;
		}

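		/* If the transceiver last saw the ID pin grounded, we
		 * are the A-device and must supply VBUS ourselves now
		 * that the host side is registered.
		 */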
		if ((musb->xceiv->last_event == USB_EVENT_ID)
					&& musb->xceiv->set_vbus)
			otg_set_vbus(musb->xceiv, 1);

		hcd->self.uses_pio_for_control = 1;
	}
	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_put(musb->controller);

	return 0;

err2:
	if (!is_otg_enabled(musb))
		musb_stop(musb);
err0:
	return retval;
}

static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
	int			i;
	struct musb_hw_ep	*hw_ep;

	/* don't disconnect if it's not connected */
	if (musb->g.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	else
		musb->g.speed = USB_SPEED_UNKNOWN;

	/* deactivate the hardware */
	if (musb->softconnect) {
		musb->softconnect = 0;
		musb_pullup(musb, 0);
	}
	musb_stop(musb);

	/* killing any outstanding requests will quiesce the driver;
	 * then report disconnect
	 */
	if (driver) {
		for (i = 0, hw_ep = musb->endpoints;
				i < musb->nr_endpoints;
				i++, hw_ep++) {
			musb_ep_select(musb->mregs, i);
			if (hw_ep->is_shared_fifo /* || !epnum */) {
				nuke(&hw_ep->ep_in, -ESHUTDOWN);
			} else {
				if (hw_ep->max_packet_sz_tx)
					nuke(&hw_ep->ep_in, -ESHUTDOWN);
				if (hw_ep->max_packet_sz_rx)
					nuke(&hw_ep->ep_out, -ESHUTDOWN);
			}
		}
	}
}

/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * @param driver the gadget driver to unregister
 */
static int musb_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct musb	*musb = gadget_to_musb(g);
	unsigned long	flags;

	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_get_sync(musb->controller);

	/*
	 * REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

	musb_hnp_stop(musb);

	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->xceiv->state = OTG_STATE_UNDEFINED;
	stop_activity(musb, driver);
	otg_set_peripheral(musb->xceiv, NULL);

	dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);
	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb)) {
		usb_remove_hcd(musb_to_hcd(musb));
		/* FIXME we need to be able to register another
		 * gadget driver here and have everything work;
		 * that currently misbehaves.
		 */
	}

	if (!is_otg_enabled(musb))
		musb_stop(musb);

	pm_runtime_put(musb->controller);

	return 0;
}

/* ----------------------------------------------------------------------- */

/* lifecycle operations called through plat_uds.c */

void musb_g_resume(struct musb *musb)
{
	musb->is_suspended = 0;
	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARNING("unhandled RESUME transition (%s)\n",
				otg_state_string(musb->xceiv->state));
	}
}

/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
	u8	devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	dev_dbg(musb->controller, "devctl %02x\n", devctl);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
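		/* a full DEVCTL.VBUS field means VBUS is above the
		 * VBus-valid threshold, so a session is in progress
		 * and we have just become a peripheral
		 */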
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)\n",
				otg_state_string(musb->xceiv->state));
	}
}

/* Called during SRP */
void musb_g_wakeup(struct musb *musb)
{
	musb_gadget_wakeup(&musb->g);
}

/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem	*mregs = musb->mregs;
	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);

	dev_dbg(musb->controller, "devctl %02x\n", devctl);

	/* clear HR */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

	switch (musb->xceiv->state) {
	default:
		dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
			otg_state_string(musb->xceiv->state));
		musb->xceiv->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb->xceiv->state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}

	musb->is_active = 0;
}

void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem	*mbase = musb->mregs;
	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8		power;

	dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
			(devctl & MUSB_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb_readb(mbase, MUSB_FADDR),
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);

	/* report disconnect, if we didn't already (flushing EP state) */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);


	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;
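	/* (the core latches HSMODE only after the high-speed chirp
	 * handshake succeeds during bus reset)
	 */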

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (is_otg_enabled(musb)) {
		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	} else
		WARN_ON(1);

	/* start with default limits on VBUS power draw */
	(void) musb_gadget_vbus_draw(&musb->g,
			is_otg_enabled(musb) ? 8 : 100);
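	/* 8 mA is the OTG supplement's guaranteed-available VBUS
	 * budget; 100 mA is one USB 2.0 unit load, the usual limit
	 * before a configuration is chosen.
	 */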
}