/**
 * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req);

static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
{
	switch (state) {
	case EP0_UNCONNECTED:
		return "Unconnected";
	case EP0_SETUP_PHASE:
		return "Setup Phase";
	case EP0_DATA_PHASE:
		return "Data Phase";
	case EP0_STATUS_PHASE:
		return "Status Phase";
	default:
		return "UNKNOWN";
	}
}

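/*
 * dwc3_ep0_start_trans - program the shared ep0 TRB and start a transfer
 *
 * Fills the single TRB used for both ep0 directions with the given buffer,
 * length and TRB control type, then issues a Start Transfer command for the
 * endpoint. Bails out early (returning 0) if a transfer is already pending
 * on that endpoint.
 */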
static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
		u32 len, u32 type)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_trb			*trb;
	struct dwc3_ep			*dep;

	int				ret;

	dep = dwc->eps[epnum];
	if (dep->flags & DWC3_EP_BUSY) {
		dev_vdbg(dwc->dev, "%s: still busy\n", dep->name);
		return 0;
	}

	trb = dwc->ep0_trb;

	trb->bpl = lower_32_bits(buf_dma);
	trb->bph = upper_32_bits(buf_dma);
	trb->size = len;
	trb->ctrl = type;

	trb->ctrl |= (DWC3_TRB_CTRL_HWO
			| DWC3_TRB_CTRL_LST
			| DWC3_TRB_CTRL_IOC
			| DWC3_TRB_CTRL_ISP_IMI);

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(dwc->ep0_trb_addr);
	params.param1 = lower_32_bits(dwc->ep0_trb_addr);

	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_STARTTRANSFER, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);

	dwc->ep0_next_event = DWC3_EP0_COMPLETE;

	return 0;
}

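/*
 * __dwc3_gadget_ep0_queue - queue a request on the default control pipe
 *
 * Depending on where we are in the control transfer, this either kicks a
 * DATA phase the hardware already asked for, starts a delayed STATUS phase,
 * or (for three-stage transfers) starts the DATA phase right away as the
 * databook's programming model requires.
 */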
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->epnum		= dep->number;

	list_add_tail(&req->list, &dep->request_list);
	/*
	 * Gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as Gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned	direction;

		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);

		return 0;
	}

	/*
	 * In case gadget driver asked us to delay the STATUS phase,
	 * handle it here.
	 */
	if (dwc->delayed_status) {
		unsigned	direction;

		direction = !dwc->ep0_expect_in;
		dwc->delayed_status = false;

		if (dwc->ep0state == EP0_STATUS_PHASE)
			__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
		else
			dev_dbg(dwc->dev, "too early for delayed status\n");

		return 0;
	}

	/*
	 * Unfortunately we have uncovered a limitation wrt the Data Phase.
	 *
	 * Section 9.4 says we can wait for the XferNotReady(DATA) event to
	 * come before issuing the Start Transfer command, but if we do, we will
	 * miss situations where the host starts another SETUP phase instead of
	 * the DATA phase.  Such cases happen at least on TD.7.6 of the Link
	 * Layer Compliance Suite.
	 *
	 * The problem surfaces due to the fact that in case of back-to-back
	 * SETUP packets there will be no XferNotReady(DATA) generated and we
	 * will be stuck waiting for XferNotReady(DATA) forever.
	 *
	 * By looking at tables 9-13 and 9-14 of the Databook, we can see that
	 * it tells us to start Data Phase right away. It also mentions that if
	 * we receive a SETUP phase instead of the DATA phase, core will issue
	 * XferComplete for the DATA phase, before actually initiating it on
	 * the wire, with the TRB's status set to "SETUP_PENDING". Such status
	 * can only be used to print some debugging logs, as the core expects
	 * us to go through to the STATUS phase and start a CONTROL_STATUS TRB,
	 * just so it completes right away, without transferring anything, and
	 * only then can we go back to the SETUP phase.
	 *
	 * Because of this scenario, SNPS decided to change the programming
	 * model of control transfers and support on-demand transfers only for
	 * the STATUS phase. To fix the issue we have now, we will always wait
	 * for gadget driver to queue the DATA phase's struct usb_request, then
	 * start it right away.
	 *
	 * If we're actually in a 2-stage transfer, we will wait for
	 * XferNotReady(STATUS).
	 */
	if (dwc->three_stage_setup) {
		unsigned        direction;

		direction = dwc->ep0_expect_in;
		dwc->ep0state = EP0_DATA_PHASE;

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~DWC3_EP0_DIR_IN;
	}

	return 0;
}

int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->endpoint.desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, dep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	/* we share one TRB for ep0/1 */
	if (!list_empty(&dep->request_list)) {
		ret = -EBUSY;
		goto out;
	}

	dev_vdbg(dwc->dev, "queueing request %p to %s length %d, state '%s'\n",
			request, dep->name, request->length,
			dwc3_ep0_state_string(dwc->ep0state));

	ret = __dwc3_gadget_ep0_queue(dep, req);

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

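/*
 * dwc3_ep0_stall_and_restart - recover ep0 after a protocol error
 *
 * Halts the control endpoint, completes any queued request with
 * -ECONNRESET and re-arms ep0 to receive the next SETUP packet.
 */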
static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
	struct dwc3_ep		*dep = dwc->eps[0];

	/* stall is always issued on EP0 */
	__dwc3_gadget_ep_set_halt(dep, 1);
	dep->flags = DWC3_EP_ENABLED;
	dwc->delayed_status = false;

	if (!list_empty(&dep->request_list)) {
		struct dwc3_request	*req;

		req = next_request(&dep->request_list);
		dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	dwc3_ep0_stall_and_restart(dwc);

	return 0;
}

void dwc3_ep0_out_start(struct dwc3 *dwc)
{
	int				ret;

	ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
			DWC3_TRBCTL_CONTROL_SETUP);
	WARN_ON(ret < 0);
}

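/*
 * dwc3_wIndex_to_dep - map a control request's wIndex to its endpoint
 *
 * Returns the matching dwc3_ep if that endpoint is enabled, NULL otherwise.
 */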
static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
{
	struct dwc3_ep		*dep;
	u32			windex = le16_to_cpu(wIndex_le);
	u32			epnum;

	epnum = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
	if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
		epnum |= 1;

	dep = dwc->eps[epnum];
	if (dep->flags & DWC3_EP_ENABLED)
		return dep;

	return NULL;
}

static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}
/*
 * ch 9.4.5
 */
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep		*dep;
	u32			recip;
	u32			reg;
	u16			usb_status = 0;
	__le16			*response_pkt;

	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * LTM will be set once we know how to set this in HW.
		 */
		usb_status |= dwc->is_selfpowered << USB_DEVICE_SELF_POWERED;

		if (dwc->speed == DWC3_DSTS_SUPERSPEED) {
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (reg & DWC3_DCTL_INITU1ENA)
				usb_status |= 1 << USB_DEV_STAT_U1_ENABLED;
			if (reg & DWC3_DCTL_INITU2ENA)
				usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
		}

		break;

	case USB_RECIP_INTERFACE:
		/*
		 * Function Remote Wake Capable	D0
		 * Function Remote Wakeup	D1
		 */
		break;

	case USB_RECIP_ENDPOINT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (dep->flags & DWC3_EP_STALL)
			usb_status = 1 << USB_ENDPOINT_HALT;
		break;
	default:
		return -EINVAL;
	}

	response_pkt = (__le16 *) dwc->setup_buf;
	*response_pkt = cpu_to_le16(usb_status);

	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}

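/*
 * dwc3_ep0_handle_feature - handle chapter 9 SET_FEATURE/CLEAR_FEATURE
 * @set: non-zero for SET_FEATURE, zero for CLEAR_FEATURE
 *
 * Handles device recipients (remote wakeup, U1/U2 enable, test mode),
 * interface function suspend and the endpoint halt feature.
 */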
static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	struct dwc3_ep		*dep;
	u32			recip;
	u32			wValue;
	u32			wIndex;
	u32			reg;
	int			ret;

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:

		switch (wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			break;
		/*
		 * 9.4.1 says only for SS, in AddressState only for
		 * default control pipe
		 */
		case USB_DEVICE_U1_ENABLE:
			if (dwc->dev_state != DWC3_CONFIGURED_STATE)
				return -EINVAL;
			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
				return -EINVAL;

			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (set)
				reg |= DWC3_DCTL_INITU1ENA;
			else
				reg &= ~DWC3_DCTL_INITU1ENA;
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
			break;

		case USB_DEVICE_U2_ENABLE:
			if (dwc->dev_state != DWC3_CONFIGURED_STATE)
				return -EINVAL;
			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
				return -EINVAL;

			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (set)
				reg |= DWC3_DCTL_INITU2ENA;
			else
				reg &= ~DWC3_DCTL_INITU2ENA;
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
			break;

		case USB_DEVICE_LTM_ENABLE:
			return -EINVAL;
			break;

		case USB_DEVICE_TEST_MODE:
			if ((wIndex & 0xff) != 0)
				return -EINVAL;
			if (!set)
				return -EINVAL;

			dwc->test_mode_nr = wIndex >> 8;
			dwc->test_mode = true;
			break;
		default:
			return -EINVAL;
		}
		break;

	case USB_RECIP_INTERFACE:
		switch (wValue) {
		case USB_INTRF_FUNC_SUSPEND:
			if (wIndex & USB_INTRF_FUNC_SUSPEND_LP)
				/* XXX enable Low power suspend */
				;
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
				/* XXX enable remote wakeup */
				;
			break;
		default:
			return -EINVAL;
		}
		break;

	case USB_RECIP_ENDPOINT:
		switch (wValue) {
		case USB_ENDPOINT_HALT:
			dep = dwc3_wIndex_to_dep(dwc, wIndex);
			if (!dep)
				return -EINVAL;
			ret = __dwc3_gadget_ep_set_halt(dep, set);
			if (ret)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

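/*
 * dwc3_ep0_set_address - handle a SET_ADDRESS request
 *
 * Rejects addresses above 127 and address changes while configured,
 * programs DCFG.DEVADDR and tracks the Default/Address device state
 * transition.
 */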
static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u32 addr;
	u32 reg;

	addr = le16_to_cpu(ctrl->wValue);
	if (addr > 127) {
		dev_dbg(dwc->dev, "invalid device address %d\n", addr);
		return -EINVAL;
	}

	if (dwc->dev_state == DWC3_CONFIGURED_STATE) {
		dev_dbg(dwc->dev, "trying to set address when configured\n");
		return -EINVAL;
	}
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	reg |= DWC3_DCFG_DEVADDR(addr);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
	if (addr)
		dwc->dev_state = DWC3_ADDRESS_STATE;
	else
		dwc->dev_state = DWC3_DEFAULT_STATE;

	return 0;
}

static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	spin_unlock(&dwc->lock);
	ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
	spin_lock(&dwc->lock);
	return ret;
}

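/*
 * dwc3_ep0_set_config - handle a SET_CONFIGURATION request
 *
 * Delegates the request to the gadget driver and tracks the
 * Address/Configured state transition. Once configured, U1/U2 transitions
 * are accepted and a TX FIFO resize is flagged for the status phase.
 */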
static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u32 cfg;
	int ret;
	u32 reg;

	dwc->start_config_issued = false;
	cfg = le16_to_cpu(ctrl->wValue);

	switch (dwc->dev_state) {
	case DWC3_DEFAULT_STATE:
		return -EINVAL;
		break;

	case DWC3_ADDRESS_STATE:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		/* if the cfg matches and the cfg is non-zero */
		if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
			dwc->dev_state = DWC3_CONFIGURED_STATE;
			/*
			 * Enable transition to U1/U2 state when
			 * nothing is pending from application.
			 */
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			reg |= (DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA);
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);

			dwc->resize_fifos = true;
			dev_dbg(dwc->dev, "resize fifos flag SET\n");
		}
		break;

	case DWC3_CONFIGURED_STATE:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		if (!cfg)
			dwc->dev_state = DWC3_ADDRESS_STATE;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

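/*
 * dwc3_ep0_set_sel_cmpl - completion for the SET_SEL data phase
 *
 * Caches the U1/U2 SEL/PEL values sent by the host and, if U1 or U2 is
 * enabled, programs the matching exit latency through the Set Periodic
 * Parameters generic command.
 */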
static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
{
	struct dwc3_ep	*dep = to_dwc3_ep(ep);
	struct dwc3	*dwc = dep->dwc;

	u32		param = 0;
	u32		reg;

	struct timing {
		u8	u1sel;
		u8	u1pel;
		u16	u2sel;
		u16	u2pel;
	} __packed timing;

	int		ret;

	memcpy(&timing, req->buf, sizeof(timing));

	dwc->u1sel = timing.u1sel;
	dwc->u1pel = timing.u1pel;
	dwc->u2sel = le16_to_cpu(timing.u2sel);
	dwc->u2pel = le16_to_cpu(timing.u2pel);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (reg & DWC3_DCTL_INITU2ENA)
		param = dwc->u2pel;
	if (reg & DWC3_DCTL_INITU1ENA)
		param = dwc->u1pel;

	/*
	 * According to Synopsys Databook, if parameter is
	 * greater than 125, a value of zero should be
	 * programmed in the register.
	 */
	if (param > 125)
		param = 0;

	/* now that we have the time, issue DGCMD Set Sel */
	ret = dwc3_send_gadget_generic_command(dwc,
			DWC3_DGCMD_SET_PERIODIC_PAR, param);
	WARN_ON(ret < 0);
}

static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep	*dep;
	u16		wLength;
	u16		wValue;

	if (dwc->dev_state == DWC3_DEFAULT_STATE)
		return -EINVAL;

	wValue = le16_to_cpu(ctrl->wValue);
	wLength = le16_to_cpu(ctrl->wLength);

	if (wLength != 6) {
		dev_err(dwc->dev, "Set SEL should be 6 bytes, got %d\n",
				wLength);
		return -EINVAL;
	}

	/*
	 * To handle Set SEL we need to receive 6 bytes from Host. So let's
	 * queue a usb_request for 6 bytes.
	 *
	 * Remember, though, this controller can't handle non-wMaxPacketSize
	 * aligned transfers on the OUT direction, so we queue a request for
	 * wMaxPacketSize instead.
	 */
	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = dep->endpoint.maxpacket;
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_set_sel_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}

static int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u16		wLength;
	u16		wValue;
	u16		wIndex;

	wValue = le16_to_cpu(ctrl->wValue);
	wLength = le16_to_cpu(ctrl->wLength);
	wIndex = le16_to_cpu(ctrl->wIndex);

	if (wIndex || wLength)
		return -EINVAL;

	/*
	 * REVISIT It's unclear from Databook what to do with this
	 * value. For now, just cache it.
	 */
	dwc->isoch_delay = wValue;

	return 0;
}

static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		dev_vdbg(dwc->dev, "USB_REQ_GET_STATUS\n");
		ret = dwc3_ep0_handle_status(dwc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
		dev_vdbg(dwc->dev, "USB_REQ_CLEAR_FEATURE\n");
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
		break;
	case USB_REQ_SET_FEATURE:
		dev_vdbg(dwc->dev, "USB_REQ_SET_FEATURE\n");
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
		break;
	case USB_REQ_SET_ADDRESS:
		dev_vdbg(dwc->dev, "USB_REQ_SET_ADDRESS\n");
		ret = dwc3_ep0_set_address(dwc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		dev_vdbg(dwc->dev, "USB_REQ_SET_CONFIGURATION\n");
		ret = dwc3_ep0_set_config(dwc, ctrl);
		break;
	case USB_REQ_SET_SEL:
		dev_vdbg(dwc->dev, "USB_REQ_SET_SEL\n");
		ret = dwc3_ep0_set_sel(dwc, ctrl);
		break;
	case USB_REQ_SET_ISOCH_DELAY:
		dev_vdbg(dwc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
		break;
	default:
		dev_vdbg(dwc->dev, "Forwarding to gadget driver\n");
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		break;
	}

	return ret;
}

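/*
 * dwc3_ep0_inspect_setup - parse the received SETUP packet
 *
 * Decides between two- and three-stage control transfers, notes the
 * expected data direction, then either handles standard requests here or
 * delegates to the gadget driver. Any failure stalls and restarts ep0.
 */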
static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct usb_ctrlrequest *ctrl = dwc->ctrl_req;
	int ret = -EINVAL;
	u32 len;

	if (!dwc->gadget_driver)
		goto out;

	len = le16_to_cpu(ctrl->wLength);
	if (!len) {
		dwc->three_stage_setup = false;
		dwc->ep0_expect_in = false;
		dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	} else {
		dwc->three_stage_setup = true;
		dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
		dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = dwc3_ep0_std_request(dwc, ctrl);
	else
		ret = dwc3_ep0_delegate_req(dwc, ctrl);

	if (ret == USB_GADGET_DELAYED_STATUS)
		dwc->delayed_status = true;

out:
	if (ret < 0)
		dwc3_ep0_stall_and_restart(dwc);
}

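/*
 * dwc3_ep0_complete_data - handle XferComplete for the DATA phase
 *
 * Works out how much data was actually transferred (copying out of the
 * internal bounce buffer when it was used for unaligned OUT transfers)
 * and gives the request back, or stalls if an IN transfer came up short.
 */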
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request	*r = NULL;
	struct usb_request	*ur;
	struct dwc3_trb		*trb;
	struct dwc3_ep		*ep0;
	u32			transferred;
	u32			status;
	u32			length;
	u8			epnum;

	epnum = event->endpoint_number;
	ep0 = dwc->eps[0];

	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;

	r = next_request(&ep0->request_list);
	ur = &r->request;

	trb = dwc->ep0_trb;

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING) {
		dev_dbg(dwc->dev, "Setup Pending received\n");

		if (r)
			dwc3_gadget_giveback(ep0, r, -ECONNRESET);

		return;
	}

	length = trb->size & DWC3_TRB_SIZE_MASK;

	if (dwc->ep0_bounced) {
		unsigned transfer_size = ur->length;
		unsigned maxp = ep0->endpoint.maxpacket;

		transfer_size += (maxp - (transfer_size % maxp));
		transferred = min_t(u32, ur->length,
				transfer_size - length);
		memcpy(ur->buf, dwc->ep0_bounce, transferred);
		dwc->ep0_bounced = false;
	} else {
		transferred = ur->length - length;
	}

	ur->actual += transferred;

	if ((epnum & 1) && ur->actual < ur->length) {
		/* for some reason we did not get everything out */

		dwc3_ep0_stall_and_restart(dwc);
	} else {
		/*
		 * handle the case where we have to send a zero packet. This
		 * seems to be the case when req.length > maxpacket. Could it be?
		 */
		if (r)
			dwc3_gadget_giveback(ep0, r, 0);
	}
}

static void dwc3_ep0_complete_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request	*r;
	struct dwc3_ep		*dep;
	struct dwc3_trb		*trb;
	u32			status;

	dep = dwc->eps[0];
	trb = dwc->ep0_trb;

	if (!list_empty(&dep->request_list)) {
		r = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, r, 0);
	}

	if (dwc->test_mode) {
		int ret;

		ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
		if (ret < 0) {
			dev_dbg(dwc->dev, "Invalid Test #%d\n",
					dwc->test_mode_nr);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}
	}

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING)
		dev_dbg(dwc->dev, "Setup Pending received\n");

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

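/*
 * dwc3_ep0_xfer_complete - dispatch an XferComplete event on ep0
 *
 * Clears the endpoint's busy state and hands the event to the handler
 * matching the current control transfer phase (SETUP, DATA or STATUS).
 */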
static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
			const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep = dwc->eps[event->endpoint_number];

	dep->flags &= ~DWC3_EP_BUSY;
	dep->resource_index = 0;
	dwc->setup_packet_pending = false;

	switch (dwc->ep0state) {
	case EP0_SETUP_PHASE:
		dev_vdbg(dwc->dev, "Inspecting Setup Bytes\n");
		dwc3_ep0_inspect_setup(dwc, event);
		break;

	case EP0_DATA_PHASE:
		dev_vdbg(dwc->dev, "Data Phase\n");
		dwc3_ep0_complete_data(dwc, event);
		break;

	case EP0_STATUS_PHASE:
		dev_vdbg(dwc->dev, "Status Phase\n");
		dwc3_ep0_complete_status(dwc, event);
		break;
	default:
		WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
	}
}

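/*
 * __dwc3_ep0_do_control_data - start the DATA phase TRB
 *
 * Zero-length requests reuse the internal ctrl_req buffer; OUT requests
 * which aren't a multiple of wMaxPacketSize go through the ep0 bounce
 * buffer; everything else is mapped and transferred from the request's
 * own buffer.
 */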
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req)
{
	int			ret;

	req->direction = !!dep->number;

	if (req->request.length == 0) {
		ret = dwc3_ep0_start_trans(dwc, dep->number,
				dwc->ctrl_req_addr, 0,
				DWC3_TRBCTL_CONTROL_DATA);
	} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
			&& (dep->number == 0)) {
		u32		transfer_size;

		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
				dep->number);
		if (ret) {
			dev_dbg(dwc->dev, "failed to map request\n");
			return;
		}

		WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE);

		transfer_size = roundup(req->request.length,
				(u32) dep->endpoint.maxpacket);

		dwc->ep0_bounced = true;

		/*
		 * REVISIT in case request length is bigger than
		 * DWC3_EP0_BOUNCE_SIZE we will need two chained
		 * TRBs to handle the transfer.
		 */
		ret = dwc3_ep0_start_trans(dwc, dep->number,
				dwc->ep0_bounce_addr, transfer_size,
				DWC3_TRBCTL_CONTROL_DATA);
	} else {
		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
				dep->number);
		if (ret) {
			dev_dbg(dwc->dev, "failed to map request\n");
			return;
		}

		ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma,
				req->request.length, DWC3_TRBCTL_CONTROL_DATA);
	}

	WARN_ON(ret < 0);
}

static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			type;

	type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
		: DWC3_TRBCTL_CONTROL_STATUS2;

	return dwc3_ep0_start_trans(dwc, dep->number,
			dwc->ctrl_req_addr, 0, type);
}

static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	if (dwc->resize_fifos) {
		dev_dbg(dwc->dev, "starting to resize fifos\n");
		dwc3_gadget_resize_tx_fifos(dwc);
		dwc->resize_fifos = 0;
	}

	WARN_ON(dwc3_ep0_start_control_status(dep));
}

static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep = dwc->eps[event->endpoint_number];

	__dwc3_ep0_do_control_status(dwc, dep);
}

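/*
 * dwc3_ep0_end_control_data - cancel a transfer still owned by the core
 *
 * Issues an End Transfer command for the endpoint's active transfer
 * resource, if any, so a Data phase which was already started can be
 * torn down.
 */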
static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32			cmd;
	int			ret;

	if (!dep->resource_index)
		return;

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
}

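/*
 * dwc3_ep0_xfernotready - handle XferNotReady events on ep0
 *
 * For the DATA phase we only care about events in the wrong direction,
 * which end the started transfer and stall ep0. For the STATUS phase we
 * start the two- or three-stage status TRB unless the gadget driver has
 * asked for a delayed status.
 */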
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	dwc->setup_packet_pending = true;

	switch (event->status) {
	case DEPEVT_STATUS_CONTROL_DATA:
		dev_vdbg(dwc->dev, "Control Data\n");

		/*
		 * We already have a DATA transfer in the controller's cache;
		 * if we receive an XferNotReady(DATA) event we will ignore it,
		 * unless it's for the wrong direction.
		 *
		 * In that case, we must issue END_TRANSFER command to the Data
		 * Phase we already have started and issue SetStall on the
		 * control endpoint.
		 */
		if (dwc->ep0_expect_in != event->endpoint_number) {
			struct dwc3_ep	*dep = dwc->eps[dwc->ep0_expect_in];

			dev_vdbg(dwc->dev, "Wrong direction for Data phase\n");
			dwc3_ep0_end_control_data(dwc, dep);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		break;
	case DEPEVT_STATUS_CONTROL_STATUS:
		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
			return;

		dev_vdbg(dwc->dev, "Control Status\n");

		dwc->ep0state = EP0_STATUS_PHASE;

		if (dwc->delayed_status) {
			WARN_ON_ONCE(event->endpoint_number != 1);
			dev_vdbg(dwc->dev, "Mass Storage delayed status\n");
			return;
		}

		dwc3_ep0_do_control_status(dwc, event);
	}
}

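/*
 * dwc3_ep0_interrupt - top level endpoint event handler for ep0out/ep0in
 */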
void dwc3_ep0_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	u8			epnum = event->endpoint_number;

	dev_dbg(dwc->dev, "%s while ep%d%s in state '%s'\n",
			dwc3_ep_event_string(event->endpoint_event),
			epnum >> 1, (epnum & 1) ? "in" : "out",
			dwc3_ep0_state_string(dwc->ep0state));

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_ep0_xfer_complete(dwc, event);
		break;

	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_ep0_xfernotready(dwc, event);
		break;

	case DWC3_DEPEVT_XFERINPROGRESS:
	case DWC3_DEPEVT_RXTXFIFOEVT:
	case DWC3_DEPEVT_STREAMEVT:
	case DWC3_DEPEVT_EPCMDCMPLT:
		break;
	}
}