/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 *
 * (A minimal sketch of the consumer ownership test appears just after the
 * #includes below.)
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

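/*
 * A minimal sketch of the consumer ownership test from the cycle bit rules
 * above; it is illustrative only and not used by the driver (the event
 * handler performs the equivalent check inline).  A TRB belongs to the
 * consumer when its cycle bit matches the ring's cycle state.
 */
static inline bool trb_owned_by_consumer(struct xhci_ring *ring,
		union xhci_trb *trb)
{
	return (le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) ==
		ring->cycle_state;
}
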
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
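
/*
 * Worked example for the helper above: a TRB at index 5 of a segment whose
 * seg->dma is 0x1000 maps to 0x1000 + 5 * sizeof(union xhci_trb) =
 * 0x1000 + 5 * 16 = 0x1050, since each TRB is 16 bytes long.
 */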

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return TRB_TYPE_LINK_LE32(link->control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	ring->deq_updates++;

	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			return;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}
	return;
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (!trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB */
	while (trb_is_link(next)) {

		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet. We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;
		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}
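
/*
 * Worked example of the toggle rule: on a two-segment ring whose final link
 * TRB has LINK_TOGGLE set, a producer that starts with cycle_state = 1
 * writes every TRB on the first lap with cycle bit 1; crossing that link TRB
 * flips cycle_state to 0, so the second lap is written with cycle bit 0.
 * That is how the consumer distinguishes fresh TRBs from stale ones.
 */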

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}
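
/*
 * A minimal sketch of the producer pattern from the rules above: check for
 * room, write the TRB with the ring's cycle state, then advance enqueue.
 * This helper is illustrative only and is not part of the driver; real
 * submissions go through prepare_ring() and the queueing helpers.
 */
static inline void example_queue_noop(struct xhci_hcd *xhci,
		struct xhci_ring *ring)
{
	struct xhci_generic_trb *trb;

	if (!room_on_ring(xhci, ring, 1))
		return;		/* a real caller would try to expand the ring */

	trb = &ring->enqueue->generic;
	trb->field[0] = 0;
	trb->field[1] = 0;
	trb->field[2] = 0;
	/* The cycle bit must carry the ring's producer cycle state */
	trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) | ring->cycle_state);
	inc_enq(xhci, ring, false);
}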

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;

	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Use the cmd timeout timer to
	 * handle those cases. Use twice the time to cover the bit polling retry.
	 */
	mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
	 * time the completion of all xHCI commands, including
	 * the Command Abort operation. If software doesn't see
	 * CRR negated in a timely manner (e.g. longer than 5
	 * seconds), then it should assume that there are
	 * larger problems with the xHC and assert HCRST.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		/* we are about to kill xhci, give it one more chance */
		xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			      &xhci->op_regs->cmd_ring);
		udelay(1000);
		ret = xhci_handshake(&xhci->op_regs->cmd_ring,
				     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
		if (ret == 0)
			return 0;

		xhci_err(xhci, "Stopping the command ring failed, "
				"maybe the host is dead\n");
		del_timer(&xhci->cmd_timer);
		xhci->xhc_state |= XHCI_STATE_DYING;
		xhci_quiesce(xhci);
		xhci_halt(xhci);
		return -ESHUTDOWN;
	}

	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
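
/*
 * Illustrative sketch (hypothetical helper, not part of this file): how a
 * caller might map an URB onto its transfer ring with the triad lookup
 * above.  The driver's real equivalent is xhci_urb_to_transfer_ring().
 */
static inline struct xhci_ring *example_urb_to_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
			xhci_get_endpoint_index(&urb->ep->desc),
			urb->stream_id);
}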

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");
	/* 4.6.9 the css flag is written to the stream context for streams */
	if (ep->ep_state & EP_HAS_STREAMS) {
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		hw_dequeue = le64_to_cpu(ctx->stream_ring);
	} else {
		struct xhci_ep_ctx *ep_ctx
			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
		hw_dequeue = le64_to_cpu(ep_ctx->deq);
	}

	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb). We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;
		if (cycle_found &&
		    TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
		    new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Cancel (unchain) link TRB");
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
					cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"TRB to noop at offset 0x%llx",
					(unsigned long long)
					xhci_trb_virt_to_dma(cur_seg, cur_trb));
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status)
{
	struct usb_hcd *hcd;
	struct urb	*urb;
	struct urb_priv	*urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs	== 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(urb_priv);
		spin_lock(&xhci->lock);
	}
}

void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
				 struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;

	if (!seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	/* for IN transfers we need to copy the data from bounce to sg */
	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
			     seg->bounce_len, seg->bounce_offs);
	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
				ep->stopped_td->urb->stream_id, &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	ep->stopped_td = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		if (ep_ring && cur_td->bounce_seg)
			xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
		xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;

	while (!list_empty(&ring->td_list)) {
		cur_td = list_first_entry(&ring->td_list,
				struct xhci_td, td_list);
		list_del_init(&cur_td->td_list);
		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		if (cur_td->bounce_seg)
			xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id + 1);
			xhci_kill_ring_urbs(xhci,
					ep->stream_info->stream_rings[stream_id]);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}
	while (!list_empty(&ep->cancelled_td_list)) {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but no command pending, "
				"exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled. If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Calling usb_hc_died()");
	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}


static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

cleanup:
	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
			return;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		xhci->slot_id = slot_id;
	else
		xhci->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;
	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
1169
			return;
1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion "
				"for disabled slot %u\n", slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci->error_bitmask |= 1 << 6;
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd, *tmp_cmd;
	u32 cycle_state;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
				 cmd_list) {

		if (i_cmd->status != COMP_CMD_ABORT)
			continue;

		i_cmd->status = COMP_CMD_STOP;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);
		/* get cycle state from the original cmd trb */
		cycle_state = le32_to_cpu(
			i_cmd->command_trb->generic.field[3]) &	TRB_CYCLE;
		/* modify the command trb to no-op command */
		i_cmd->command_trb->generic.field[0] = 0;
		i_cmd->command_trb->generic.field[1] = 0;
		i_cmd->command_trb->generic.field[2] = 0;
		i_cmd->command_trb->generic.field[3] = cpu_to_le32(
			TRB_TYPE(TRB_CMD_NOOP) | cycle_state);

		/*
		 * caller waiting for completion is called when command
		 *  completion event is received for these no-op commands
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
	return;
}


void xhci_handle_command_timeout(unsigned long data)
{
	struct xhci_hcd *xhci;
	int ret;
	unsigned long flags;
	u64 hw_ring_state;
	bool second_timeout = false;
	xhci = (struct xhci_hcd *) data;

	/* mark this command to be cancelled */
	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->current_cmd) {
		if (xhci->current_cmd->status == COMP_CMD_ABORT)
			second_timeout = true;
		xhci->current_cmd->status = COMP_CMD_ABORT;
	}

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING))  {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Command timeout\n");
		ret = xhci_abort_cmd_ring(xhci);
		if (unlikely(ret == -ESHUTDOWN)) {
			xhci_err(xhci, "Abort command ring failed\n");
			xhci_cleanup_command_queue(xhci);
			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
			xhci_dbg(xhci, "xHCI host controller is dead.\n");
		}
		return;
	}

	/* command ring failed to restart, or host removed. Bail out */
	if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
		return;
	}

	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	del_timer(&xhci->cmd_timer);

	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_CMD_STOP) {
		xhci_handle_stopped_cmd_ring(xhci, cmd);
		return;
	}
	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_CMD_ABORT) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_CMD_ABORT)
			goto event_handled;
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_CMD_STOP)
			cmd_comp_code = COMP_CMD_STOP;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}

	/* restart timer if this wasn't the last command */
	if (cmd->cmd_list.next != &xhci->cmd_list) {
		xhci->current_cmd = list_entry(cmd->cmd_list.next,
					       struct xhci_command, cmd_list);
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
1462
 * Add one to it in order to call xhci_find_slot_id_by_port.
1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
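
/*
 * Worked example (hypothetical port layout): if the first entries of
 * xhci->port_array hold major revisions { 0x02, 0x03, 0x02 } and the
 * hardware reports port_id 3 on the USB 2.0 roothub, only one earlier
 * port (index 0) has a similar speed, so the faked port number is 1.
 */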

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
1518
	struct usb_hcd *hcd;
S
Sarah Sharp 已提交
1519
	u32 port_id;
1520
	u32 temp, temp1;
1521
	int max_ports;
1522
	int slot_id;
1523
	unsigned int faked_port_index;
1524
	u8 major_revision;
1525
	struct xhci_bus_state *bus_state;
M
Matt Evans 已提交
1526
	__le32 __iomem **port_array;
1527
	bool bogus_port_status = false;
S
Sarah Sharp 已提交
1528 1529

	/* Port status change events always have a successful completion code */
M
Matt Evans 已提交
1530
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
S
Sarah Sharp 已提交
1531 1532 1533
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
M
Matt Evans 已提交
1534
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
S
Sarah Sharp 已提交
1535 1536
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

1537 1538
	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
1539
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
P
Peter Chen 已提交
1540 1541
		inc_deq(xhci, xhci->event_ring);
		return;
1542 1543
	}

1544 1545 1546 1547
	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];
P
Peter Chen 已提交
1548 1549 1550

	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
1551
	if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
P
Peter Chen 已提交
1552 1553
		hcd = xhci->shared_hcd;

1554 1555 1556 1557
	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
1558
		bogus_port_status = true;
1559
		goto cleanup;
1560
	}
1561
	if (major_revision == DUPLICATE_ENTRY) {
1562 1563 1564
		xhci_warn(xhci, "Event for port %u duplicated in"
				"Extended Capabilities, ignoring.\n",
				port_id);
1565
		bogus_port_status = true;
1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	bus_state = &xhci->bus_state[hcd_index(hcd)];
1577
	if (hcd->speed >= HCD_USB3)
1578 1579 1580 1581 1582 1583
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);
1584

1585
	temp = readl(port_array[faked_port_index]);
1586
	if (hcd->state == HC_STATE_SUSPENDED) {
1587 1588 1589 1590
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

1591
	if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
1592 1593
		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);

1594 1595 1596
	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

1597
		temp1 = readl(&xhci->op_regs->command);
1598 1599 1600 1601 1602
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

1603
		if (DEV_SUPERSPEED_ANY(temp)) {
1604
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
1605 1606 1607 1608 1609
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
1610 1611
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
A
Andiry Xu 已提交
1612 1613
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
1614 1615 1616 1617 1618
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
1619 1620
		} else if (!test_bit(faked_port_index,
				     &bus_state->resuming_ports)) {
1621
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
1622
			bus_state->resume_done[faked_port_index] = jiffies +
1623
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
1624
			set_bit(faked_port_index, &bus_state->resuming_ports);
1625
			mod_timer(&hcd->rh_timer,
1626
				  bus_state->resume_done[faked_port_index]);
1627 1628 1629
			/* Do the rest in GetPortStatus */
		}
	}
1630 1631

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
1632
			DEV_SUPERSPEED_ANY(temp)) {
1633
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1634 1635 1636 1637 1638 1639 1640
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
1641 1642 1643 1644
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
1645
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
1646 1647 1648 1649 1650 1651 1652 1653 1654
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
1655
	}
1656

1657 1658 1659 1660 1661
	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state).  If so, let the the driver know it's
	 * out of the RExit state.
	 */
1662
	if (!DEV_SUPERSPEED_ANY(temp) &&
1663 1664 1665 1666 1667 1668 1669
			test_and_clear_bit(faked_port_index,
				&bus_state->rexit_ports)) {
		complete(&bus_state->rexit_done[faked_port_index]);
		bogus_port_status = true;
		goto cleanup;
	}

1670
	if (hcd->speed < HCD_USB3)
1671 1672 1673
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

1674
cleanup:
S
Sarah Sharp 已提交
1675
	/* Update event ring dequeue pointer before dropping the lock */
A
Andiry Xu 已提交
1676
	inc_deq(xhci, xhci->event_ring);
S
Sarah Sharp 已提交
1677

1678 1679 1680 1681 1682 1683 1684
	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

1685 1686 1687 1688 1689 1690 1691 1692 1693
	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set.  When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
S
Sarah Sharp 已提交
1694 1695
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
1696
	usb_hcd_poll_rh_status(hcd);
S
Sarah Sharp 已提交
1697 1698 1699
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *start_seg,
		union xhci_trb	*start_trb,
		union xhci_trb	*end_trb,
		dma_addr_t	suspect_dma,
		bool		debug)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (debug)
			xhci_warn(xhci,
				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
				(unsigned long long)suspect_dma,
				(unsigned long long)start_dma,
				(unsigned long long)end_trb_dma,
				(unsigned long long)cur_seg->dma,
				(unsigned long long)end_seg_dma);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}
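
/*
 * Illustrative example (hypothetical addresses): with one 64-TRB segment at
 * DMA 0x1000 and a TD that wraps -- start_trb at 0x13c0 near the segment end,
 * end_trb at 0x1040 near the top -- a suspect_dma of either 0x13f0 (tail of
 * the segment) or 0x1020 (wrapped head) falls inside the TD, while 0x1200
 * (between end_trb and start_trb) does not.
 */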

static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	struct xhci_command *command;
	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!command)
		return;

	ep->ep_state |= EP_HALTED;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, ep_index, td);

	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted. The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
		    cpu_to_le32(EP_STATE_HALTED))
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

/*
 * Finish the TD processing and remove the TD from the endpoint's TD list.
 * Returns 1 if the URB can be given back.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv	*urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP ||
			trb_comp_code == COMP_STOP_SHORT) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		return 0;
	}
	if (trb_comp_code == COMP_STALL ||
		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
						trb_comp_code)) {
		/* Issue a reset endpoint command to clear the host side
		 * halt, followed by a set dequeue command to move the
		 * dequeue pointer past the TD.
		 * The class driver clears the device side halt later.
		 */
		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
					ep_ring->stream_id, td, event_trb);
	} else {
		/* Update ring dequeue pointer */
		while (ep_ring->dequeue != td->last_trb)
			inc_deq(xhci, ep_ring);
		inc_deq(xhci, ep_ring);
	}

td_cleanup:
	/* Clean up the endpoint's TD list */
	urb = td->urb;
	urb_priv = urb->hcpriv;

	/* if a bounce buffer was used to align this td then unmap it */
	if (td->bounce_seg)
		xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned).  Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
			urb->transfer_buffer_length,
			urb->actual_length);
		urb->actual_length = 0;
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
	}
	list_del_init(&td->td_list);
	/* Was this TD slated to be cancelled but completed anyway? */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	urb_priv->td_cnt++;
	/* Giveback the urb when all the tds are completed */
	if (urb_priv->td_cnt == urb_priv->length) {
		ret = 1;
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
	}

	return ret;
}
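
/*
 * Illustrative note (values hypothetical): a multi-TD URB is only given back
 * once every TD has completed -- e.g. an isochronous URB queued with
 * urb_priv->length == 8 TDs keeps returning 0 from finish_td() until
 * urb_priv->td_cnt reaches 8, at which point finish_td() returns 1.
 */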

/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (event_trb == ep_ring->dequeue) {
			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	case COMP_STOP_SHORT:
		if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
		else
			td->urb->actual_length =
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

		return finish_td(xhci, td, event_trb, event, ep, status, false);
	case COMP_STOP:
		/* Did we stop at data stage? */
		if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		/* fall through */
	case COMP_STOP_INVAL:
		return finish_td(xhci, td, event_trb, event, ep, status, false);
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error code %u, "
				"halted endpoint index = %u\n",
				trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL:
		/* Did we transfer part of the data (middle) phase? */
		if (event_trb != ep_ring->dequeue &&
				event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		else if (!td->urb_length_set)
			td->urb->actual_length = 0;

		return finish_td(xhci, td, event_trb, event, ep, status, false);
	}
	/*
	 * Did we transfer any data, despite the errors that might have
	 * happened?  I.e. did we get past the setup stage?
	 */
	if (event_trb != ep_ring->dequeue) {
		/* The event was for the status stage */
		if (event_trb == td->last_trb) {
			if (td->urb_length_set) {
				/* Don't overwrite a previously set error code
				 */
				if ((*status == -EINPROGRESS || *status == 0) &&
						(td->urb->transfer_flags
						 & URB_SHORT_NOT_OK))
					/* Did we already see a short data
					 * stage? */
					*status = -EREMOTEIO;
			} else {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
			}
		} else {
			/*
			 * Maybe the event was for the data stage?  If so,
			 * update the URB's actual_length now and flag it as
			 * set, so that it is not overwritten in the event for
			 * the last TRB.
			 */
			td->urb_length_set = true;
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
			xhci_dbg(xhci, "Waiting for status "
					"stage event\n");
			return 0;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	int len = 0;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool skip_td = false;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
			frame->status = 0;
			break;
		}
		if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
			trb_comp_code = COMP_SHORT_TX;
	/* fallthrough */
	case COMP_STOP_SHORT:
	case COMP_SHORT_TX:
		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
				-EREMOTEIO : 0;
		break;
	case COMP_BW_OVER:
		frame->status = -ECOMM;
		skip_td = true;
		break;
	case COMP_BUFF_OVER:
	case COMP_BABBLE:
		frame->status = -EOVERFLOW;
		skip_td = true;
		break;
	case COMP_DEV_ERR:
	case COMP_STALL:
		frame->status = -EPROTO;
		skip_td = true;
		break;
	case COMP_TX_ERR:
		frame->status = -EPROTO;
		if (event_trb != td->last_trb)
			return 0;
		skip_td = true;
		break;
	case COMP_STOP:
	case COMP_STOP_INVAL:
		break;
	default:
		frame->status = -1;
		break;
	}

	if (trb_comp_code == COMP_SUCCESS || skip_td) {
		frame->actual_length = frame->length;
		td->urb->actual_length += frame->length;
	} else if (trb_comp_code == COMP_STOP_SHORT) {
		frame->actual_length =
			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		td->urb->actual_length += frame->actual_length;
	} else {
		for (cur_trb = ep_ring->dequeue,
		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

		if (trb_comp_code != COMP_STOP_INVAL) {
			frame->actual_length = len;
			td->urb->actual_length += len;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring);
	inc_deq(xhci, ep_ring);

	return finish_td(xhci, td, NULL, event, ep, status, true);
}

/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	u32 trb_comp_code;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* Double check that the HW transferred everything. */
		if (event_trb != td->last_trb ||
		    EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			xhci_warn(xhci, "WARN Successful completion "
					"on short TX\n");
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
			if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
				trb_comp_code = COMP_SHORT_TX;
		} else {
			*status = 0;
		}
		break;
	case COMP_STOP_SHORT:
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	default:
		/* Others already handled above */
		break;
	}
	if (trb_comp_code == COMP_SHORT_TX)
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
	/* Stopped - short packet completion */
	if (trb_comp_code == COMP_STOP_SHORT) {
		td->urb->actual_length =
			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

		if (td->urb->transfer_buffer_length <
				td->urb->actual_length) {
			xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
			td->urb->actual_length = 0;
			 /* status will be set by usb core for canceled urbs */
		}
	/* Fast path - was this the last TRB in the TD for this URB? */
	} else if (event_trb == td->last_trb) {
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
			if (td->urb->transfer_buffer_length <
					td->urb->actual_length) {
				xhci_warn(xhci, "HC gave bad length "
						"of %d bytes left\n",
					  EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
				td->urb->actual_length = 0;
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
			/* Don't overwrite a previously set error code */
			if (*status == -EINPROGRESS) {
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
		} else {
			td->urb->actual_length =
				td->urb->transfer_buffer_length;
			/* Ignore a short packet completion if the
			 * untransferred length was zero.
			 */
			if (*status == -EREMOTEIO)
				*status = 0;
		}
	} else {
		/* Slow path - walk the list, starting from the dequeue
		 * pointer, to get the actual length transferred.
		 */
		td->urb->actual_length = 0;
		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
				cur_trb != event_trb;
				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				td->urb->actual_length +=
					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		/* If the ring didn't stop on a Link or No-op TRB, add
		 * in the actual bytes transferred from the Normal TRB
		 */
		if (trb_comp_code != COMP_STOP_INVAL)
			td->urb->actual_length +=
				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}
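
/*
 * Fast-path arithmetic example (hypothetical values): if a bulk TD asked for
 * transfer_buffer_length = 1024 bytes and the event for td->last_trb reports
 * an untransferred remainder of 200, actual_length becomes 1024 - 200 = 824;
 * a remainder of 0 on the last TRB means all 1024 bytes were transferred.
 */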

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
	__releases(&xhci->lock)
	__acquires(&xhci->lock)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = NULL;
	int status = -EINPROGRESS;
	struct urb_priv *urb_priv;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int ret = 0;
	int td_num = 0;
	bool handling_skipped_tds = false;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring ||
	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
	    EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
				"or incorrect stream ring\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	event_dma = le64_to_cpu(event->buffer);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
			break;
		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
			trb_comp_code = COMP_SHORT_TX;
		else
			xhci_warn_ratelimited(xhci,
					"WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STOP_SHORT:
		xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
		break;
	case COMP_STALL:
		xhci_dbg(xhci, "Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_dbg(xhci, "Transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_dbg(xhci, "Babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	case COMP_BW_OVER:
		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
		break;
	case COMP_BUFF_OVER:
		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
		break;
	case COMP_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_DEV_ERR:
		xhci_warn(xhci, "WARN: detect an incompatible device");
		status = -EPROTO;
		break;
	case COMP_MISSED_INT:
		/*
		 * When a missed service error is encountered, one or more
		 * isoc tds may have been missed by the xHC.
		 * Set the skip flag of the ep_ring; complete the missed tds
		 * as short transfers when processing the ep_ring next time.
		 */
		ep->skip = true;
		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
		goto cleanup;
	case COMP_PING_ERR:
		ep->skip = true;
		xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
		goto cleanup;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
			  trb_comp_code);
		goto cleanup;
	}

	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * A stopped endpoint may generate an extra completion
			 * event if the device was suspended.  Don't print
			 * warnings.
			 */
			if (!(trb_comp_code == COMP_STOP ||
						trb_comp_code == COMP_STOP_INVAL)) {
				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
						ep_index);
				xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
						(le32_to_cpu(event->flags) &
						 TRB_TYPE_BITMASK)>>10);
				xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
			}
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip "
						"flag set. Clear skip flag.\n");
			}
			ret = 0;
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
						"Clear skip flag.\n");
			ret = 0;
			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, event_dma, false);

		/*
		 * Skip the Force Stopped Event.  The event_trb(event_dma) of
		 * FSE is not in the current TD pointed to by ep_ring->dequeue
		 * because the hardware dequeue pointer is still at the
		 * previous TRB of the current TD.  The previous TRB may be a
		 * Link TRB or the last TRB of the previous TD.  The command
		 * completion handler will take care of the rest.
		 */
		if (!event_seg && (trb_comp_code == COMP_STOP ||
				   trb_comp_code == COMP_STOP_INVAL)) {
			ret = 0;
			goto cleanup;
		}

		if (!event_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					ret = 0;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD ep_index %d "
					"comp_code %u\n", ep_index,
					trb_comp_code);
				trb_in_td(xhci, ep_ring->deq_seg,
					  ep_ring->dequeue, td->last_trb,
					  event_dma, true);
				return -ESHUTDOWN;
			}

			ret = skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_TX)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
			ep->skip = false;
		}

		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
						sizeof(*event_trb)];
		/*
		 * No-op TRB should not trigger interrupts.
		 * If event_trb is a no-op TRB, it means the
		 * corresponding TD has been cancelled. Just ignore
		 * the TD.
		 */
		if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
			xhci_dbg(xhci,
				 "event_trb is a no-op TRB. Skip it\n");
			goto cleanup;
		}

		/* Now update the urb's actual_length and give back to
		 * the core
		 */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
						 &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			ret = process_isoc_td(xhci, td, event_trb, event, ep,
						 &status);
		else
			ret = process_bulk_intr_td(xhci, td, event_trb, event,
						 ep, &status);

cleanup:
		handling_skipped_tds = ep->skip &&
			trb_comp_code != COMP_MISSED_INT &&
			trb_comp_code != COMP_PING_ERR;

		/*
		 * Do not update event ring dequeue pointer if we're in a loop
		 * processing missed tds.
		 */
		if (!handling_skipped_tds)
			inc_deq(xhci, xhci->event_ring);

		if (ret) {
			urb = td->urb;
			urb_priv = urb->hcpriv;

			xhci_urb_free_priv(urb_priv);

			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
			if ((urb->actual_length != urb->transfer_buffer_length &&
						(urb->transfer_flags &
						 URB_SHORT_NOT_OK)) ||
					(status != 0 &&
					 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
						"expected = %d, status = %d\n",
						urb, urb->actual_length,
						urb->transfer_buffer_length,
						status);
			spin_unlock(&xhci->lock);
			/* EHCI, UHCI, and OHCI always unconditionally set the
			 * urb->status of an isochronous endpoint to 0.
			 */
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
				status = 0;
			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
			spin_lock(&xhci->lock);
		}

	/*
	 * If ep->skip is set, it means there are missed TDs on the
	 * endpoint ring that need to be taken care of.
	 * Process them as short transfers until we reach the TD pointed
	 * to by the event.
	 */
	} while (handling_skipped_tds);
	return 0;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done.  In future, <0 returns should indicate error code.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return 0;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return 0;
	}

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();
	/* FIXME: Handle more event types. */
	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_DEV_NOTE):
		handle_device_notification(xhci, event);
		break;
	default:
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
		    TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci->error_bitmask |= 1 << 3;
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring);

	/* Are there more items on the event ring?  Caller will call us again to
	 * check.
	 */
	return 1;
}

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 status;
	u64 temp_64;
	union xhci_trb *event_ring_deq;
	dma_addr_t deq;

	spin_lock(&xhci->lock);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = readl(&xhci->op_regs->status);
	if (status == 0xffffffff)
		goto hw_died;

	if (!(status & STS_EINT)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		spin_unlock(&xhci->lock);
		return IRQ_HANDLED;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	writel(status, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	if (hcd->irq) {
		u32 irq_pending;
		/* Acknowledge the PCI interrupt */
		irq_pending = readl(&xhci->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		writel(irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING ||
	    xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
				&xhci->ir_set->erst_dequeue);
		spin_unlock(&xhci->lock);

		return IRQ_HANDLED;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event "
					"ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}

irqreturn_t xhci_msi_irq(int irq, void *hcd)
{
	return xhci_irq(hcd);
}

/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);
	inc_enq(xhci, ring, more_trbs_coming);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	unsigned int num_trbs_needed;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

	while (1) {
		if (room_on_ring(xhci, ep_ring, num_trbs))
			break;

		if (ep_ring == xhci->cmd_ring) {
			xhci_err(xhci, "Do not support expand command ring\n");
			return -ENOMEM;
		}

		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
				"ERROR no room on ep ring, try ring expansion");
		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
					mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next;

		next = ring->enqueue;

		while (trb_is_link(next)) {
			/* If we're not dealing with 0.95 hardware or isoc rings
			 * on AMD 0.96 host, clear the chain bit.
			 */
			if (!xhci_link_trb_quirk(xhci) &&
					!(ring->type == TYPE_ISOC &&
					 (xhci->quirks & XHCI_AMD_0x96_HOST)))
				next->link.control &= cpu_to_le32(~TRB_CHAIN);
			else
				next->link.control |= cpu_to_le32(TRB_CHAIN);

			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state ^= 1;
			}
			ring->enq_seg = ring->enq_seg->next;
			ring->enqueue = ring->enq_seg->trbs;
			next = ring->enqueue;
		}
	}

	return 0;
}
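
/*
 * Sizing note (hypothetical numbers): if a caller asks prepare_ring() for
 * num_trbs = 5 while ep_ring->num_trbs_free is only 3, num_trbs_needed is 2
 * and xhci_ring_expansion() must grow the ring by at least that many TRBs
 * before queueing can proceed; the command ring is never expanded this way.
 */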

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td	*td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	urb_priv->td[td_index] = td;

	return 0;
}

static unsigned int count_trbs(u64 addr, u64 len)
{
	unsigned int num_trbs;

	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}
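
/*
 * Worked example (hypothetical values): with TRB_MAX_BUFF_SIZE of 64 KiB, a
 * buffer at addr 0x1f000 with len 0x2000 straddles a 64 KiB boundary, so
 * DIV_ROUND_UP(0x2000 + 0xf000, 0x10000) = 2 TRBs are needed; a zero-length
 * transfer still consumes one TRB.
 */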

static inline unsigned int count_trbs_needed(struct urb *urb)
{
	return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
}

static unsigned int count_sg_trbs_needed(struct urb *urb)
{
	struct scatterlist *sg;
	unsigned int i, len, full_len, num_trbs = 0;

	full_len = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		len = sg_dma_len(sg);
		num_trbs += count_trbs(sg_dma_address(sg), len);
		len = min_t(unsigned int, len, full_len);
		full_len -= len;
		if (full_len == 0)
			break;
	}

	return num_trbs;
}

static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
{
	u64 addr, len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	len = urb->iso_frame_desc[i].length;

	return count_trbs(addr, len);
}

static void check_trb_math(struct urb *urb, int running_total)
{
	if (unlikely(running_total != urb->transfer_buffer_length))
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
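
/*
 * Note: the first TRB of a TD is queued with its cycle bit inverted, so the
 * hardware will not see a valid TRB until the whole TD is on the ring; the
 * wmb() plus the final cycle-bit flip above is what atomically hands the TD
 * to the controller before the doorbell rings.
 */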

static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
						struct xhci_ep_ctx *ep_ctx)
{
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;

	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;

	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
}

/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx;

	ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
	check_interval(xhci, urb, ep_ctx);

	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/*
 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
 * packets remaining in the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * For xHCI 0.96 and older, the TD size field should be the remaining bytes
 * including this TRB, right shifted by 10.
 *
 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
 * This is taken care of in the TRB_TD_SIZE() macro.
 *
 * The last TRB in a TD must have the TD size set to zero.
 */
static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
			      int trb_buff_len, unsigned int td_total_len,
			      struct urb *urb, bool more_trbs_coming)
{
	u32 maxp, total_packet_count;

	/* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
	if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
		return ((td_total_len - transferred) >> 10);

	/* One TRB with a zero-length data packet. */
	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
	    trb_buff_len == td_total_len)
		return 0;

	/* for MTK xHCI, TD size doesn't include this TRB */
	if (xhci->quirks & XHCI_MTK_HOST)
		trb_buff_len = 0;

	maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);

	/* Queueing functions don't count the current TRB into transferred */
	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}
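
/*
 * Worked example (illustrative numbers, not from the spec): a 3000 byte TD
 * on an endpoint with wMaxPacketSize = 512 has total_packet_count =
 * DIV_ROUND_UP(3000, 512) = 6.  When queueing its second TRB of 1024 bytes
 * (transferred = 1024, trb_buff_len = 1024), the packets sent up to and
 * including that TRB are (1024 + 1024) / 512 = 4, so its TD size field on
 * a 1.0 host is 6 - 4 = 2 packets remaining.
 */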

static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
			 u32 *trb_buff_len, struct xhci_segment *seg)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int unalign;
	unsigned int max_pkt;
	u32 new_buff_len;

	max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
	unalign = (enqd_len + *trb_buff_len) % max_pkt;

	/* we got lucky, last normal TRB data on segment is packet aligned */
	if (unalign == 0)
		return 0;

	xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
		 unalign, *trb_buff_len);

	/* is the last normal TRB alignable by splitting it? */
	if (*trb_buff_len > unalign) {
		*trb_buff_len -= unalign;
		xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
		return 0;
	}

	/*
	 * We want enqd_len + trb_buff_len to sum up to a number that is
	 * divisible by the endpoint's wMaxPacketSize. IOW:
	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
	 */
	new_buff_len = max_pkt - (enqd_len % max_pkt);

	if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
		new_buff_len = (urb->transfer_buffer_length - enqd_len);

	/* create a bounce buffer, at most max_pkt bytes, pointed to by the last TRB */
	if (usb_urb_dir_out(urb)) {
		sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
				   seg->bounce_buf, new_buff_len, enqd_len);
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_TO_DEVICE);
	} else {
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_FROM_DEVICE);
	}

	if (dma_mapping_error(dev, seg->bounce_dma)) {
		/* try without aligning. Some host controllers survive */
		xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
		return 0;
	}
	*trb_buff_len = new_buff_len;
	seg->bounce_len = new_buff_len;
	seg->bounce_offs = enqd_len;

	xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);

	return 1;
}
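
/*
 * Illustrative numbers for the bounce path above (values assumed): with
 * max_pkt = 512 and enqd_len = 1800, new_buff_len = 512 - (1800 % 512) =
 * 248, so enqd_len + new_buff_len = 2048 is a multiple of wMaxPacketSize
 * and the TD can be split at the segment boundary without ending mid-packet.
 */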

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_generic_trb *start_trb;
	struct scatterlist *sg = NULL;
	bool more_trbs_coming = true;
	bool need_zero_pkt = false;
	bool first_trb = true;
	unsigned int num_trbs;
	unsigned int start_cycle, num_sgs = 0;
	unsigned int enqd_len, block_len, trb_buff_len, full_len;
	int sent_len, ret;
	u32 field, length_field, remainder;
	u64 addr, send_addr;

	ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ring)
		return -EINVAL;

	full_len = urb->transfer_buffer_length;
	/* If we have scatter/gather list, we use it. */
	if (urb->num_sgs) {
		num_sgs = urb->num_mapped_sgs;
		sg = urb->sg;
		addr = (u64) sg_dma_address(sg);
		block_len = sg_dma_len(sg);
		num_trbs = count_sg_trbs_needed(urb);
	} else {
		num_trbs = count_trbs_needed(urb);
		addr = (u64) urb->transfer_dma;
		block_len = full_len;
	}
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (unlikely(ret < 0))
		return ret;

	urb_priv = urb->hcpriv;

	/* Deal with URB_ZERO_PACKET - need one more td/trb */
	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
		need_zero_pkt = true;

	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;
	send_addr = addr;

	/* Queue the TRBs, even if they are zero-length */
	for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) {
		field = TRB_TYPE(TRB_NORMAL);

		/* TRB buffer should not cross 64KB boundaries */
		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);

		if (enqd_len + trb_buff_len > full_len)
			trb_buff_len = full_len - enqd_len;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else
			field |= ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (enqd_len + trb_buff_len < full_len) {
			field |= TRB_CHAIN;
			if (trb_is_link(ring->enqueue + 1)) {
				if (xhci_align_td(xhci, urb, enqd_len,
						  &trb_buff_len,
						  ring->enq_seg)) {
					send_addr = ring->enq_seg->bounce_dma;
					/* assuming TD won't span 2 segs */
					td->bounce_seg = ring->enq_seg;
				}
			}
		}
		if (enqd_len + trb_buff_len >= full_len) {
			field &= ~TRB_CHAIN;
			field |= TRB_IOC;
			more_trbs_coming = false;
			td->last_trb = ring->enqueue;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
					      full_len, urb, more_trbs_coming);

		length_field = TRB_LEN(trb_buff_len) |
			TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);

		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
				lower_32_bits(send_addr),
				upper_32_bits(send_addr),
				length_field,
				field);

		addr += trb_buff_len;
		sent_len = trb_buff_len;

		while (sg && sent_len >= block_len) {
			/* New sg entry */
			--num_sgs;
			sent_len -= block_len;
			if (num_sgs != 0) {
				sg = sg_next(sg);
				block_len = sg_dma_len(sg);
				addr = (u64) sg_dma_address(sg);
				addr += sent_len;
			}
		}
		block_len -= sent_len;
		send_addr = addr;
	}

	if (need_zero_pkt) {
		ret = prepare_transfer(xhci, xhci->devs[slot_id],
				       ep_index, urb->stream_id,
				       1, urb, 1, mem_flags);
		urb_priv->td[1]->last_trb = ring->enqueue;
		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
	}

	check_trb_math(urb, enqd_len);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}
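
/*
 * A quick sketch of the 64KB boundary rule used above (address assumed for
 * illustration): for addr = 0x2000fc00, TRB_BUFF_LEN_UP_TO_BOUNDARY(addr)
 * yields 0x10000 - 0xfc00 = 0x400, so the first TRB carries only 1024
 * bytes up to the boundary and the rest of the block continues in the
 * following chained TRB(s).
 */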

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field, remainder;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB.
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
	if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	remainder = xhci_td_remainder(xhci, 0,
				   urb->transfer_buffer_length,
				   urb->transfer_buffer_length,
				   urb, 1);

	length_field = TRB_LEN(urb->transfer_buffer_length) |
		TRB_TD_SIZE(remainder) |
		TRB_INTR_TARGET(0);

	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}
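
/*
 * Illustrative example (request values assumed): a GET_DESCRIPTOR
 * control-IN request with wLength = 18 is queued above as three TRBs: a
 * setup stage TRB (TRB_SETUP, 8 bytes of immediate data, and on 1.0+
 * hosts TRB_TX_TYPE(TRB_DATA_IN)), one data stage TRB (TRB_DATA |
 * TRB_DIR_IN | TRB_ISP, 18 bytes), and a status stage TRB (TRB_STATUS |
 * TRB_IOC) in the OUT direction, since the device sent the data.
 */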

/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD.  Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst.  Basically, for everything but SuperSpeed devices, this field will be
 * zero.  Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
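
/*
 * Worked example (values assumed): a SuperSpeed isoc endpoint with
 * bMaxBurst = 2 (three packets per burst) moving a 7 packet TD needs
 * DIV_ROUND_UP(7, 3) = 3 bursts, encoded zero-based as TBC = 2.
 */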

/*
 * Returns the number of packets in the last "burst" of packets.  This field is
 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	if (urb->dev->speed >= USB_SPEED_SUPER) {
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	}
	if (total_packet_count == 0)
		return 0;
	return total_packet_count - 1;
}
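
/*
 * Continuing the illustrative numbers above: with 7 packets and
 * bMaxBurst = 2, residue = 7 % 3 = 1, so the last burst holds one packet
 * and TLBPC = residue - 1 = 0.  A 6 packet TD would give residue = 0 and
 * TLBPC = max_burst = 2, i.e. a full three packet last burst.
 */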

/*
 * Calculates the Frame ID field of the isochronous TRB, which identifies
 * the target frame that the interval associated with this Isochronous
 * Transfer Descriptor will start on. Refer to 4.11.2.5 in the xHCI 1.1 spec.
 *
 * Returns the actual frame id on success, negative value on error.
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
		struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 *
	 * If bit [3] of IST is cleared to '0', software can add a TRB no
	 * later than IST[2:0] Microframes before that TRB is scheduled to
	 * be executed.
	 * If bit [3] of IST is set to '1', software can add a TRB no later
	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;

	/* Software shall not schedule an Isoch TD with a Frame ID value that
	 * is less than the Start Frame ID or greater than the End Frame ID,
	 * where:
	 *
	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
	 *
	 * Both the End Frame ID and Start Frame ID values are calculated
	 * in microframes. When software determines the valid Frame ID value,
	 * the End Frame ID value should be rounded down to the nearest Frame
	 * boundary, and the Start Frame ID value should be rounded up to the
	 * nearest Frame boundary.
	 */
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	start_frame_id = roundup(current_frame_id + ist + 1, 8);
	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

	start_frame &= 0x7ff;
	start_frame_id = (start_frame_id >> 3) & 0x7ff;
	end_frame_id = (end_frame_id >> 3) & 0x7ff;

	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
		 __func__, index, readl(&xhci->run_regs->microframe_index),
		 start_frame_id, end_frame_id, start_frame);

	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
				start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if ((start_frame > end_frame_id &&
				start_frame < start_frame_id))
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (index == 0) {
		if (ret == -EINVAL || start_frame == start_frame_id) {
			start_frame = start_frame_id + 1;
			if (urb->dev->speed == USB_SPEED_LOW ||
					urb->dev->speed == USB_SPEED_FULL)
				urb->start_frame = start_frame;
			else
				urb->start_frame = start_frame << 3;
			ret = 0;
		}
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
				start_frame, current_frame_id, index,
				start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		return ret;
	}

	return start_frame;
}
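
/*
 * Illustrative example (register values assumed): with MFINDEX = 256
 * microframes and ist = 8, start_frame_id = roundup(256 + 8 + 1, 8) = 272
 * and end_frame_id = rounddown(256 + 895 * 8, 8) = 7416.  After the >> 3
 * conversion the valid window is frames 34 to 927, so a start_frame below
 * 34 or above 927 is rejected with -EINVAL and the SIA bit is used instead.
 */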

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;
	struct xhci_virt_ep *xep;
	int frame_id;

	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}
	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the TRBs for each TD, even if they are zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_pkt_count, max_pkt;
		unsigned int burst_count, last_burst_pkt_count;
		u32 sia_frame_id;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

		/* A zero-length transfer still involves at least one packet. */
		if (total_pkt_count == 0)
			total_pkt_count++;
		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
							urb, total_pkt_count);

		trbs_per_td = count_isoc_trbs_needed(urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}
		td = urb_priv->td[i];

		/* use SIA as default, if frame id is used overwrite it */
		sia_frame_id = TRB_SIA;
		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
		    HCC_CFC(xhci->hcc_params)) {
			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
			if (frame_id >= 0)
				sia_frame_id = TRB_FRAME_ID(frame_id);
		}
		/*
		 * Set isoc specific data for the first TRB in a TD.
		 * Prevent HW from getting the TRBs by keeping the cycle state
		 * inverted in the first TD's isoc TRB.
		 */
		field = TRB_TYPE(TRB_ISOC) |
			TRB_TLBPC(last_burst_pkt_count) |
			sia_frame_id |
			(i ? ep_ring->cycle_state : !start_cycle);

		/* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
		if (!xep->use_extended_tbc)
			field |= TRB_TBC(burst_count);

		/* fill the rest of the TRB fields, and remaining normal TRBs */
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;

			/* only first TRB is isoc, overwrite otherwise */
			if (!first_trb)
				field = TRB_TYPE(TRB_NORMAL) |
					ep_ring->cycle_state;

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Set the chain bit for all except the last TRB  */
			if (j < trbs_per_td - 1) {
				more_trbs_coming = true;
				field |= TRB_CHAIN;
			} else {
				more_trbs_coming = false;
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				/* set BEI, except for the last TD */
				if (xhci->hci_version >= 0x100 &&
				    !(xhci->quirks & XHCI_AVOID_BEI) &&
				    i < num_tds - 1)
					field |= TRB_BEI;
			}
			/* Calculate TRB length */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			remainder = xhci_td_remainder(xhci, running_total,
						   trb_buff_len, td_len,
						   urb, more_trbs_coming);

			length_field = TRB_LEN(trb_buff_len) |
				TRB_INTR_TARGET(0);

			/* xhci 1.1 with ETE uses TD Size field for TBC */
			if (first_trb && xep->use_extended_tbc)
				length_field |= TRB_TD_SIZE_TBC(burst_count);
			else
				length_field |= TRB_TD_SIZE(remainder);
			first_trb = false;

			queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* store the next frame id */
	if (HCC_CFC(xhci->hcc_params))
		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit. That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them.  td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

/*
 * Check transfer ring to guarantee there is enough room for the urb.
 * Update ISO URB start_frame and interval.
 * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
 * Contiguous Frame ID is not supported by the HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int num_tds, num_trbs, i;
	int ret;
	struct xhci_virt_ep *xep;
	int ist;

	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	/*
	 * Check interval value. This should be done before we start to
	 * calculate the start frame value.
	 */
	check_interval(xhci, urb, ep_ctx);

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
		if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
				EP_STATE_RUNNING) {
			urb->start_frame = xep->next_frame_id;
			goto skip_start_over;
		}
	}

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;
	/*
	 * Round up to the next frame and account for the time before the TRB
	 * really gets scheduled by the hardware.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
	start_frame += ist + XHCI_CFC_DELAY;
	start_frame = roundup(start_frame, 8);

	/*
	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
	 * is greater than 8 microframes.
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL) {
		start_frame = roundup(start_frame, urb->interval << 3);
		urb->start_frame = start_frame >> 3;
	} else {
		start_frame = roundup(start_frame, urb->interval);
		urb->start_frame = start_frame;
	}

skip_start_over:
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
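
/*
 * Worked example of the start frame math above (all values assumed): if
 * the masked MFINDEX plus ist plus XHCI_CFC_DELAY comes to 8487
 * microframes, roundup(8487, 8) = 8488; for a full-speed endpoint with
 * urb->interval = 4 frames this is rounded up to a multiple of
 * 4 << 3 = 32 microframes, giving 8512 and urb->start_frame = 8512 >> 3 =
 * 1064.
 */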

/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
		(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
		return -ESHUTDOWN;
	}

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	/* if there are no other commands queued we start the timeout timer */
	if (xhci->cmd_list.next == &cmd->cmd_list &&
	    !timer_pending(&xhci->cmd_timer)) {
		xhci->current_cmd = cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 trb_sct = 0;
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;
	struct xhci_command *cmd;
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
		deq_state->new_deq_seg,
		(unsigned long long)deq_state->new_deq_seg->dma,
		deq_state->new_deq_ptr,
		(unsigned long long)xhci_trb_virt_to_dma(
			deq_state->new_deq_seg, deq_state->new_deq_ptr),
		deq_state->new_cycle_state);

	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
				    deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
		return;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!cmd) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
		return;
	}

	ep->queued_deq_seg = deq_state->new_deq_seg;
	ep->queued_deq_ptr = deq_state->new_deq_ptr;
	if (stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
		upper_32_bits(addr), trb_stream_id,
		trb_slot_id | trb_ep_index | type, false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return;
	}

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}