/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is the producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *   updates event ring dequeue pointer.  HC is the consumer for the command and
 *   endpoint rings; it generates events on the event ring for these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

static int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return TRB_TYPE_LINK_LE32(link->control);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);
	unsigned long long addr;

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer, bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;
	unsigned long long addr;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				/*
				 * If the caller doesn't plan on enqueueing more
				 * TDs before ringing the doorbell, then we
				 * don't want to give the link TRB to the
				 * hardware just yet.  We'll give the link TRB
				 * back in prepare_ring() just before we enqueue
				 * the TD at the top of the ring.
				 */
				if (!chain && !more_trbs_coming)
					break;

				/* If we're not dealing with 0.95 hardware,
				 * carry over the chain bit of the previous TRB
				 * (which may mean the chain bit is cleared).
				 */
				if (!xhci_link_trb_quirk(xhci)) {
					next->link.control &=
						cpu_to_le32(~TRB_CHAIN);
					next->link.control |=
						cpu_to_le32(chain);
				}
				/* Give this link TRB to the hardware */
				wmb();
				next->link.control ^= cpu_to_le32(TRB_CYCLE);
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;
	struct xhci_segment *cur_seg;
	unsigned int left_on_ring;

	/* If we are currently pointing to a link TRB, advance the
	 * enqueue pointer before checking for space */
	while (last_trb(xhci, ring, enq_seg, enq)) {
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	}

	/* Check if ring is empty */
	if (enq == ring->dequeue) {
		/* Can't use link trbs */
		left_on_ring = TRBS_PER_SEGMENT - 1;
		for (cur_seg = enq_seg->next; cur_seg != enq_seg;
				cur_seg = cur_seg->next)
			left_on_ring += TRBS_PER_SEGMENT - 1;

		/* Always need one TRB free in the ring. */
		left_on_ring -= 1;
		if (num_trbs > left_on_ring) {
			xhci_warn(xhci, "Not enough room on ring; "
					"need %u TRBs, %u TRBs left\n",
					num_trbs, left_on_ring);
			return 0;
		}
		return 1;
	}
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}
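
/*
 * Sketch of the bookkeeping the FIXME above suggests; illustrative only
 * and not compiled.  num_trbs_free is a hypothetical field (this version
 * of the driver does not maintain one) that would be decremented on
 * enqueue and incremented on dequeue, making the space check O(1).  It is
 * assumed to already exclude the one permanently reserved slot.
 */
#if 0
static int room_on_ring_counted(struct xhci_ring *ring, unsigned int num_trbs)
{
	return ring->num_trbs_free >= num_trbs;
}
#endif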

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "// Ding dong!\n");
	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (!(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb	*trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
			*cycle_state ^= 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return NULL;
	}
	return cur_seg;
}


static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t addr;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	state->new_cycle_state = 0;
	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	trb = &state->new_deq_ptr->generic;
	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
		state->new_cycle_state ^= 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/*
	 * If there is only one segment in a ring, find_trb_seg()'s while loop
	 * will not run, and it will return before it has a chance to see if it
	 * needs to toggle the cycle bit.  It can't tell if the stalled transfer
	 * ended just before the link TRB on a one-segment ring, or if the TD
	 * wrapped around the top of the ring, because it doesn't have the TD in
	 * question.  Look for the one-segment case where stalled TRB's address
	 * is greater than the new dequeue pointer address.
	 */
	if (ep_ring->first_seg == ep_ring->first_seg->next &&
			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
		state->new_cycle_state ^= 0x1;
	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
					cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd;
	struct urb	*urb;
	struct urb_priv	*urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs	== 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(xhci, urb_priv);
		spin_lock(&xhci->lock);
	}
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(
			     le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
		slot_id = TRB_TO_SLOT_ID(
			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
				event);
		else
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ep->stopped_trb = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->xhc_state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_virt_ep *temp_ep;
	struct xhci_ring *ring;
	struct xhci_td *cur_td;
	int ret, i, j;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock(&xhci->lock);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
				"exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock(&xhci->lock);

	ret = xhci_halt(xhci);

	spin_lock(&xhci->lock);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled. If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++) {
			temp_ep = &xhci->devs[i]->eps[j];
			ring = temp_ep->ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
					"ep index %u\n", i, j);
			while (!list_empty(&ring->td_list)) {
				cur_td = list_first_entry(&ring->td_list,
						struct xhci_td,
						td_list);
				list_del_init(&cur_td->td_list);
				if (!list_empty(&cur_td->cancelled_td_list))
					list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
			while (!list_empty(&temp_ep->cancelled_td_list)) {
				cur_td = list_first_entry(
						&temp_ep->cancelled_td_list,
						struct xhci_td,
						cancelled_td_list);
				list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
		}
	}
	spin_unlock(&xhci->lock);
	xhci_dbg(xhci, "Calling usb_hc_died()\n");
	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
	xhci_dbg(xhci, "xHCI host controller is dead.\n");
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for "
				"freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
				  GET_COMP_CODE(le32_to_cpu(event->status)));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
			 le64_to_cpu(ep_ctx->deq));
		if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
					 dev->eps[ep_index].queued_deq_ptr) ==
		    (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
			ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq "
					"Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
					dev->eps[ep_index].queued_deq_seg,
					dev->eps[ep_index].queued_deq_ptr);
		}
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void handle_reset_ep_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	int slot_id;
	unsigned int ep_index;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
		 GET_COMP_CODE(le32_to_cpu(event->status)));

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}

/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event)
{
	struct xhci_command *command;

	if (list_empty(&virt_dev->cmd_list))
		return 0;

	command = list_entry(virt_dev->cmd_list.next,
			struct xhci_command, cmd_list);
	if (xhci->cmd_ring->dequeue != command->command_trb)
		return 0;

	command->status = GET_COMP_CODE(le32_to_cpu(event->status));
	list_del(&command->cmd_list);
	if (command->completion)
		complete(command->completion);
	else
		xhci_free_command(xhci, command);
	return 1;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
		& TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id]) {
			if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
				/* Delete default control endpoint resources */
				xhci_free_device_endpoint_resources(xhci,
						xhci->devs[slot_id], true);
			xhci_free_virt_device(xhci, slot_id);
		}
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		/*
		 * Configure endpoint commands can come from the USB core
		 * configuration or alt setting changes, or because the HW
		 * needed an extra configure endpoint command after a reset
		 * endpoint command or streams were being configured.
		 * If the command was for a halted endpoint, the xHCI driver
		 * is not waiting on the configure endpoint command.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
		/* A usb_set_interface() call directly after clearing a halted
		 * condition may race on this quirky hardware.  Not worth
		 * worrying about, since this is prototype hardware.  Not sure
		 * if this will work for streams, but streams support was
		 * untested on this prototype.
		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
				ep_index != (unsigned int) -1 &&
		    le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
		    le32_to_cpu(ctrl_ctx->drop_flags)) {
			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			if (!(ep_state & EP_HALTED))
				goto bandwidth_change;
			xhci_dbg(xhci, "Completed config ep cmd - "
					"last ep index = %d, state = %d\n",
					ep_index, ep_state);
			/* Clear internal halted state and restart ring(s) */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
			break;
		}
bandwidth_change:
		xhci_dbg(xhci, "Completed config ep cmd\n");
		xhci->devs[slot_id]->cmd_status =
			GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		break;
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		slot_id = TRB_TO_SLOT_ID(
			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	case TRB_TYPE(TRB_NEC_GET_FW):
		if (!(xhci->quirks & XHCI_NEC_HOST)) {
			xhci->error_bitmask |= 1 << 6;
			break;
		}
		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
			 NEC_FW_MAJOR(le32_to_cpu(event->status)),
			 NEC_FW_MINOR(le32_to_cpu(event->status)));
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
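
/*
 * Worked example (hypothetical layout): with xhci->port_array holding
 * { 0x03, 0x02, 0x03 } (USB 3.0, USB 2.0, USB 3.0), a port status event
 * for hardware port_id 3 handled by the USB 3.0 roothub sees exactly one
 * earlier similar-speed port (port 0), so the faked port index is 1.
 */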

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];
	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in"
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
		hcd = xhci->shared_hcd;
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed == HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = xhci_readl(xhci, port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = xhci_readl(xhci, &xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED(temp)) {
			xhci_dbg(xhci, "resume SS port %d\n", port_id);
			temp = xhci_port_state_to_neutral(temp);
			temp &= ~PORT_PLS_MASK;
			temp |= PORT_LINK_STROBE | XDEV_U0;
			xhci_writel(xhci, temp, port_array[faked_port_index]);
			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
					faked_port_index);
			if (!slot_id) {
				xhci_dbg(xhci, "slot_id is zero\n");
				goto cleanup;
			}
			xhci_ring_device(xhci, slot_id);
			xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
			/* Clear PORT_PLC */
			temp = xhci_readl(xhci, port_array[faked_port_index]);
			temp = xhci_port_state_to_neutral(temp);
			temp |= PORT_PLC;
			xhci_writel(xhci, temp, port_array[faked_port_index]);
		} else {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(20);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb	*start_trb,
		union xhci_trb	*end_trb,
		dma_addr_t	suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}
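
/*
 * Worked example (hypothetical addresses): a single segment at DMA
 * 0x1000 holds 64 16-byte TRBs, spanning 0x1000-0x13f0.  For a TD that
 * wraps the segment with start_trb at 0x1300 and end_trb at 0x1100, a
 * suspect_dma of 0x1040 falls in the wrapped range [cur_seg->dma,
 * end_trb_dma] and this segment is returned.
 */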

static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	ep->ep_state |= EP_HALTED;
	ep->stopped_td = td;
	ep->stopped_trb = event_trb;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;
	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted. The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
		    cpu_to_le32(EP_STATE_HALTED))
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

/*
 * Finish TD processing: remove the TD from the endpoint's TD list.
 * Return 1 if the URB can be given back.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv	*urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		ep->stopped_trb = event_trb;
		return 0;
	} else {
		if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD.  We can't do that here because
			 * the halt condition must be cleared first.  Let the
			 * USB class driver clear the stall later.
			 */
			ep->stopped_td = td;
			ep->stopped_trb = event_trb;
			ep->stopped_stream = ep_ring->stream_id;
		} else if (xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code)) {
			/* Other types of errors halt the endpoint, but the
			 * class driver doesn't call usb_reset_endpoint() unless
			 * the error is -EPIPE.  Clear the halted status in the
			 * xHCI hardware manually.
			 */
			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, ep_ring->stream_id,
					td, event_trb);
		} else {
			/* Update ring dequeue pointer */
			while (ep_ring->dequeue != td->last_trb)
				inc_deq(xhci, ep_ring, false);
			inc_deq(xhci, ep_ring, false);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;
		urb_priv = urb->hcpriv;

		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned).  Play it safe and say we didn't
		 * transfer anything.
		 */
		if (urb->actual_length > urb->transfer_buffer_length) {
			xhci_warn(xhci, "URB transfer length is wrong, "
					"xHC issue? req. len = %u, "
					"act. len = %u\n",
					urb->transfer_buffer_length,
					urb->actual_length);
			urb->actual_length = 0;
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
		}
		list_del_init(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list))
			list_del_init(&td->cancelled_td_list);

		urb_priv->td_cnt++;
		/* Give the URB back when all of its TDs are completed */
		if (urb_priv->td_cnt == urb_priv->length) {
			ret = 1;
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
				xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
				if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
					== 0) {
					if (xhci->quirks & XHCI_AMD_PLL_FIX)
						usb_amd_quirk_pll_enable();
				}
			}
		}
	}

	return ret;
}

/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	xhci_debug_trb(xhci, xhci->event_ring->dequeue);
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (event_trb == ep_ring->dequeue) {
			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		xhci_warn(xhci, "WARN: short transfer on control ep\n");
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	case COMP_STOP_INVAL:
	case COMP_STOP:
		return finish_td(xhci, td, event_trb, event, ep, status, false);
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error code %u, "
				"halted endpoint index = %u\n",
				trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL:
		/* Did we transfer part of the data (middle) phase? */
		if (event_trb != ep_ring->dequeue &&
				event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length
				- TRB_LEN(le32_to_cpu(event->transfer_len));
		else
			td->urb->actual_length = 0;

		xhci_cleanup_halted_endpoint(xhci,
			slot_id, ep_index, 0, td, event_trb);
		return finish_td(xhci, td, event_trb, event, ep, status, true);
	}
	/*
	 * Did we transfer any data, despite the errors that might have
	 * happened?  I.e. did we get past the setup stage?
	 */
	if (event_trb != ep_ring->dequeue) {
		/* The event was for the status stage */
		if (event_trb == td->last_trb) {
			if (td->urb->actual_length != 0) {
				/* Don't overwrite a previously set error code
				 */
				if ((*status == -EINPROGRESS || *status == 0) &&
						(td->urb->transfer_flags
						 & URB_SHORT_NOT_OK))
					/* Did we already see a short data
					 * stage? */
					*status = -EREMOTEIO;
			} else {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
			}
		} else {
		/* Maybe the event was for the data stage? */
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				TRB_LEN(le32_to_cpu(event->transfer_len));
			xhci_dbg(xhci, "Waiting for status "
					"stage event\n");
			return 0;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	int len = 0;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool skip_td = false;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		frame->status = 0;
		break;
	case COMP_SHORT_TX:
		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
				-EREMOTEIO : 0;
		break;
	case COMP_BW_OVER:
		frame->status = -ECOMM;
		skip_td = true;
		break;
	case COMP_BUFF_OVER:
	case COMP_BABBLE:
		frame->status = -EOVERFLOW;
		skip_td = true;
		break;
	case COMP_DEV_ERR:
	case COMP_STALL:
		frame->status = -EPROTO;
		skip_td = true;
		break;
	case COMP_STOP:
	case COMP_STOP_INVAL:
		break;
	default:
		frame->status = -1;
		break;
	}

	if (trb_comp_code == COMP_SUCCESS || skip_td) {
		frame->actual_length = frame->length;
		td->urb->actual_length += frame->length;
	} else {
		for (cur_trb = ep_ring->dequeue,
		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
			TRB_LEN(le32_to_cpu(event->transfer_len));

		if (trb_comp_code != COMP_STOP_INVAL) {
			frame->actual_length = len;
			td->urb->actual_length += len;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring, false);
	inc_deq(xhci, ep_ring, false);

	return finish_td(xhci, td, NULL, event, ep, status, true);
}

/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	u32 trb_comp_code;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* Double check that the HW transferred everything. */
		if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN Successful completion "
					"on short TX\n");
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	default:
		/* Others already handled above */
		break;
	}
	if (trb_comp_code == COMP_SHORT_TX)
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				TRB_LEN(le32_to_cpu(event->transfer_len)));
	/* Fast path - was this the last TRB in the TD for this URB? */
	if (event_trb == td->last_trb) {
		if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				TRB_LEN(le32_to_cpu(event->transfer_len));
			if (td->urb->transfer_buffer_length <
					td->urb->actual_length) {
				xhci_warn(xhci, "HC gave bad length "
						"of %d bytes left\n",
					  TRB_LEN(le32_to_cpu(event->transfer_len)));
				td->urb->actual_length = 0;
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
			/* Don't overwrite a previously set error code */
			if (*status == -EINPROGRESS) {
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
		} else {
			td->urb->actual_length =
				td->urb->transfer_buffer_length;
			/* Ignore a short packet completion if the
			 * untransferred length was zero.
			 */
			if (*status == -EREMOTEIO)
				*status = 0;
		}
	} else {
		/* Slow path - walk the list, starting from the dequeue
		 * pointer, to get the actual length transferred.
		 */
		td->urb->actual_length = 0;
		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
				cur_trb != event_trb;
				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				td->urb->actual_length +=
					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		/* If the ring didn't stop on a Link or No-op TRB, add
		 * in the actual bytes transferred from the Normal TRB
		 */
		if (trb_comp_code != COMP_STOP_INVAL)
			td->urb->actual_length +=
				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
				TRB_LEN(le32_to_cpu(event->transfer_len));
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = NULL;
	int status = -EINPROGRESS;
	struct urb_priv *urb_priv;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int ret = 0;
	int td_num = 0;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring ||
	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
	    EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
				"or incorrect stream ring\n");
		return -ENODEV;
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	event_dma = le64_to_cpu(event->buffer);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STALL:
		xhci_warn(xhci, "WARN: Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_warn(xhci, "WARN: babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	case COMP_BW_OVER:
		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
		break;
	case COMP_BUFF_OVER:
		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
		break;
	case COMP_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_DEV_ERR:
		xhci_warn(xhci, "WARN: detect an incompatible device");
		status = -EPROTO;
		break;
	case COMP_MISSED_INT:
		/*
		 * When we encounter a missed service error, one or more isoc
		 * TDs may have been missed by the xHC.
		 * Set the skip flag of the ep_ring; complete the missed TDs as
		 * short transfers when processing the ep_ring next time.
		 */
		ep->skip = true;
		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
		goto cleanup;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
				"busted\n");
		goto cleanup;
	}

	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
					"with no TDs queued?\n",
				  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				  ep_index);
			xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
				 (le32_to_cpu(event->flags) &
				  TRB_TYPE_BITMASK)>>10);
			xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip "
						"flag set. Clear skip flag.\n");
			}
			ret = 0;
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
						"Clear skip flag.\n");
			ret = 0;
			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, event_dma);

		/*
		 * Skip the Force Stopped Event.  The event_trb (event_dma) of
		 * the FSE is not in the current TD pointed to by
		 * ep_ring->dequeue, because the hardware dequeue pointer is
		 * still at the previous TRB of the current TD.  The previous
		 * TRB may be a Link TRB or the last TRB of the previous TD.
		 * The command completion handler will take care of the rest.
		 */
		if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
			ret = 0;
			goto cleanup;
		}

		if (!event_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					ret = 0;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD\n");
				return -ESHUTDOWN;
			}

			ret = skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_TX)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
			ep->skip = false;
		}

		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
						sizeof(*event_trb)];
		/*
		 * No-op TRB should not trigger interrupts.
		 * If event_trb is a no-op TRB, it means the
		 * corresponding TD has been cancelled. Just ignore
		 * the TD.
		 */
		if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
			xhci_dbg(xhci,
				 "event_trb is a no-op TRB. Skip it\n");
			goto cleanup;
		}

		/* Now update the urb's actual_length and give back to
		 * the core
		 */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
						 &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			ret = process_isoc_td(xhci, td, event_trb, event, ep,
						 &status);
		else
			ret = process_bulk_intr_td(xhci, td, event_trb, event,
						 ep, &status);

cleanup:
		/*
		 * Do not update event ring dequeue pointer if ep->skip is set.
		 * We will come back to continue processing the missed TDs.
		 */
		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
			inc_deq(xhci, xhci->event_ring, true);
		}

		if (ret) {
			urb = td->urb;
			urb_priv = urb->hcpriv;
			/* Leave the TD around for the reset endpoint function
			 * to use (but only if it's not a control endpoint,
			 * since we already queued the Set TR dequeue pointer
			 * command for stalled control endpoints).
			 */
			if (usb_endpoint_xfer_control(&urb->ep->desc) ||
				(trb_comp_code != COMP_STALL &&
					trb_comp_code != COMP_BABBLE))
				xhci_urb_free_priv(xhci, urb_priv);

			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
			if ((urb->actual_length != urb->transfer_buffer_length &&
						(urb->transfer_flags &
						 URB_SHORT_NOT_OK)) ||
					status != 0)
				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
						"expected = %x, status = %d\n",
						urb, urb->actual_length,
						urb->transfer_buffer_length,
						status);
			spin_unlock(&xhci->lock);
			/* EHCI, UHCI, and OHCI always unconditionally set the
			 * urb->status of an isochronous endpoint to 0.
			 */
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
				status = 0;
			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
			spin_lock(&xhci->lock);
		}

	/*
	 * If ep->skip is set, it means there are missed tds on the
	 * endpoint ring need to take care of.
	 * Process them as short transfer until reach the td pointed by
	 * the event.
	 */
	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);

	return 0;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done.  In future, <0 returns should indicate error code.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return 0;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return 0;
	}

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();
	/* FIXME: Handle more event types. */
	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	default:
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
		    TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci->error_bitmask |= 1 << 3;
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring, true);

	/* Are there more items on the event ring?  Caller will call us again to
	 * check.
	 */
	return 1;
}

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 status;
	union xhci_trb *trb;
	u64 temp_64;
	union xhci_trb *event_ring_deq;
	dma_addr_t deq;

	spin_lock(&xhci->lock);
	trb = xhci->event_ring->dequeue;
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = xhci_readl(xhci, &xhci->op_regs->status);
	if (status == 0xffffffff)
		goto hw_died;

	if (!(status & STS_EINT)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	xhci_writel(xhci, status, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	if (hcd->irq != -1) {
		u32 irq_pending;
		/* Acknowledge the PCI interrupt */
		irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		irq_pending |= 0x3;
		xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
				&xhci->ir_set->erst_dequeue);
		spin_unlock(&xhci->lock);

		return IRQ_HANDLED;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event "
					"ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);

	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}

irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
{
	irqreturn_t ret;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(hcd);
	set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
	if (xhci->shared_hcd)
		set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags);

	ret = xhci_irq(hcd);

	return ret;
}

/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer, bool more_trbs_coming,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);
	inc_enq(xhci, ring, consumer, more_trbs_coming);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}
	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
		/* FIXME allocate more room */
		xhci_err(xhci, "ERROR no room on ep ring\n");
		return -ENOMEM;
	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next;

		next = ring->enqueue;

		while (last_trb(xhci, ring, ring->enq_seg, next)) {
			/* If we're not dealing with 0.95 hardware,
			 * clear the chain bit.
			 */
			if (!xhci_link_trb_quirk(xhci))
				next->link.control &= cpu_to_le32(~TRB_CHAIN);
			else
				next->link.control |= cpu_to_le32(TRB_CHAIN);

			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt()) {
					xhci_dbg(xhci, "queue_trb: Toggle cycle "
						"state for ring %p = %i\n",
						ring, (unsigned int)ring->cycle_state);
				}
			}
			ring->enq_seg = ring->enq_seg->next;
			ring->enqueue = ring->enq_seg->trbs;
			next = ring->enqueue;
		}
	}

	return 0;
}

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td	*td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	urb_priv->td[td_index] = td;

	return 0;
}

static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_sgs;
	temp = urb->transfer_buffer_length;

	xhci_dbg(xhci, "count sg list trbs: \n");
	num_trbs = 0;
	for_each_sg(urb->sg, sg, num_sgs, i) {
		unsigned int previous_total_trbs = num_trbs;
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
		running_total &= TRB_MAX_BUFF_SIZE - 1;
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg) && running_total < temp) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
				i, (unsigned long long)sg_dma_address(sg),
				len, len, num_trbs - previous_total_trbs);

		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	xhci_dbg(xhci, "\n");
	if (!in_interrupt())
		xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
				"num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				num_trbs);
	return num_trbs;
}
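
/*
 * Example of the boundary math above (illustrative numbers): an sg
 * entry at dma 0x1fc00 with length 0x2000 has 0x400 bytes before the
 * first 64KB boundary, so the nonzero running_total counts one TRB,
 * and one more 64KB step covers the remaining 0x1c00 bytes: two TRBs
 * for a buffer that crosses a single 64KB boundary.
 */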

static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
				"TRBs, %d left\n", __func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
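
/*
 * Illustration (not extra driver logic): if the ring's cycle state was
 * 1 when queueing began, the first TRB was deliberately written with
 * its cycle bit at 0, so the hardware ignored the chain while it was
 * being built.  Flipping that one bit to 1 here, after the wmb(), is
 * the single write that hands the whole TD to the hardware.
 */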

/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
			xhci->devs[slot_id]->out_ctx, ep_index);
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}
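
/*
 * Interval example (illustrative): a full-speed interrupt endpoint
 * with urb->interval = 4 frames is compared as 32 microframes above;
 * if the endpoint context was programmed with 64 microframes instead,
 * the URB's interval is overwritten and converted back to 8 frames
 * for the LS/FS device.
 */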

/*
 * The TD size is the number of bytes remaining in the TD (including this TRB),
 * right shifted by 10.
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */
static u32 xhci_td_remainder(unsigned int remainder)
{
	u32 max = (1 << (21 - 17 + 1)) - 1;

	if ((remainder >> 10) >= max)
		return max << 17;
	else
		return (remainder >> 10) << 17;
}
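
/*
 * Worked arithmetic (illustrative): max = (1 << 5) - 1 = 31.  With
 * 20480 bytes remaining, 20480 >> 10 = 20 < 31, so the field is
 * 20 << 17; with 70000 bytes remaining, 70000 >> 10 = 68 >= 31, so
 * the field saturates at 31 << 17.
 */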

/*
 * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
 * the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     roundup(TD size in bytes / wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */

static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
		unsigned int total_packet_count, struct urb *urb)
{
	int packets_transferred;

	/* One TRB with a zero-length data packet. */
	if (running_total == 0 && trb_buff_len == 0)
		return 0;

	/* All the TRB queueing functions don't count the current TRB in
	 * running_total.
	 */
	packets_transferred = (running_total + trb_buff_len) /
		le16_to_cpu(urb->ep->desc.wMaxPacketSize);

	/* Per the comment above, the 1.0 TD size field is a packet count,
	 * not a byte count >> 10; cap it so it fits in bits 21:17.
	 */
	if (total_packet_count - packets_transferred > 31)
		return 31 << 17;
	return (total_packet_count - packets_transferred) << 17;
}
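
/*
 * Worked example (illustrative): a 3072-byte TD on an endpoint with
 * wMaxPacketSize = 512 gives total_packet_count = 6.  For the first
 * TRB carrying 1024 bytes (running_total = 0, trb_buff_len = 1024),
 * packets_transferred = 1024 / 512 = 2, so the TD size field encodes
 * 6 - 2 = 4 packets remaining.
 */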

static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total;
	unsigned int total_packet_count;
	bool first_trb;
	u64 addr;
	bool more_trbs_coming;

	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_sgs;
	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
			le16_to_cpu(urb->ep->desc.wMaxPacketSize));

	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller than
	 *    the amount of memory allocated for this scatter-gather list.
	 * 3. TRBs buffers can't cross 64KB boundaries.
	 */
	sg = urb->sg;
	addr = (u64) sg_dma_address(sg);
	this_sg_len = sg_dma_len(sg);
	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;
	xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
			trb_buff_len);

	first_trb = true;
	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 field = 0;
		u32 length_field = 0;
		u32 remainder = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
				"64KB boundary at %#x, end dma = %#x\n",
				(unsigned int) addr, trb_buff_len, trb_buff_len,
				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
				(unsigned int) addr + trb_buff_len);
		if (TRB_MAX_BUFF_SIZE -
				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
					(unsigned int) addr + trb_buff_len);
		}

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, false, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer --
		 * Are we done queueing all the TRBs for this sg entry?
		 */
		this_sg_len -= trb_buff_len;
		if (this_sg_len == 0) {
			--num_sgs;
			if (num_sgs == 0)
				break;
			sg = sg_next(sg);
			addr = (u64) sg_dma_address(sg);
			this_sg_len = sg_dma_len(sg);
		} else {
			addr += trb_buff_len;
		}

		trb_buff_len = TRB_MAX_BUFF_SIZE -
			(addr & (TRB_MAX_BUFF_SIZE - 1));
		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
		if (running_total + trb_buff_len > urb->transfer_buffer_length)
			trb_buff_len =
				urb->transfer_buffer_length - running_total;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	bool more_trbs_coming;
	int start_cycle;
	u32 field, length_field;

	int running_total, trb_buff_len, ret;
	unsigned int total_packet_count;
	u64 addr;

	if (urb->num_sgs)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

	if (!in_interrupt())
		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
				"addr = %#llx, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				(unsigned long long)urb->transfer_dma,
				num_trbs);

	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
			le16_to_cpu(urb->ep->desc.wMaxPacketSize));
	/* How much data is in the first TRB? */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, false, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	if (!in_interrupt())
		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
				slot_id, ep_index);
	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (xhci->hci_version == 0x100) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, false, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);
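	/*
	 * Illustration (hypothetical request): GET_DESCRIPTOR with
	 * bRequestType 0x80, bRequest 0x06, wValue 0x0100, wIndex 0 and
	 * wLength 18 packs into field1 = 0x01000680 and field2 = 0x00120000,
	 * i.e. the raw 8-byte setup packet carried as immediate data.
	 */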

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, false, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
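	/*
	 * For example: GET_DESCRIPTOR moves data IN, so its status TRB is an
	 * OUT; SET_ADDRESS moves no data, so its status TRB is an IN.
	 */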
	queue_trb(xhci, ep_ring, false, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}

static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
		struct urb *urb, int i)
{
	int num_trbs = 0;
	u64 addr, td_len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	td_len = urb->iso_frame_desc[i].length;

	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}
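
/*
 * Worked example (hypothetical values): a 3072-byte frame whose buffer
 * begins 512 bytes below a 64KB boundary has an in-block offset of 65024,
 * so DIV_ROUND_UP(3072 + 65024, 65536) = 2 TRBs are needed to cover the
 * boundary crossing.
 */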

/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD.  Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst.  Basically, for everything but SuperSpeed devices, this field will be
 * zero.  Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
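
/*
 * Worked example (hypothetical values): a SuperSpeed endpoint with
 * bMaxBurst = 2 moves up to three packets per burst, so a 7-packet TD
 * needs DIV_ROUND_UP(7, 3) = 3 bursts, giving a zero-based field of 2.
 */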

/*
 * Returns the number of packets in the last "burst" of packets.  This field is
 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
 * must contain (bMaxBurst + 1) packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	default:
		if (total_packet_count == 0)
			return 0;
		return total_packet_count - 1;
	}
}
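
/*
 * Continuing the hypothetical example: with bMaxBurst = 2 and 7 total
 * packets, 7 % 3 = 1, so the last burst carries one packet and the
 * zero-based TLBPC is 0.  A 6-packet TD has residue 0, so its last burst
 * is full and the field reports max_burst = 2.
 */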

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;

	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}

	if (!in_interrupt())
		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
				" addr = %#llx, num_tds = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				(unsigned long long)urb->transfer_dma,
				num_tds);

	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the first TRB, even if it's zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_packet_count;
		unsigned int burst_count;
		unsigned int residue;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		total_packet_count = DIV_ROUND_UP(td_len,
				le16_to_cpu(urb->ep->desc.wMaxPacketSize));
		/* A zero-length transfer still involves at least one packet. */
		if (total_packet_count == 0)
			total_packet_count++;
		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
				total_packet_count);
		residue = xhci_get_last_burst_packet_count(xhci,
				urb->dev, urb, total_packet_count);
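		/*
		 * Despite the name, "residue" holds the zero-based last-burst
		 * packet count (TLBPC), not a raw remainder.
		 */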

		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}

		td = urb_priv->td[i];
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;
			field = TRB_TBC(burst_count) | TRB_TLBPC(residue);

			if (first_trb) {
				/* Queue the isoc TRB */
				field |= TRB_TYPE(TRB_ISOC);
				/* Assume URB_ISO_ASAP is set */
				field |= TRB_SIA;
				if (i == 0) {
					if (start_cycle == 0)
						field |= 0x1;
				} else
					field |= ep_ring->cycle_state;
				first_trb = false;
			} else {
				/* Queue other normal TRBs */
				field |= TRB_TYPE(TRB_NORMAL);
				field |= ep_ring->cycle_state;
			}

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Chain all the TRBs together; clear the chain bit in
			 * the last TRB to indicate it's the last TRB in the
			 * chain.
			 */
			if (j < trbs_per_td - 1) {
				field |= TRB_CHAIN;
				more_trbs_coming = true;
			} else {
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				if (xhci->hci_version == 0x100) {
					/* Set BEI bit except for the last td */
					if (i < num_tds - 1)
						field |= TRB_BEI;
				}
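				/*
				 * With BEI set, a completed TD does not raise
				 * an interrupt right away, so only the URB's
				 * final TD interrupts the host.
				 */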
				more_trbs_coming = false;
			}

			/* Calculate TRB length */
			trb_buff_len = TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			if (xhci->hci_version < 0x100) {
				remainder = xhci_td_remainder(
						td_len - running_total);
			} else {
				remainder = xhci_v1_0_td_remainder(
						running_total, trb_buff_len,
						total_packet_count, urb);
			}
			length_field = TRB_LEN(trb_buff_len) |
				remainder |
				TRB_INTR_TARGET(0);

			queue_trb(xhci, ep_ring, false, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			return -EINVAL;
		}
	}

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit. That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them.  td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

/*
 * Check the transfer ring to guarantee there is enough room for the urb.
 * Update the ISO URB start_frame and interval.
 * Update the interval as xhci_queue_intr_tx does.  For now, just use the
 * xHCI frame_index to set urb->start_frame.
 * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int xhci_interval;
	int ep_interval;
	int num_tds, num_trbs, i;
	int ret;

	xdev = xhci->devs[slot_id];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(xhci, urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;

	urb->start_frame = start_frame;
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		urb->start_frame >>= 3;
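	/*
	 * MFINDEX counts 125 us microframes, but LS/FS devices schedule in
	 * 1 ms frames, hence the shift by 3 above (e.g. microframe index 80
	 * corresponds to frame 10).
	 */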

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}

/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}
	queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}
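
/*
 * For example (hypothetical state): with cmd_ring_reserved_trbs = 2, an
 * ordinary command asks prepare_ring() for three free TRBs, keeping the
 * two reserved slots available for must-succeed commands, which ask for
 * only those two slots.
 */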

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return 0;
	}
	ep->queued_deq_seg = deq_seg;
	ep->queued_deq_ptr = deq_ptr;
	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
			false);
}