/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is the producer, it rings the doorbell for command
 *    and endpoint rings.  If the HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates event ring dequeue pointer.  HC is the consumer for the command and
 *    endpoint rings; it generates events on the event ring for these.
 */
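
/*
 * A minimal consumer-side sketch of the rules above (illustration only, not
 * driver code).  "my_ring" and process_trb() are hypothetical; the real
 * consumer logic lives in inc_deq() and the event handlers below.
 *
 *	union xhci_trb *trb = my_ring->dequeue;
 *
 *	// Consumer rule 1: the TRB is ours only if its cycle bit matches
 *	// our ring cycle state (CCS).
 *	while ((le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
 *			my_ring->cycle_state) {
 *		process_trb(trb);		// consume the TRB
 *		inc_deq(xhci, my_ring);		// rule 2: may toggle CCS
 *		trb = my_ring->dequeue;
 *	}
 *	// Consumer rule 3: notify the producer (for the event ring, SW
 *	// writes the new dequeue pointer to the ERDP register).
 */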

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

static int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return TRB_TYPE_LINK_LE32(link->control);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	union xhci_trb *next;
	unsigned long long addr;

	ring->deq_updates++;

	/* If this is not event ring, there is one more usable TRB */
	if (ring->type != TYPE_EVENT &&
			!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
		ring->num_trbs_free++;
	next = ++(ring->dequeue);

	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci,
				ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;
	unsigned long long addr;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (ring->type != TYPE_EVENT &&
			!last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (ring->type != TYPE_EVENT) {
			/*
			 * If the caller doesn't plan on enqueueing more
			 * TDs before ringing the doorbell, then we
			 * don't want to give the link TRB to the
			 * hardware just yet.  We'll give the link TRB
			 * back in prepare_ring() just before we enqueue
			 * the TD at the top of the ring.
			 */
			if (!chain && !more_trbs_coming)
				break;

			/* If we're not dealing with 0.95 hardware or
			 * isoc rings on AMD 0.96 host,
			 * carry over the chain bit of the previous TRB
			 * (which may mean the chain bit is cleared).
			 */
			if (!(ring->type == TYPE_ISOC &&
					(xhci->quirks & XHCI_AMD_0x96_HOST))
						&& !xhci_link_trb_quirk(xhci)) {
				next->link.control &=
					cpu_to_le32(~TRB_CHAIN);
				next->link.control |=
					cpu_to_le32(chain);
			}
			/* Give this link TRB to the hardware */
			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "// Ding dong!\n");
	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (!(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb	*trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
			*cycle_state ^= 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return NULL;
	}
	return cur_seg;
}

static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t addr;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	state->new_cycle_state = 0;
	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	trb = &state->new_deq_ptr->generic;
	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
		state->new_cycle_state ^= 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/*
	 * If there is only one segment in a ring, find_trb_seg()'s while loop
	 * will not run, and it will return before it has a chance to see if it
	 * needs to toggle the cycle bit.  It can't tell if the stalled transfer
	 * ended just before the link TRB on a one-segment ring, or if the TD
	 * wrapped around the top of the ring, because it doesn't have the TD in
	 * question.  Look for the one-segment case where stalled TRB's address
	 * is greater than the new dequeue pointer address.
	 */
	if (ep_ring->first_seg == ep_ring->first_seg->next &&
			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
		state->new_cycle_state ^= 0x1;
	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
					cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
					(unsigned long long)
					xhci_trb_virt_to_dma(cur_seg, cur_trb));
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd;
	struct urb	*urb;
	struct urb_priv	*urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(xhci, urb_priv);
		spin_lock(&xhci->lock);
	}
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(
			     le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
		slot_id = TRB_TO_SLOT_ID(
			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
				event);
		else
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ep->stopped_trb = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_virt_ep *temp_ep;
	struct xhci_ring *ring;
	struct xhci_td *cur_td;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
				"exiting.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled. If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++) {
			temp_ep = &xhci->devs[i]->eps[j];
			ring = temp_ep->ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
					"ep index %u\n", i, j);
			while (!list_empty(&ring->td_list)) {
				cur_td = list_first_entry(&ring->td_list,
						struct xhci_td,
						td_list);
				list_del_init(&cur_td->td_list);
				if (!list_empty(&cur_td->cancelled_td_list))
					list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
			while (!list_empty(&temp_ep->cancelled_td_list)) {
				cur_td = list_first_entry(
						&temp_ep->cancelled_td_list,
						struct xhci_td,
						cancelled_td_list);
				list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg(xhci, "Calling usb_hc_died()\n");
	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
	xhci_dbg(xhci, "xHCI host controller is dead.\n");
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
				ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for "
				"freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
				  GET_COMP_CODE(le32_to_cpu(event->status)));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
			 le64_to_cpu(ep_ctx->deq));
		if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
					 dev->eps[ep_index].queued_deq_ptr) ==
		    (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq "
					"Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
					dev->eps[ep_index].queued_deq_seg,
					dev->eps[ep_index].queued_deq_ptr);
		}
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void handle_reset_ep_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	int slot_id;
	unsigned int ep_index;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
		 GET_COMP_CODE(le32_to_cpu(event->status)));

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}

/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event)
{
	struct xhci_command *command;

	if (list_empty(&virt_dev->cmd_list))
		return 0;

	command = list_entry(virt_dev->cmd_list.next,
			struct xhci_command, cmd_list);
	if (xhci->cmd_ring->dequeue != command->command_trb)
		return 0;

	command->status = GET_COMP_CODE(le32_to_cpu(event->status));
	list_del(&command->cmd_list);
	if (command->completion)
		complete(command->completion);
	else
		xhci_free_command(xhci, command);
	return 1;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
		& TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id]) {
			if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
				/* Delete default control endpoint resources */
				xhci_free_device_endpoint_resources(xhci,
						xhci->devs[slot_id], true);
			xhci_free_virt_device(xhci, slot_id);
		}
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		/*
		 * Configure endpoint commands can come from the USB core
		 * configuration or alt setting changes, or because the HW
		 * needed an extra configure endpoint command after a reset
		 * endpoint command or streams were being configured.
		 * If the command was for a halted endpoint, the xHCI driver
		 * is not waiting on the configure endpoint command.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
		/* A usb_set_interface() call directly after clearing a halted
		 * condition may race on this quirky hardware.  Not worth
		 * worrying about, since this is prototype hardware.  Not sure
		 * if this will work for streams, but streams support was
		 * untested on this prototype.
		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
				ep_index != (unsigned int) -1 &&
		    le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
		    le32_to_cpu(ctrl_ctx->drop_flags)) {
			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			if (!(ep_state & EP_HALTED))
				goto bandwidth_change;
			xhci_dbg(xhci, "Completed config ep cmd - "
					"last ep index = %d, state = %d\n",
					ep_index, ep_state);
			/* Clear internal halted state and restart ring(s) */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
			break;
		}
bandwidth_change:
		xhci_dbg(xhci, "Completed config ep cmd\n");
		xhci->devs[slot_id]->cmd_status =
			GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		break;
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		slot_id = TRB_TO_SLOT_ID(
			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	case TRB_TYPE(TRB_NEC_GET_FW):
		if (!(xhci->quirks & XHCI_NEC_HOST)) {
			xhci->error_bitmask |= 1 << 6;
			break;
		}
		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
			 NEC_FW_MAJOR(le32_to_cpu(event->status)),
			 NEC_FW_MINOR(le32_to_cpu(event->status)));
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
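
/*
 * Worked example (hypothetical port layout, for illustration only): with
 * port_array[] = { 0x02, 0x03, 0x02 } -- a USB 2.0, a USB 3.0, and another
 * USB 2.0 port -- a hardware port_id of 3 handled by the USB 2.0 roothub
 * counts one similar-speed port before it (index 0), so the function returns
 * faked port index 1; adding one gives the 1-based port number 2 that is
 * passed to xhci_find_slot_id_by_port().
 */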

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];
	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
		hcd = xhci->shared_hcd;
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed == HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = xhci_readl(xhci, port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = xhci_readl(xhci, &xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED(temp)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(20);
			set_bit(faked_port_index, &bus_state->resuming_ports);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
			DEV_SUPERSPEED(temp)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	if (hcd->speed != HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

1445 1446 1447 1448 1449 1450
/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns 0.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb	*start_trb,
		union xhci_trb	*end_trb,
		dma_addr_t	suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}

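/* Record which TD and TRB the endpoint halted on, then start recovery:
 * queue a Reset Endpoint command, move the hardware dequeue pointer past
 * the stalled TD (xhci_cleanup_stalled_ring() queues the Set TR Dequeue
 * Pointer command for that), and ring the command doorbell.
 */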
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	ep->ep_state |= EP_HALTED;
	ep->stopped_td = td;
	ep->stopped_trb = event_trb;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;
	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * clean up the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted. The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
		    cpu_to_le32(EP_STATE_HALTED))
			return 1;

	return 0;
}

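/* Vendor-defined "informational" completion codes (224 through 255) are
 * not errors; return 1 so the caller can treat the transfer as successful.
 */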
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

/*
 * Finish the TD processing: remove the TD from the TD list and
 * return 1 if the URB can be given back to the USB core.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv	*urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		ep->stopped_trb = event_trb;
		return 0;
	} else {
		if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD.  We can't do that here because
			 * the halt condition must be cleared first.  Let the
			 * USB class driver clear the stall later.
			 */
			ep->stopped_td = td;
			ep->stopped_trb = event_trb;
			ep->stopped_stream = ep_ring->stream_id;
		} else if (xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code)) {
			/* Other types of errors halt the endpoint, but the
			 * class driver doesn't call usb_reset_endpoint() unless
			 * the error is -EPIPE.  Clear the halted status in the
			 * xHCI hardware manually.
			 */
			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, ep_ring->stream_id,
					td, event_trb);
		} else {
			/* Update ring dequeue pointer */
			while (ep_ring->dequeue != td->last_trb)
				inc_deq(xhci, ep_ring);
			inc_deq(xhci, ep_ring);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;
		urb_priv = urb->hcpriv;

		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned).  Play it safe and say we didn't
		 * transfer anything.
		 */
		if (urb->actual_length > urb->transfer_buffer_length) {
			xhci_warn(xhci, "URB transfer length is wrong, "
					"xHC issue? req. len = %u, "
					"act. len = %u\n",
					urb->transfer_buffer_length,
					urb->actual_length);
			urb->actual_length = 0;
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
		}
		list_del_init(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list))
			list_del_init(&td->cancelled_td_list);

		urb_priv->td_cnt++;
	/* Give back the URB when all of its TDs are completed */
		if (urb_priv->td_cnt == urb_priv->length) {
			ret = 1;
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
				xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
				if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
					== 0) {
					if (xhci->quirks & XHCI_AMD_PLL_FIX)
						usb_amd_quirk_pll_enable();
				}
			}
		}
	}

	return ret;
}

/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (event_trb == ep_ring->dequeue) {
			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	case COMP_STOP_INVAL:
	case COMP_STOP:
		return finish_td(xhci, td, event_trb, event, ep, status, false);
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error code %u, "
				"halted endpoint index = %u\n",
				trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL:
		/* Did we transfer part of the data (middle) phase? */
		if (event_trb != ep_ring->dequeue &&
				event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length
				- TRB_LEN(le32_to_cpu(event->transfer_len));
		else
			td->urb->actual_length = 0;

		xhci_cleanup_halted_endpoint(xhci,
			slot_id, ep_index, 0, td, event_trb);
		return finish_td(xhci, td, event_trb, event, ep, status, true);
	}
	/*
	 * Did we transfer any data, despite the errors that might have
	 * happened?  I.e. did we get past the setup stage?
	 */
	if (event_trb != ep_ring->dequeue) {
		/* The event was for the status stage */
		if (event_trb == td->last_trb) {
			if (td->urb->actual_length != 0) {
				/* Don't overwrite a previously set error code
				 */
				if ((*status == -EINPROGRESS || *status == 0) &&
						(td->urb->transfer_flags
						 & URB_SHORT_NOT_OK))
					/* Did we already see a short data
					 * stage? */
					*status = -EREMOTEIO;
			} else {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
			}
		} else {
			/* Maybe the event was for the data stage? */
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				TRB_LEN(le32_to_cpu(event->transfer_len));
			xhci_dbg(xhci, "Waiting for status "
					"stage event\n");
			return 0;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	int len = 0;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool skip_td = false;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
			frame->status = 0;
			break;
		}
		if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
			trb_comp_code = COMP_SHORT_TX;
	case COMP_SHORT_TX:
		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
				-EREMOTEIO : 0;
		break;
	case COMP_BW_OVER:
		frame->status = -ECOMM;
		skip_td = true;
		break;
	case COMP_BUFF_OVER:
	case COMP_BABBLE:
		frame->status = -EOVERFLOW;
		skip_td = true;
		break;
	case COMP_DEV_ERR:
	case COMP_STALL:
	case COMP_TX_ERR:
		frame->status = -EPROTO;
		skip_td = true;
		break;
	case COMP_STOP:
	case COMP_STOP_INVAL:
		break;
	default:
		frame->status = -1;
		break;
	}

	if (trb_comp_code == COMP_SUCCESS || skip_td) {
		frame->actual_length = frame->length;
		td->urb->actual_length += frame->length;
	} else {
		for (cur_trb = ep_ring->dequeue,
		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
			TRB_LEN(le32_to_cpu(event->transfer_len));

		if (trb_comp_code != COMP_STOP_INVAL) {
			frame->actual_length = len;
			td->urb->actual_length += len;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

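/* Complete a TD the xHC skipped after a Missed Service Interval as a short
 * transfer: mark its frame descriptor with -EXDEV, advance the ring's
 * dequeue pointer past the TD, and finish the TD with no event TRB (a
 * skipped TD has no matching transfer event of its own).
 */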
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring);
	inc_deq(xhci, ep_ring);

	return finish_td(xhci, td, NULL, event, ep, status, true);
}

/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	u32 trb_comp_code;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* Double check that the HW transferred everything. */
		if (event_trb != td->last_trb ||
				TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			xhci_warn(xhci, "WARN Successful completion "
					"on short TX\n");
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
			if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
				trb_comp_code = COMP_SHORT_TX;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	default:
		/* Others already handled above */
		break;
	}
	if (trb_comp_code == COMP_SHORT_TX)
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				TRB_LEN(le32_to_cpu(event->transfer_len)));
	/* Fast path - was this the last TRB in the TD for this URB? */
	if (event_trb == td->last_trb) {
		if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				TRB_LEN(le32_to_cpu(event->transfer_len));
			if (td->urb->transfer_buffer_length <
					td->urb->actual_length) {
				xhci_warn(xhci, "HC gave bad length "
						"of %d bytes left\n",
					  TRB_LEN(le32_to_cpu(event->transfer_len)));
				td->urb->actual_length = 0;
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
			/* Don't overwrite a previously set error code */
			if (*status == -EINPROGRESS) {
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
		} else {
			td->urb->actual_length =
				td->urb->transfer_buffer_length;
			/* Ignore a short packet completion if the
			 * untransferred length was zero.
			 */
			if (*status == -EREMOTEIO)
				*status = 0;
		}
	} else {
		/* Slow path - walk the list, starting from the dequeue
		 * pointer, to get the actual length transferred.
		 */
		td->urb->actual_length = 0;
		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
				cur_trb != event_trb;
				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				td->urb->actual_length +=
					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		/* If the ring didn't stop on a Link or No-op TRB, add
		 * in the actual bytes transferred from the Normal TRB
		 */
		if (trb_comp_code != COMP_STOP_INVAL)
			td->urb->actual_length +=
				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
				TRB_LEN(le32_to_cpu(event->transfer_len));
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = NULL;
	int status = -EINPROGRESS;
	struct urb_priv *urb_priv;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int ret = 0;
	int td_num = 0;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring ||
	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
	    EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
				"or incorrect stream ring\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	event_dma = le64_to_cpu(event->buffer);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
		if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
			break;
		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
			trb_comp_code = COMP_SHORT_TX;
		else
			xhci_warn(xhci, "WARN Successful completion on short TX: "
					"needs XHCI_TRUST_TX_LENGTH quirk?\n");
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STALL:
		xhci_dbg(xhci, "Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_dbg(xhci, "Transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_dbg(xhci, "Babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	case COMP_BW_OVER:
		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
		break;
	case COMP_BUFF_OVER:
		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
		break;
	case COMP_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_DEV_ERR:
		xhci_warn(xhci, "WARN: detect an incompatible device");
		status = -EPROTO;
		break;
	case COMP_MISSED_INT:
		/*
		 * When we encounter a missed service error, one or more
		 * isoc TDs may have been missed by the xHC.
		 * Set the skip flag of the ep_ring; complete the missed TDs
		 * as short transfers when processing the ep_ring next time.
		 */
		ep->skip = true;
		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
		goto cleanup;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
				"busted\n");
		goto cleanup;
	}

	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
					"with no TDs queued?\n",
				  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				  ep_index);
			xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
				 (le32_to_cpu(event->flags) &
				  TRB_TYPE_BITMASK)>>10);
			xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip "
						"flag set. Clear skip flag.\n");
			}
			ret = 0;
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
						"Clear skip flag.\n");
			ret = 0;
			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, event_dma);

		/*
		 * Skip the Force Stopped Event.  The event_trb(event_dma) of
		 * the FSE is not in the current TD pointed to by
		 * ep_ring->dequeue, because the hardware dequeue pointer is
		 * still at the previous TRB of the current TD.  The previous
		 * TRB may be a Link TRB or the last TRB of the previous TD.
		 * The command completion handler will take care of the rest.
		 */
		if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
			ret = 0;
			goto cleanup;
		}

		if (!event_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					ret = 0;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD\n");
				return -ESHUTDOWN;
			}

			ret = skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_TX)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
			ep->skip = false;
		}

		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
						sizeof(*event_trb)];
		/*
		 * No-op TRB should not trigger interrupts.
		 * If event_trb is a no-op TRB, it means the
		 * corresponding TD has been cancelled. Just ignore
		 * the TD.
		 */
		if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
			xhci_dbg(xhci,
				 "event_trb is a no-op TRB. Skip it\n");
			goto cleanup;
		}

		/* Now update the urb's actual_length and give back to
		 * the core
		 */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
						 &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			ret = process_isoc_td(xhci, td, event_trb, event, ep,
						 &status);
		else
			ret = process_bulk_intr_td(xhci, td, event_trb, event,
						 ep, &status);

cleanup:
		/*
		 * Do not update event ring dequeue pointer if ep->skip is set.
		 * We'll roll back to continue processing the missed TDs.
		 */
		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
			inc_deq(xhci, xhci->event_ring);
		}

		if (ret) {
			urb = td->urb;
			urb_priv = urb->hcpriv;
			/* Leave the TD around for the reset endpoint function
			 * to use (but only if it's not a control endpoint,
			 * since we already queued the Set TR dequeue pointer
			 * command for stalled control endpoints).
			 */
			if (usb_endpoint_xfer_control(&urb->ep->desc) ||
				(trb_comp_code != COMP_STALL &&
					trb_comp_code != COMP_BABBLE))
				xhci_urb_free_priv(xhci, urb_priv);

			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
			if ((urb->actual_length != urb->transfer_buffer_length &&
						(urb->transfer_flags &
						 URB_SHORT_NOT_OK)) ||
					(status != 0 &&
					 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
						"expected = %d, status = %d\n",
						urb, urb->actual_length,
						urb->transfer_buffer_length,
						status);
			spin_unlock(&xhci->lock);
			/* EHCI, UHCI, and OHCI always unconditionally set the
			 * urb->status of an isochronous endpoint to 0.
			 */
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
				status = 0;
			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
			spin_lock(&xhci->lock);
		}

	/*
	 * If ep->skip is set, it means there are missed TDs on the
	 * endpoint ring that we need to take care of.
	 * Process them as short transfers until we reach the TD pointed
	 * to by the event.
	 */
	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);

	return 0;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done.  In the future, <0 returns should indicate an error code.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return 0;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return 0;
	}

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();
	/* FIXME: Handle more event types. */
	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_DEV_NOTE):
		handle_device_notification(xhci, event);
		break;
	default:
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
		    TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci->error_bitmask |= 1 << 3;
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring);

	/* Are there more items on the event ring?  Caller will call us again to
	 * check.
	 */
	return 1;
}

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 status;
	union xhci_trb *trb;
	u64 temp_64;
	union xhci_trb *event_ring_deq;
	dma_addr_t deq;

	spin_lock(&xhci->lock);
	trb = xhci->event_ring->dequeue;
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = xhci_readl(xhci, &xhci->op_regs->status);
	if (status == 0xffffffff)
		goto hw_died;

	if (!(status & STS_EINT)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	xhci_writel(xhci, status, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	if (hcd->irq) {
		u32 irq_pending;
		/* Acknowledge the PCI interrupt */
		irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
				&xhci->ir_set->erst_dequeue);
		spin_unlock(&xhci->lock);

		return IRQ_HANDLED;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event "
					"ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);

	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}

irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
{
	return xhci_irq(hcd);
}

/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);
	inc_enq(xhci, ring, more_trbs_coming);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	unsigned int num_trbs_needed;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

	while (1) {
		if (room_on_ring(xhci, ep_ring, num_trbs))
			break;

		if (ep_ring == xhci->cmd_ring) {
			xhci_err(xhci, "Do not support expand command ring\n");
			return -ENOMEM;
		}

		xhci_dbg(xhci, "ERROR no room on ep ring, "
					"try ring expansion\n");
		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
					mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next;

		next = ring->enqueue;

		while (last_trb(xhci, ring, ring->enq_seg, next)) {
			/* If we're not dealing with 0.95 hardware or isoc rings
			 * on AMD 0.96 host, clear the chain bit.
			 */
			if (!xhci_link_trb_quirk(xhci) &&
					!(ring->type == TYPE_ISOC &&
					 (xhci->quirks & XHCI_AMD_0x96_HOST)))
				next->link.control &= cpu_to_le32(~TRB_CHAIN);
			else
				next->link.control |= cpu_to_le32(TRB_CHAIN);

			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
			ring->enq_seg = ring->enq_seg->next;
			ring->enqueue = ring->enq_seg->trbs;
			next = ring->enqueue;
		}
	}

	return 0;
}

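/* Get the ring for this endpoint and stream ID ready to queue one TD:
 * check the endpoint state and available room, link the URB into the
 * endpoint on its first TD, and initialize the TD that will track this
 * part of the transfer.
 */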
static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td	*td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	urb_priv->td[td_index] = td;

	return 0;
}

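/* Count the TRBs needed for a scatter-gather URB.  Every sg entry needs at
 * least one TRB, plus an extra TRB for each 64KB boundary the entry's DMA
 * buffer crosses, since a single TRB buffer must not span a 64KB boundary.
 */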
static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_mapped_sgs;
	temp = urb->transfer_buffer_length;

	num_trbs = 0;
	for_each_sg(urb->sg, sg, num_sgs, i) {
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
		running_total &= TRB_MAX_BUFF_SIZE - 1;
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg) && running_total < temp) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	return num_trbs;
}

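/* Sanity-check the TRB accounting after a TD has been queued: every
 * counted TRB should have been consumed, and the queued byte total should
 * equal the URB's transfer buffer length.
 */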
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
				"TRBs, %d left\n", __func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

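/* Hand a fully built TD to the hardware.  The first TRB's cycle bit is
 * written only now, after the barrier, so the xHC can never see a
 * half-constructed TD; then the endpoint doorbell is rung.
 */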
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}

/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
			xhci->devs[slot_id]->out_ctx, ep_index);
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/*
 * The TD size is the number of bytes remaining in the TD (including this TRB),
 * right shifted by 10.
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */
static u32 xhci_td_remainder(unsigned int remainder)
{
	u32 max = (1 << (21 - 17 + 1)) - 1;

	if ((remainder >> 10) >= max)
		return max << 17;
	else
		return (remainder >> 10) << 17;
}
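
/* Worked example: with 20480 bytes remaining, 20480 >> 10 = 20, so the
 * returned field is 20 << 17.  Any remainder of 31744 (31 << 10) bytes or
 * more saturates at the field's maximum value of 31.
 */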

/*
 * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
 * the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     roundup(TD size in bytes / wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */

static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
		unsigned int total_packet_count, struct urb *urb)
{
	int packets_transferred;

	/* One TRB with a zero-length data packet. */
	if (running_total == 0 && trb_buff_len == 0)
		return 0;

	/* All the TRB queueing functions don't count the current TRB in
	 * running_total.
	 */
	packets_transferred = (running_total + trb_buff_len) /
		usb_endpoint_maxp(&urb->ep->desc);

	return xhci_td_remainder(total_packet_count - packets_transferred);
}

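/* Queue the TRBs for a scatter-gather bulk URB: split each sg entry on
 * 64KB boundaries, chain every TRB of the TD together, and flip the first
 * TRB's cycle bit only once the whole TD has been queued.
 */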
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total;
	unsigned int total_packet_count;
	bool first_trb;
	u64 addr;
	bool more_trbs_coming;

	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_mapped_sgs;
	total_packet_count = roundup(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));

	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller than
	 *    the amount of memory allocated for this scatter-gather list.
	 * 3. TRBs buffers can't cross 64KB boundaries.
	 */
	sg = urb->sg;
	addr = (u64) sg_dma_address(sg);
	this_sg_len = sg_dma_len(sg);
	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;
	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 field = 0;
		u32 length_field = 0;
		u32 remainder = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		if (TRB_MAX_BUFF_SIZE -
				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
					(unsigned int) addr + trb_buff_len);
		}

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer --
		 * Are we done queueing all the TRBs for this sg entry?
		 */
		this_sg_len -= trb_buff_len;
		if (this_sg_len == 0) {
			--num_sgs;
			if (num_sgs == 0)
				break;
			sg = sg_next(sg);
			addr = (u64) sg_dma_address(sg);
			this_sg_len = sg_dma_len(sg);
		} else {
			addr += trb_buff_len;
		}

		trb_buff_len = TRB_MAX_BUFF_SIZE -
			(addr & (TRB_MAX_BUFF_SIZE - 1));
		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
		if (running_total + trb_buff_len > urb->transfer_buffer_length)
			trb_buff_len =
				urb->transfer_buffer_length - running_total;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	bool more_trbs_coming;
	int start_cycle;
	u32 field, length_field;

	int running_total, trb_buff_len, ret;
	unsigned int total_packet_count;
	u64 addr;

	if (urb->num_sgs)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	total_packet_count = roundup(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));
	/* How much data is in the first TRB? */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}
3064 3065 3066 3067 3068

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

3069 3070 3071 3072 3073 3074 3075 3076 3077
		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb);
		}
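
		/*
		 * Illustrative example (editor's sketch; it assumes
		 * xhci_v1_0_td_remainder(), defined earlier in this file,
		 * encodes the packets remaining after this TRB, per the
		 * xHCI 1.0 TD Size field): a 3072-byte transfer with a
		 * 512-byte maxp split into TRBs of 1024 and 2048 bytes has
		 * total_packet_count = 6; the first TRB encodes
		 * 6 - (1024 / 512) = 4 packets remaining, and the last TRB
		 * always encodes 0.
		 */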
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * No need to check whether we need additional event data or normal
	 * TRBs, since data in control transfers will never be bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (xhci->hci_version == 0x100) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);
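
	/*
	 * Worked example of the packing above (hypothetical request): a
	 * GET_DESCRIPTOR(DEVICE) setup packet with bRequestType = 0x80,
	 * bRequest = 0x06, wValue = 0x0100, wIndex = 0 and wLength = 18
	 * becomes the immediate data words 0x01000680 and 0x00120000.
	 */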

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}

static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
		struct urb *urb, int i)
{
	int num_trbs = 0;
	u64 addr, td_len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	td_len = urb->iso_frame_desc[i].length;

	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}
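
/*
 * Worked example of the rounding above (hypothetical values): a 6000-byte
 * iso_frame_desc whose buffer starts 0x100 bytes past a 64KB boundary needs
 * DIV_ROUND_UP(6000 + 0x100, 65536) = 1 TRB; the same length starting
 * 0xfc00 bytes past the boundary straddles it and needs 2 TRBs.
 */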

/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD.  Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst.  Basically, for everything but SuperSpeed devices, this field will be
 * zero.  Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
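
/*
 * Worked example (hypothetical endpoint): a SuperSpeed isoc endpoint with
 * bMaxBurst = 2 moves up to three packets per burst, so an 8-packet TD
 * needs DIV_ROUND_UP(8, 3) = 3 bursts and the zero-based field is 2.
 * For a high-speed device, or on a pre-1.0 host, the function returns 0.
 */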

/*
 * Returns the number of packets in the last "burst" of packets.  This field is
 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	default:
		if (total_packet_count == 0)
			return 0;
		return total_packet_count - 1;
	}
}
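
/*
 * Worked example (hypothetical endpoint): with bMaxBurst = 2 and an
 * 8-packet TD, the bursts contain 3 + 3 + 2 packets; the residue is
 * 8 % 3 = 2, so the zero-based TLBPC value is 1.  For a full 9-packet TD
 * the residue is 0 and the field is max_burst = 2.
 */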

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;

	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}

	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the first TRB, even if it's zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_packet_count;
		unsigned int burst_count;
		unsigned int residue;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		total_packet_count = DIV_ROUND_UP(td_len,
				usb_endpoint_maxp(&urb->ep->desc));
		/* A zero-length transfer still involves at least one packet. */
		if (total_packet_count == 0)
			total_packet_count++;
		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
				total_packet_count);
		residue = xhci_get_last_burst_packet_count(xhci,
				urb->dev, urb, total_packet_count);

		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}

		td = urb_priv->td[i];
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;
			field = TRB_TBC(burst_count) | TRB_TLBPC(residue);

			if (first_trb) {
				/* Queue the isoc TRB */
				field |= TRB_TYPE(TRB_ISOC);
				/* Assume URB_ISO_ASAP is set */
				field |= TRB_SIA;
				if (i == 0) {
					if (start_cycle == 0)
						field |= 0x1;
				} else
					field |= ep_ring->cycle_state;
				first_trb = false;
			} else {
				/* Queue other normal TRBs */
				field |= TRB_TYPE(TRB_NORMAL);
				field |= ep_ring->cycle_state;
			}

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Chain all the TRBs together; clear the chain bit in
			 * the last TRB to indicate it's the last TRB in the
			 * chain.
			 */
			if (j < trbs_per_td - 1) {
				field |= TRB_CHAIN;
				more_trbs_coming = true;
			} else {
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				if (xhci->hci_version == 0x100) {
					/* Set BEI bit except for the last td */
					if (i < num_tds - 1)
						field |= TRB_BEI;
				}
				more_trbs_coming = false;
			}

			/* Calculate TRB length */
			trb_buff_len = TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			if (xhci->hci_version < 0x100) {
				remainder = xhci_td_remainder(
						td_len - running_total);
			} else {
				remainder = xhci_v1_0_td_remainder(
						running_total, trb_buff_len,
						total_packet_count, urb);
			}
			length_field = TRB_LEN(trb_buff_len) |
				remainder |
				TRB_INTR_TARGET(0);

			queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length unmatch\n");
3436 3437
			ret = -EINVAL;
			goto cleanup;
3438 3439 3440
		}
	}

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit. That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them.  td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

/*
 * Check transfer ring to guarantee there is enough room for the urb.
 * Update ISO URB start_frame and interval.
 * Update the interval as xhci_queue_intr_tx does.  For now, just use the
 * xHCI frame_index to update urb->start_frame.
 * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int xhci_interval;
	int ep_interval;
	int num_tds, num_trbs, i;
	int ret;

	xdev = xhci->devs[slot_id];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(xhci, urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;

	urb->start_frame = start_frame;
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		urb->start_frame >>= 3;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
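
	/*
	 * Worked example (hypothetical endpoint): if the endpoint context
	 * encodes an interval of 32 microframes but a full-speed driver
	 * submitted urb->interval = 1 frame (8 microframes), the URB is
	 * adjusted to xhci_interval = 32 and then converted back to
	 * 32 / 8 = 4 frames for the LS/FS caller.
	 */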
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}
	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}
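
/*
 * Illustrative example (hypothetical state): with cmd_ring_reserved_trbs = 2,
 * an ordinary command asks prepare_ring() for room for three TRBs, keeping
 * the last two free slots in reserve; a must-succeed command asks for only
 * the two reserved slots it is entitled to dip into.
 */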

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return 0;
	}
	ep->queued_deq_seg = deq_seg;
	ep->queued_deq_ptr = deq_ptr;
	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
			false);
}