/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for these.
 */
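
/*
 * Illustrative sketch only, not driver code: a minimal event-ring
 * consumer loop following the cycle bit rules above.  The function name
 * "poll_event_ring_sketch" is hypothetical; inc_deq() is the real
 * helper defined later in this file.
 *
 *	static void poll_event_ring_sketch(struct xhci_hcd *xhci)
 *	{
 *		struct xhci_ring *ring = xhci->event_ring;
 *		union xhci_trb *trb = ring->dequeue;
 *
 *		// The TRB is ours while its cycle bit matches our CCS.
 *		while ((le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) ==
 *				ring->cycle_state) {
 *			// ... process the event TRB here ...
 *			inc_deq(xhci, ring);	// may toggle cycle_state
 *			trb = ring->dequeue;
 *		}
 *		// Finally, write the new dequeue pointer to the ERDP
 *		// register to notify the producer (the xHC).
 *	}
 */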

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
#include "xhci-trace.h"

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

static int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return TRB_TYPE_LINK_LE32(link->control);
}

union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
{
	/* Enqueue pointer can be left pointing to the link TRB,
	 * we must handle that
	 */
	if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
		return ring->enq_seg->next->trbs;
	return ring->enqueue;
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	ring->deq_updates++;

	/*
	 * If this is not an event ring, and the dequeue pointer
	 * is not on a link TRB, there is one more usable TRB
	 */
	if (ring->type != TYPE_EVENT &&
			!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
		ring->num_trbs_free++;

	do {
		/*
		 * Update the dequeue pointer further if that was a link TRB or
		 * we're at the end of an event ring segment (which doesn't have
		 * link TRBs)
		 */
		if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
			if (ring->type == TYPE_EVENT &&
					last_trb_on_last_seg(xhci, ring,
						ring->deq_seg, ring->dequeue)) {
				ring->cycle_state ^= 1;
			}
			ring->deq_seg = ring->deq_seg->next;
			ring->dequeue = ring->deq_seg->trbs;
		} else {
			ring->dequeue++;
		}
	} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not an event ring, there is one less usable TRB */
	if (ring->type != TYPE_EVENT &&
			!last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (ring->type != TYPE_EVENT) {
			/*
			 * If the caller doesn't plan on enqueueing more
			 * TDs before ringing the doorbell, then we
			 * don't want to give the link TRB to the
			 * hardware just yet.  We'll give the link TRB
			 * back in prepare_ring() just before we enqueue
			 * the TD at the top of the ring.
			 */
			if (!chain && !more_trbs_coming)
				break;

			/* If we're not dealing with 0.95 hardware or
			 * isoc rings on AMD 0.96 host,
			 * carry over the chain bit of the previous TRB
			 * (which may mean the chain bit is cleared).
			 */
			if (!(ring->type == TYPE_ISOC &&
					(xhci->quirks & XHCI_AMD_0x96_HOST))
						&& !xhci_link_trb_quirk(xhci)) {
				next->link.control &=
					cpu_to_le32(~TRB_CHAIN);
				next->link.control |=
					cpu_to_le32(chain);
			}
			/* Give this link TRB to the hardware */
			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}
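
/*
 * Illustrative sketch only, not driver code: producer rule 1 above in
 * practice.  A caller (in this driver, prepare_ring()) checks for room
 * before queueing; "enqueue_td_sketch" is a hypothetical name.
 *
 *	static int enqueue_td_sketch(struct xhci_hcd *xhci,
 *			struct xhci_ring *ring, unsigned int num_trbs)
 *	{
 *		if (!room_on_ring(xhci, ring, num_trbs))
 *			return -ENOMEM;	// or try to expand the ring first
 *		// ... write num_trbs TRBs, setting each cycle bit so the
 *		// first TRB is handed to the hardware last ...
 *		// ... then ring the endpoint doorbell ...
 *		return 0;
 *	}
 */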

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
		xhci_dbg(xhci, "The command ring isn't running, "
				"has the command ring been stopped?\n");
		return 0;
	}

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (!(temp_64 & CMD_RING_RUNNING)) {
		xhci_dbg(xhci, "Command ring had been stopped\n");
		return 0;
	}
	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
	 * time the completion of all xHCI commands, including
	 * the Command Abort operation. If software doesn't see
	 * CRR negated in a timely manner (e.g. longer than 5
	 * seconds), then it should assume that there are
	 * larger problems with the xHC and assert HCRST.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		xhci_err(xhci, "Stopping the command ring failed, "
				"maybe the host is dead\n");
		xhci->xhc_state |= XHCI_STATE_DYING;
		xhci_quiesce(xhci);
		xhci_halt(xhci);
		return -ESHUTDOWN;
	}

	return 0;
}
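
/*
 * For reference, a sketch of the polling contract assumed of
 * xhci_handshake() above (an assumption for illustration, not the
 * helper's actual implementation): spin until (value & mask) == done,
 * else fail after roughly @usec microseconds.
 *
 *	static int handshake_sketch(void __iomem *reg, u32 mask, u32 done,
 *			int usec)
 *	{
 *		u32 result;
 *
 *		do {
 *			result = readl(reg);
 *			if ((result & mask) == done)
 *				return 0;
 *			udelay(1);
 *			usec--;
 *		} while (usec > 0);
 *		return -ETIMEDOUT;
 *	}
 */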

static int xhci_queue_cd(struct xhci_hcd *xhci,
		struct xhci_command *command,
		union xhci_trb *cmd_trb)
{
	struct xhci_cd *cd;
	cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
	if (!cd)
		return -ENOMEM;
	INIT_LIST_HEAD(&cd->cancel_cmd_list);

	cd->command = command;
	cd->cmd_trb = cmd_trb;
	list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);

	return 0;
}

/*
 * Cancel a command that has been issued.
 *
 * Some commands may hang while waiting for acknowledgement from a
 * usb device. This is outside of the xHC's ability to control and
 * will cause the command ring to become blocked. When it occurs,
 * software should intervene to recover the command ring.
 * See Section 4.6.1.1 and 4.6.1.2
 */
int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
		union xhci_trb *cmd_trb)
{
	int retval = 0;
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_warn(xhci, "Abort the command ring,"
				" but the xHCI is dead.\n");
		retval = -ESHUTDOWN;
		goto fail;
	}

	/* queue the command descriptor to cancel_cmd_list */
	retval = xhci_queue_cd(xhci, command, cmd_trb);
	if (retval) {
		xhci_warn(xhci, "Queuing command descriptor failed.\n");
		goto fail;
	}

	/* abort command ring */
	retval = xhci_abort_cmd_ring(xhci);
	if (retval) {
		xhci_err(xhci, "Abort command ring failed\n");
		if (unlikely(retval == -ESHUTDOWN)) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
			xhci_dbg(xhci, "xHCI host controller is dead.\n");
			return retval;
		}
	}

fail:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return retval;
}
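
/*
 * Illustrative sketch only, not driver code: a submitter might arm a
 * timeout and fall back to xhci_cancel_cmd() when a command completion
 * never arrives.  "submit_and_wait_sketch" and the 5 second timeout
 * are hypothetical.
 *
 *	static int submit_and_wait_sketch(struct xhci_hcd *xhci,
 *			struct xhci_command *cmd, union xhci_trb *cmd_trb)
 *	{
 *		if (!wait_for_completion_timeout(cmd->completion, 5 * HZ))
 *			return xhci_cancel_cmd(xhci, cmd, cmd_trb);
 *		return 0;
 *	}
 */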

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb	*trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
			*cycle_state ^= 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return NULL;
	}
	return cur_seg;
}

static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}
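
/*
 * Illustrative sketch only, not driver code: how an URB enqueue path
 * can resolve its transfer ring and reject a bad stream ID;
 * "queue_urb_sketch" is a hypothetical name.
 *
 *	static int queue_urb_sketch(struct xhci_hcd *xhci, struct urb *urb)
 *	{
 *		struct xhci_ring *ring = xhci_urb_to_transfer_ring(xhci, urb);
 *
 *		if (!ring)	// invalid stream ID for this endpoint
 *			return -EINVAL;
 *		// ... queue TRBs for the URB on "ring" ...
 *		return 0;
 *	}
 */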

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	dma_addr_t addr;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	state->new_cycle_state = 0;
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding segment containing stopped TRB.");
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");
	/* 4.6.9 the css flag is written to the stream context for streams */
	if (ep->ep_state & EP_HAS_STREAMS) {
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring);
	} else {
		struct xhci_ep_ctx *ep_ctx
			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
		state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
	}

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding segment containing last TRB in TD.");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	trb = &state->new_deq_ptr->generic;
	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
		state->new_cycle_state ^= 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/*
	 * If there is only one segment in a ring, find_trb_seg()'s while loop
	 * will not run, and it will return before it has a chance to see if it
	 * needs to toggle the cycle bit.  It can't tell if the stalled transfer
	 * ended just before the link TRB on a one-segment ring, or if the TD
	 * wrapped around the top of the ring, because it doesn't have the TD in
	 * question.  Look for the one-segment case where stalled TRB's address
	 * is greater than the new dequeue pointer address.
	 */
	if (ep_ring->first_seg == ep_ring->first_seg->next &&
			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
		state->new_cycle_state ^= 0x1;
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Cancel (unchain) link TRB");
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
					cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"TRB to noop at offset 0x%llx",
					(unsigned long long)
					xhci_trb_virt_to_dma(cur_seg, cur_trb));
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status)
{
	struct usb_hcd *hcd;
	struct urb	*urb;
	struct urb_priv	*urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(xhci, urb_priv);
		spin_lock(&xhci->lock);
	}
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
				event);
		else
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ep->stopped_trb = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	/* Clear stopped_td and stopped_trb if endpoint is not halted */
	if (!(ep->ep_state & EP_HALTED)) {
		ep->stopped_td = NULL;
		ep->stopped_trb = NULL;
	}

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;

	while (!list_empty(&ring->td_list)) {
		cur_td = list_first_entry(&ring->td_list,
				struct xhci_td, td_list);
		list_del_init(&cur_td->td_list);
		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id + 1);
			xhci_kill_ring_urbs(xhci,
					ep->stream_info->stream_rings[stream_id]);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}
	while (!list_empty(&ep->cancelled_td_list)) {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but no command pending, "
				"exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled. If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Calling usb_hc_died()");
	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
				ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}

/* Complete the command and delete it from the device's command queue.
 */
static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_command *command, u32 status)
{
	command->status = status;
	list_del(&command->cmd_list);
	if (command->completion)
		complete(command->completion);
	else
		xhci_free_command(xhci, command);
}


/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event)
{
	struct xhci_command *command;

	if (list_empty(&virt_dev->cmd_list))
		return 0;

	command = list_entry(virt_dev->cmd_list.next,
			struct xhci_command, cmd_list);
	if (xhci->cmd_ring->dequeue != command->command_trb)
		return 0;

	xhci_complete_cmd_in_cmd_wait_list(xhci, command,
			GET_COMP_CODE(le32_to_cpu(event->status)));
	return 1;
}

/*
 * Find the command TRB that needs to be cancelled and modify it into a
 * No-op command.  If the command is on the device's command wait list,
 * finish it and free it.
 *
 * If we can't find the command TRB, we assume it has already been
 * executed.
 */
static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cmd_trb;
	u32 cycle_state;

	if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
		return;

	/* find the current segment of command ring */
	cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
			xhci->cmd_ring->dequeue, &cycle_state);

	if (!cur_seg) {
		xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
				xhci->cmd_ring->dequeue,
				(unsigned long long)
				xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
					xhci->cmd_ring->dequeue));
		xhci_debug_ring(xhci, xhci->cmd_ring);
		xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
		return;
	}

	/* find the command TRB matching cur_cd on the command ring */
	for (cmd_trb = xhci->cmd_ring->dequeue;
			cmd_trb != xhci->cmd_ring->enqueue;
			next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
		/* If the TRB is a link TRB, skip it */
		if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
			continue;

		if (cur_cd->cmd_trb == cmd_trb) {

			/* If the command is on the device's command list, we
			 * should finish it and free the command structure.
			 */
			if (cur_cd->command)
				xhci_complete_cmd_in_cmd_wait_list(xhci,
					cur_cd->command, COMP_CMD_STOP);

			/* get cycle state from the original command TRB */
			cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
				& TRB_CYCLE;

			/* modify the command TRB to a No-op command */
			cmd_trb->generic.field[0] = 0;
			cmd_trb->generic.field[1] = 0;
			cmd_trb->generic.field[2] = 0;
			cmd_trb->generic.field[3] = cpu_to_le32(
					TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
			break;
		}
	}
}

static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
{
	struct xhci_cd *cur_cd, *next_cd;

	if (list_empty(&xhci->cancel_cmd_list))
		return;

	list_for_each_entry_safe(cur_cd, next_cd,
			&xhci->cancel_cmd_list, cancel_cmd_list) {
		xhci_cmd_to_noop(xhci, cur_cd);
		list_del(&cur_cd->cancel_cmd_list);
		kfree(cur_cd);
	}
}

/*
 * Traverse the cancel_cmd_list.  If the command descriptor matching
 * cmd_trb is found, free it and return 1; otherwise return 0.
 */
static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
		union xhci_trb *cmd_trb)
{
	struct xhci_cd *cur_cd, *next_cd;

	if (list_empty(&xhci->cancel_cmd_list))
		return 0;

	list_for_each_entry_safe(cur_cd, next_cd,
			&xhci->cancel_cmd_list, cancel_cmd_list) {
		if (cur_cd->cmd_trb == cmd_trb) {
			if (cur_cd->command)
				xhci_complete_cmd_in_cmd_wait_list(xhci,
					cur_cd->command, COMP_CMD_STOP);
			list_del(&cur_cd->cancel_cmd_list);
			kfree(cur_cd);
			return 1;
		}
	}

	return 0;
}

/*
 * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
 * TRB pointed to by the command ring dequeue pointer is the TRB we want
 * to cancel or not.  And if the cmd_trb_comp_code is COMP_CMD_STOP, we
 * will traverse the cancel_cmd_list to turn all of the commands with a
 * matching command descriptor into No-op TRBs.
 */
static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
		int cmd_trb_comp_code)
{
	int cur_trb_is_good = 0;

	/* Searching the cmd trb pointed by the command ring dequeue
	 * pointer in command descriptor list. If it is found, free it.
	 */
	cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
			xhci->cmd_ring->dequeue);

	if (cmd_trb_comp_code == COMP_CMD_ABORT)
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	else if (cmd_trb_comp_code == COMP_CMD_STOP) {
		/* traversing the cancel_cmd_list and canceling
		 * the command according to command descriptor
		 */
		xhci_cancel_cmd_in_cd_list(xhci);

		xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
		/*
		 * ring command ring doorbell again to restart the
		 * command ring
		 */
		if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
			xhci_ring_cmd_db(xhci);
	}
	return cur_trb_is_good;
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		xhci->slot_id = slot_id;
	else
		xhci->slot_id = 0;
	complete(&xhci->addr_dev);
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;
	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	virt_dev = xhci->devs[slot_id];
	if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
		return;
	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			goto bandwidth_change;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
bandwidth_change:
	xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
			"Completed config ep cmd");
	virt_dev->cmd_status = cmd_comp_code;
	complete(&virt_dev->cmd_completion);
	return;
}

static void xhci_handle_cmd_eval_ctx(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;

	virt_dev = xhci->devs[slot_id];
	if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
		return;
	virt_dev->cmd_status = cmd_comp_code;
	complete(&virt_dev->cmd_completion);
}

static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id,
		u32 cmd_comp_code)
{
	xhci->devs[slot_id]->cmd_status = cmd_comp_code;
	complete(&xhci->addr_dev);
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	struct xhci_virt_device *virt_dev;

	xhci_dbg(xhci, "Completed reset device command.\n");
	virt_dev = xhci->devs[slot_id];
	if (virt_dev)
		handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
	else
		xhci_warn(xhci, "Reset device command completion "
				"for disabled slot %u\n", slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci->error_bitmask |= 1 << 6;
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}

	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
	if (cmd_comp_code == COMP_CMD_ABORT || cmd_comp_code == COMP_CMD_STOP) {
		/* If handle_stopped_cmd_ring() returns 0, the TRB at the
		 * command ring dequeue pointer is a good TRB: it was not
		 * cancelled, only stopped by the host, so handle it
		 * normally.  Otherwise, advance the dequeue pointer with
		 * inc_deq() and return.
		 */
		if (handle_stopped_cmd_ring(xhci, cmd_comp_code)) {
			inc_deq(xhci, xhci->cmd_ring);
			return;
		}
		/* There is no command to handle if we get a stop event when
		 * the command ring is empty; event->cmd_trb points to the
		 * next command TRB slot, which has not been set up yet.
		 */
		if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
			return;
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		xhci_handle_cmd_config_ep(xhci, slot_id, event, cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		xhci_handle_cmd_eval_ctx(xhci, slot_id, event, cmd_comp_code);
		break;
	case TRB_ADDR_DEV:
		xhci_handle_cmd_addr_dev(xhci, slot_id, cmd_comp_code);
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	/* Advance the command ring dequeue pointer past this command */
	inc_deq(xhci, xhci->cmd_ring);
}

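/* Vendor-specific event TRBs use TRB types 48-63.  The only vendor event this
 * driver understands is the NEC command completion event.
 */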
static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
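	/* Example: if the hw port array is {USB2, USB3, USB2, USB3} and
	 * port_id is 4 (the second USB 3.0 port), this loop counts one
	 * similar speed port, so the faked (0-based) port number is 1.
	 */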
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		inc_deq(xhci, xhci->event_ring);
		return;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];

	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
		hcd = xhci->shared_hcd;

	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed == HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = readl(port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = readl(&xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED(temp)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
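			/* USB 2.0 resume signaling must be driven for at
			 * least 20 ms (TDRSMDN in the USB 2.0 spec), hence
			 * the 20 ms timer before finishing the resume in
			 * GetPortStatus.
			 */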
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(20);
			set_bit(faked_port_index, &bus_state->resuming_ports);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
			DEV_SUPERSPEED(temp)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state).  If so, let the driver know it's
	 * out of the RExit state.
	 */
	if (!DEV_SUPERSPEED(temp) &&
			test_and_clear_bit(faked_port_index,
				&bus_state->rexit_ports)) {
		complete(&bus_state->rexit_done[faked_port_index]);
		bogus_port_status = true;
		goto cleanup;
	}

	if (hcd->speed != HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set.  When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb	*start_trb,
		union xhci_trb	*end_trb,
		dma_addr_t	suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}

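/* Recover a halted endpoint: queue a Reset Endpoint command, move the ring's
 * dequeue pointer past the stalled TD (xhci_cleanup_stalled_ring() queues a
 * Set TR Dequeue Pointer command), and ring the command ring doorbell.
 */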
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	ep->ep_state |= EP_HALTED;
	ep->stopped_td = td;
	ep->stopped_trb = event_trb;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;
	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted. The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
		    cpu_to_le32(EP_STATE_HALTED))
			return 1;

	return 0;
}

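/* Completion codes 224 through 255 are reserved by the xHCI spec for vendor
 * defined "informational" use, so they are treated as success.
 */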
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

/*
 * Finish TD processing: remove the TD from the endpoint's TD list.
 * Returns 1 if the URB can be given back, 0 otherwise.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv	*urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		ep->stopped_trb = event_trb;
		return 0;
	} else {
		if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD.  We can't do that here because
			 * the halt condition must be cleared first.  Let the
			 * USB class driver clear the stall later.
			 */
			ep->stopped_td = td;
			ep->stopped_trb = event_trb;
			ep->stopped_stream = ep_ring->stream_id;
		} else if (xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code)) {
			/* Other types of errors halt the endpoint, but the
			 * class driver doesn't call usb_reset_endpoint() unless
			 * the error is -EPIPE.  Clear the halted status in the
			 * xHCI hardware manually.
			 */
			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, ep_ring->stream_id,
					td, event_trb);
		} else {
			/* Update ring dequeue pointer */
			while (ep_ring->dequeue != td->last_trb)
				inc_deq(xhci, ep_ring);
			inc_deq(xhci, ep_ring);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;
		urb_priv = urb->hcpriv;

		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned).  Play it safe and say we didn't
		 * transfer anything.
		 */
		if (urb->actual_length > urb->transfer_buffer_length) {
			xhci_warn(xhci, "URB transfer length is wrong, "
					"xHC issue? req. len = %u, "
					"act. len = %u\n",
					urb->transfer_buffer_length,
					urb->actual_length);
			urb->actual_length = 0;
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
		}
		list_del_init(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list))
			list_del_init(&td->cancelled_td_list);

		urb_priv->td_cnt++;
		/* Giveback the urb when all the tds are completed */
		if (urb_priv->td_cnt == urb_priv->length) {
			ret = 1;
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
				xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
				if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
					== 0) {
					if (xhci->quirks & XHCI_AMD_PLL_FIX)
						usb_amd_quirk_pll_enable();
				}
			}
		}
	}

	return ret;
}

/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (event_trb == ep_ring->dequeue) {
			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	case COMP_STOP_INVAL:
	case COMP_STOP:
		return finish_td(xhci, td, event_trb, event, ep, status, false);
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error code %u, "
				"halted endpoint index = %u\n",
				trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL:
		/* Did we transfer part of the data (middle) phase? */
		if (event_trb != ep_ring->dequeue &&
				event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		else
			td->urb->actual_length = 0;

		xhci_cleanup_halted_endpoint(xhci,
			slot_id, ep_index, 0, td, event_trb);
		return finish_td(xhci, td, event_trb, event, ep, status, true);
	}
	/*
	 * Did we transfer any data, despite the errors that might have
	 * happened?  I.e. did we get past the setup stage?
	 */
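	/* For a control TD, event_trb == ep_ring->dequeue means the event was
	 * for the setup stage TRB, event_trb == td->last_trb means it was for
	 * the status stage, and anything in between is the data stage.
	 */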
	if (event_trb != ep_ring->dequeue) {
		/* The event was for the status stage */
		if (event_trb == td->last_trb) {
			if (td->urb->actual_length != 0) {
				/* Don't overwrite a previously set error code
				 */
				if ((*status == -EINPROGRESS || *status == 0) &&
						(td->urb->transfer_flags
						 & URB_SHORT_NOT_OK))
					/* Did we already see a short data
					 * stage? */
					*status = -EREMOTEIO;
			} else {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
			}
		} else {
		/* Maybe the event was for the data stage? */
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
			xhci_dbg(xhci, "Waiting for status "
					"stage event\n");
			return 0;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	int len = 0;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool skip_td = false;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
			frame->status = 0;
			break;
		}
		if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
			trb_comp_code = COMP_SHORT_TX;
		/* fall through */
	case COMP_SHORT_TX:
		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
				-EREMOTEIO : 0;
		break;
	case COMP_BW_OVER:
		frame->status = -ECOMM;
		skip_td = true;
		break;
	case COMP_BUFF_OVER:
	case COMP_BABBLE:
		frame->status = -EOVERFLOW;
		skip_td = true;
		break;
	case COMP_DEV_ERR:
	case COMP_STALL:
	case COMP_TX_ERR:
		frame->status = -EPROTO;
		skip_td = true;
		break;
	case COMP_STOP:
	case COMP_STOP_INVAL:
		break;
	default:
		frame->status = -1;
		break;
	}

	if (trb_comp_code == COMP_SUCCESS || skip_td) {
		frame->actual_length = frame->length;
		td->urb->actual_length += frame->length;
	} else {
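		/* Slow path: sum the lengths of the TRBs queued before the
		 * event TRB, then add the part of the event TRB that actually
		 * transferred (its length minus the residue reported in the
		 * event).
		 */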
		for (cur_trb = ep_ring->dequeue,
		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

		if (trb_comp_code != COMP_STOP_INVAL) {
			frame->actual_length = len;
			td->urb->actual_length += len;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring);
	inc_deq(xhci, ep_ring);

	return finish_td(xhci, td, NULL, event, ep, status, true);
}

/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	u32 trb_comp_code;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* Double check that the HW transferred everything. */
		if (event_trb != td->last_trb ||
		    EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			xhci_warn(xhci, "WARN Successful completion "
					"on short TX\n");
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
			if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
				trb_comp_code = COMP_SHORT_TX;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	default:
		/* Others already handled above */
		break;
	}
	if (trb_comp_code == COMP_SHORT_TX)
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
	/* Fast path - was this the last TRB in the TD for this URB? */
	if (event_trb == td->last_trb) {
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
			if (td->urb->transfer_buffer_length <
					td->urb->actual_length) {
				xhci_warn(xhci, "HC gave bad length "
						"of %d bytes left\n",
					  EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
				td->urb->actual_length = 0;
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
			/* Don't overwrite a previously set error code */
			if (*status == -EINPROGRESS) {
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
		} else {
			td->urb->actual_length =
				td->urb->transfer_buffer_length;
			/* Ignore a short packet completion if the
			 * untransferred length was zero.
			 */
			if (*status == -EREMOTEIO)
				*status = 0;
		}
	} else {
		/* Slow path - walk the list, starting from the dequeue
		 * pointer, to get the actual length transferred.
		 */
		td->urb->actual_length = 0;
		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
				cur_trb != event_trb;
				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				td->urb->actual_length +=
					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		/* If the ring didn't stop on a Link or No-op TRB, add
		 * in the actual bytes transferred from the Normal TRB
		 */
		if (trb_comp_code != COMP_STOP_INVAL)
			td->urb->actual_length +=
				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
	__releases(&xhci->lock)
	__acquires(&xhci->lock)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = NULL;
	int status = -EINPROGRESS;
	struct urb_priv *urb_priv;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int ret = 0;
	int td_num = 0;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring ||
	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
	    EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
				"or incorrect stream ring\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	event_dma = le64_to_cpu(event->buffer);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
			break;
		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
			trb_comp_code = COMP_SHORT_TX;
		else
			xhci_warn_ratelimited(xhci,
					"WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
		/* fall through */
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STALL:
		xhci_dbg(xhci, "Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_dbg(xhci, "Transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_dbg(xhci, "Babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	case COMP_BW_OVER:
		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
		break;
	case COMP_BUFF_OVER:
		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
		break;
	case COMP_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_DEV_ERR:
		xhci_warn(xhci, "WARN: detect an incompatible device");
		status = -EPROTO;
		break;
	case COMP_MISSED_INT:
		/*
		 * When a Missed Service Error is encountered, the xHC may
		 * have skipped one or more isoc TDs.
		 * Set the skip flag of the ep_ring and complete the missed
		 * TDs as short transfers when processing the ep_ring next
		 * time.
		 */
		ep->skip = true;
		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
		goto cleanup;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
				"busted\n");
		goto cleanup;
	}

	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * A stopped endpoint may generate an extra completion
			 * event if the device was suspended.  Don't print
			 * warnings.
			 */
			if (!(trb_comp_code == COMP_STOP ||
						trb_comp_code == COMP_STOP_INVAL)) {
				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
						ep_index);
				xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
						(le32_to_cpu(event->flags) &
						 TRB_TYPE_BITMASK)>>10);
				xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
			}
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip "
						"flag set. Clear skip flag.\n");
			}
			ret = 0;
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
						"Clear skip flag.\n");
			ret = 0;
			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, event_dma);

		/*
		 * Skip the Force Stopped Event.  The event_trb(event_dma) of
		 * the FSE is not in the current TD pointed to by
		 * ep_ring->dequeue, because the hardware dequeue pointer is
		 * still at the previous TRB of the current TD.  That previous
		 * TRB may be a Link TRB or the last TRB of the previous TD.
		 * The command completion handler will take care of the rest.
		 */
		if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
			ret = 0;
			goto cleanup;
		}

		if (!event_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					ret = 0;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD\n");
				return -ESHUTDOWN;
			}

			ret = skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_TX)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
			ep->skip = false;
		}

		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
						sizeof(*event_trb)];
		/*
		 * No-op TRB should not trigger interrupts.
		 * If event_trb is a no-op TRB, it means the
		 * corresponding TD has been cancelled. Just ignore
		 * the TD.
		 */
		if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
			xhci_dbg(xhci,
				 "event_trb is a no-op TRB. Skip it\n");
			goto cleanup;
		}

		/* Now update the urb's actual_length and give back to
		 * the core
		 */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
						 &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			ret = process_isoc_td(xhci, td, event_trb, event, ep,
						 &status);
		else
			ret = process_bulk_intr_td(xhci, td, event_trb, event,
						 ep, &status);

cleanup:
		/*
		 * Do not update event ring dequeue pointer if ep->skip is set.
		 * Will roll back to continue processing missed tds.
		 */
		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
			inc_deq(xhci, xhci->event_ring);
		}

		if (ret) {
			urb = td->urb;
			urb_priv = urb->hcpriv;
			/* Leave the TD around for the reset endpoint function
			 * to use (but only if it's not a control endpoint,
			 * since we already queued the Set TR dequeue pointer
			 * command for stalled control endpoints).
			 */
			if (usb_endpoint_xfer_control(&urb->ep->desc) ||
				(trb_comp_code != COMP_STALL &&
					trb_comp_code != COMP_BABBLE))
				xhci_urb_free_priv(xhci, urb_priv);
			else
				kfree(urb_priv);

			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
			if ((urb->actual_length != urb->transfer_buffer_length &&
						(urb->transfer_flags &
						 URB_SHORT_NOT_OK)) ||
					(status != 0 &&
					 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
						"expected = %d, status = %d\n",
						urb, urb->actual_length,
						urb->transfer_buffer_length,
						status);
			spin_unlock(&xhci->lock);
			/* EHCI, UHCI, and OHCI always unconditionally set the
			 * urb->status of an isochronous endpoint to 0.
			 */
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
				status = 0;
			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
			spin_lock(&xhci->lock);
		}

	/*
	 * If ep->skip is set, it means there are missed tds on the
	 * endpoint ring that need to be taken care of.
	 * Process them as short transfers until reaching the td pointed
	 * to by the event.
	 */
	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);

	return 0;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done.  In future, <0 returns should indicate error code.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return 0;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return 0;
	}

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();
	/* FIXME: Handle more event types. */
	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_DEV_NOTE):
		handle_device_notification(xhci, event);
		break;
	default:
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
		    TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci->error_bitmask |= 1 << 3;
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring);

	/* Are there more items on the event ring?  Caller will call us again to
	 * check.
	 */
	return 1;
}

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 status;
	u64 temp_64;
	union xhci_trb *event_ring_deq;
	dma_addr_t deq;

	spin_lock(&xhci->lock);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = readl(&xhci->op_regs->status);
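	/* A register read of all 1's means the host controller is gone
	 * (e.g. PCI hot unplug); treat the host as dead.
	 */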
	if (status == 0xffffffff)
		goto hw_died;

	if (!(status & STS_EINT)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	writel(status, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	if (hcd->irq) {
		u32 irq_pending;
		/* Acknowledge the PCI interrupt */
		irq_pending = readl(&xhci->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		writel(irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
				&xhci->ir_set->erst_dequeue);
		spin_unlock(&xhci->lock);

		return IRQ_HANDLED;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event "
					"ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);

	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}

irqreturn_t xhci_msi_irq(int irq, void *hcd)
{
	return xhci_irq(hcd);
}
/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);
	inc_enq(xhci, ring, more_trbs_coming);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	unsigned int num_trbs_needed;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

	while (1) {
		if (room_on_ring(xhci, ep_ring, num_trbs))
			break;

		if (ep_ring == xhci->cmd_ring) {
			xhci_err(xhci, "Do not support expand command ring\n");
			return -ENOMEM;
		}

		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
				"ERROR no room on ep ring, try ring expansion");
		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
					mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next;

		next = ring->enqueue;

		while (last_trb(xhci, ring, ring->enq_seg, next)) {
			/* If we're not dealing with 0.95 hardware or isoc rings
			 * on AMD 0.96 host, clear the chain bit.
			 */
			if (!xhci_link_trb_quirk(xhci) &&
					!(ring->type == TYPE_ISOC &&
					 (xhci->quirks & XHCI_AMD_0x96_HOST)))
				next->link.control &= cpu_to_le32(~TRB_CHAIN);
			else
				next->link.control |= cpu_to_le32(TRB_CHAIN);

			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
			ring->enq_seg = ring->enq_seg->next;
			ring->enqueue = ring->enq_seg->trbs;
			next = ring->enqueue;
		}
	}

	return 0;
}

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td	*td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	urb_priv->td[td_index] = td;

	return 0;
}

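/*
 * Count how many TRBs are needed for an SG URB.  Each TRB may span at most
 * 64KB and may not cross a 64KB address boundary.  For example, a 96KB SG
 * entry that starts 1KB below a 64KB boundary needs three TRBs: 1KB up to
 * the boundary, then 64KB, then the remaining 31KB.
 */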
static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_mapped_sgs;
	temp = urb->transfer_buffer_length;

	num_trbs = 0;
	for_each_sg(urb->sg, sg, num_sgs, i) {
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
		running_total &= TRB_MAX_BUFF_SIZE - 1;
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg) && running_total < temp) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	return num_trbs;
}
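
/*
 * Worked example (editorial illustration, values hypothetical): an sg entry
 * whose DMA address lies 0xF000 bytes into a 64KB region, with a length of
 * 0x11000 bytes, needs two TRBs: 0x1000 bytes up to the 64KB boundary, then
 * the remaining 0x10000 bytes.
 */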

static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
				"TRBs, %d left\n", __func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
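
/*
 * Editorial note on the handoff above: while a TD is built, its first TRB is
 * written with the cycle bit inverted, so the hardware does not consider the
 * TRB valid yet.  Restoring bit 0 of field[3] to start_cycle publishes the
 * whole chain with a single write, which the wmb() orders after all earlier
 * TRB writes.
 */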

/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
			xhci->devs[slot_id]->out_ctx, ep_index);
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
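
/*
 * Worked example for the interval fixup above (editorial illustration): a
 * full-speed driver asking for urb->interval = 4 frames yields ep_interval =
 * 4 * 8 = 32 microframes.  If the endpoint context encodes 16 microframes,
 * the URB is adjusted to urb->interval = 16 / 8 = 2 frames.
 */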

/*
 * The TD size is the number of bytes remaining in the TD (including this TRB),
 * right shifted by 10.
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */
static u32 xhci_td_remainder(unsigned int remainder)
{
	u32 max = (1 << (21 - 17 + 1)) - 1;

	if ((remainder >> 10) >= max)
		return max << 17;
	else
		return (remainder >> 10) << 17;
}
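
/*
 * Worked example (editorial illustration): with 10240 bytes left in the TD,
 * 10240 >> 10 = 10 is below the cap, so the field is 10 << 17.  With 64KB
 * left, 65536 >> 10 = 64 >= 31, so the field saturates at 31 << 17.
 */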

/*
 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
 * packets remaining in the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * It must fit in bits 21:17, so it can't be bigger than 31.
 * The last TRB in a TD must have the TD size set to zero.
 */
static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
		unsigned int total_packet_count, struct urb *urb,
		unsigned int num_trbs_left)
{
	int packets_transferred;

	/* One TRB with a zero-length data packet. */
	if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
		return 0;

	/* None of the TRB queueing functions count the current TRB in
	 * running_total.
	 */
	packets_transferred = (running_total + trb_buff_len) /
		GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));

	if ((total_packet_count - packets_transferred) > 31)
		return 31 << 17;
	return (total_packet_count - packets_transferred) << 17;
}
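
/*
 * Worked example (editorial illustration, values hypothetical): a 3072-byte
 * TD on a 512-byte max packet endpoint has total_packet_count = 6.  For a
 * TRB that ends at byte 1024 (running_total = 0, trb_buff_len = 1024),
 * packets_transferred = 1024 / 512 = 2, so the TD size field is 6 - 2 = 4.
 */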

static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total;
	unsigned int total_packet_count;
	bool first_trb;
	u64 addr;
	bool more_trbs_coming;

	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_mapped_sgs;
	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));

	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller than
	 *    the amount of memory allocated for this scatter-gather list.
	 * 3. TRBs buffers can't cross 64KB boundaries.
	 */
	sg = urb->sg;
	addr = (u64) sg_dma_address(sg);
	this_sg_len = sg_dma_len(sg);
	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;
	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 field = 0;
		u32 length_field = 0;
		u32 remainder = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		if (TRB_MAX_BUFF_SIZE -
				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
					(unsigned int) addr + trb_buff_len);
		}

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb,
					num_trbs - 1);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer --
		 * Are we done queueing all the TRBs for this sg entry?
		 */
		this_sg_len -= trb_buff_len;
		if (this_sg_len == 0) {
			--num_sgs;
			if (num_sgs == 0)
				break;
			sg = sg_next(sg);
			addr = (u64) sg_dma_address(sg);
			this_sg_len = sg_dma_len(sg);
		} else {
			addr += trb_buff_len;
		}

		trb_buff_len = TRB_MAX_BUFF_SIZE -
			(addr & (TRB_MAX_BUFF_SIZE - 1));
		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
		if (running_total + trb_buff_len > urb->transfer_buffer_length)
			trb_buff_len =
				urb->transfer_buffer_length - running_total;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	bool more_trbs_coming;
	int start_cycle;
	u32 field, length_field;

	int running_total, trb_buff_len, ret;
	unsigned int total_packet_count;
	u64 addr;

	if (urb->num_sgs)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));
	/* How much data is in the first TRB? */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb,
					num_trbs - 1);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (xhci->hci_version == 0x100) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}
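
/*
 * Worked example (editorial illustration): a GET_DESCRIPTOR(DEVICE) request
 * queues three TRBs above: a setup TRB carrying the 8-byte setup packet
 * (bRequestType 0x80, bRequest 0x06) as immediate data, an IN data TRB for
 * the descriptor bytes, and an OUT status TRB, since the data stage was IN.
 */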

static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
		struct urb *urb, int i)
{
	int num_trbs = 0;
	u64 addr, td_len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	td_len = urb->iso_frame_desc[i].length;

	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}
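
/*
 * Worked example (editorial illustration, values hypothetical): an isoc
 * frame buffer starting 0x8000 bytes into a 64KB region with td_len =
 * 0x18000 needs DIV_ROUND_UP(0x18000 + 0x8000, 0x10000) = 2 TRBs.  A
 * zero-length frame still consumes one TRB.
 */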

/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD.  Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst.  Basically, for everything but SuperSpeed devices, this field will be
 * zero.  Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
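
/*
 * Worked example (editorial illustration): a SuperSpeed isoc endpoint with
 * bMaxBurst = 3 moves up to 4 packets per burst, so a 10-packet TD needs
 * DIV_ROUND_UP(10, 4) = 3 bursts, and the zero-based TBC field is 2.
 */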

/*
 * Returns the number of packets in the last "burst" of packets.  This field is
 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	default:
		if (total_packet_count == 0)
			return 0;
		return total_packet_count - 1;
	}
}
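
/*
 * Worked example (editorial illustration): continuing the 10-packet TD with
 * bMaxBurst = 3, residue = 10 % 4 = 2, so the last burst carries two packets
 * and the zero-based TLBPC field is 1.  If residue were 0, the last burst
 * would be full and the field would equal max_burst.
 */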

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;

	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}

	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the first TRB, even if it's zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_packet_count;
		unsigned int burst_count;
		unsigned int residue;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		total_packet_count = DIV_ROUND_UP(td_len,
				GET_MAX_PACKET(
					usb_endpoint_maxp(&urb->ep->desc)));
		/* A zero-length transfer still involves at least one packet. */
		if (total_packet_count == 0)
			total_packet_count++;
		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
				total_packet_count);
		residue = xhci_get_last_burst_packet_count(xhci,
				urb->dev, urb, total_packet_count);

		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}

		td = urb_priv->td[i];
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;
			field = 0;

			if (first_trb) {
				field = TRB_TBC(burst_count) |
					TRB_TLBPC(residue);
				/* Queue the isoc TRB */
				field |= TRB_TYPE(TRB_ISOC);
				/* Assume URB_ISO_ASAP is set */
				field |= TRB_SIA;
				if (i == 0) {
					if (start_cycle == 0)
						field |= 0x1;
				} else
					field |= ep_ring->cycle_state;
				first_trb = false;
			} else {
				/* Queue other normal TRBs */
				field |= TRB_TYPE(TRB_NORMAL);
				field |= ep_ring->cycle_state;
			}

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Chain all the TRBs together; clear the chain bit in
			 * the last TRB to indicate it's the last TRB in the
			 * chain.
			 */
			if (j < trbs_per_td - 1) {
				field |= TRB_CHAIN;
				more_trbs_coming = true;
			} else {
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				if (xhci->hci_version == 0x100 &&
						!(xhci->quirks &
							XHCI_AVOID_BEI)) {
					/* Set BEI bit except for the last td */
					if (i < num_tds - 1)
						field |= TRB_BEI;
				}
				more_trbs_coming = false;
			}

			/* Calculate TRB length */
			trb_buff_len = TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			if (xhci->hci_version < 0x100) {
				remainder = xhci_td_remainder(
						td_len - running_total);
			} else {
				remainder = xhci_v1_0_td_remainder(
						running_total, trb_buff_len,
						total_packet_count, urb,
						(trbs_per_td - j - 1));
			}
			length_field = TRB_LEN(trb_buff_len) |
				remainder |
				TRB_INTR_TARGET(0);

			queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit. That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them.  td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

/*
 * Check the transfer ring to guarantee there is enough room for the URB.
 * Update the ISO URB's start_frame and interval.
 * Update the interval as xhci_queue_intr_tx does.  For now, just use the
 * xHCI frame_index to set urb->start_frame.
 * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int xhci_interval;
	int ep_interval;
	int num_tds, num_trbs, i;
	int ret;

	xdev = xhci->devs[slot_id];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(xhci, urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;

	urb->start_frame = start_frame;
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		urb->start_frame >>= 3;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}
	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}
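
/*
 * Example call (editorial illustration): a No Op command TRB, which
 * exercises the command ring without side effects, could be queued as
 *	queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
 */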

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
			      u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 trb_sct = 0;
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return 0;
	}
	ep->queued_deq_seg = deq_seg;
	ep->queued_deq_ptr = deq_ptr;
	if (stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	return queue_command(xhci, lower_32_bits(addr) | trb_sct | cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
			false);
}