/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the command
 *    and endpoint rings; it generates events on the event ring for these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
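/*
 * For example (illustrative numbers only): with seg->dma == 0x1000 and
 * trb == &seg->trbs[2], the result is 0x1000 + 2 * sizeof(union xhci_trb)
 * == 0x1020, since a TRB is 16 bytes.
 */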
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

static bool trb_is_noop(union xhci_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}
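
/*
 * Illustrative sketch of the "Consumer rules" in the header comment above,
 * not called anywhere in this driver: a TRB belongs to the consumer when
 * its cycle bit matches the ring's cycle state.  The helper name is
 * hypothetical.
 */
static bool __maybe_unused trb_owned_by_consumer(struct xhci_ring *ring,
						 union xhci_trb *trb)
{
	return (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
		ring->cycle_state;
}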

static bool last_td_in_urb(struct xhci_td *td)
{
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->td_cnt == urb_priv->length;
}

static void inc_td_cnt(struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->td_cnt++;
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	ring->deq_updates++;

	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			return;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}
	return;
}
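
/*
 * Illustrative pairing for inc_deq() on the event ring (a sketch, not a
 * real caller in this file; process_event() is hypothetical): consume
 * TRBs while software owns them, advancing the dequeue pointer after
 * each one.
 *
 *	while (trb_owned_by_consumer(ring, ring->dequeue)) {
 *		process_event(ring->dequeue);
 *		inc_deq(xhci, ring);
 *	}
 */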

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (!trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB */
	while (trb_is_link(next)) {

		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet. We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}
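
/*
 * Illustrative producer-side usage of room_on_ring() per the "Producer
 * rules" in the header comment (a sketch, not a real caller in this
 * file): check for room, write the ring's cycle state into the cycle bit
 * of the zero-initialized TRB being enqueued, advance enqueue, then
 * notify the consumer.
 *
 *	if (!room_on_ring(xhci, ring, 1))
 *		return -ENOMEM;
 *	trb->generic.field[3] |= cpu_to_le32(ring->cycle_state);
 *	inc_enq(xhci, ring, false);
 *	xhci_ring_cmd_db(xhci);
 */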

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;

	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Use the cmd timeout timer to
	 * handle those cases. Use twice the time to cover the bit polling retry
	 */
	mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
	 * time the completion of all xHCI commands, including
	 * the Command Abort operation. If software doesn't see
	 * CRR negated in a timely manner (e.g. longer than 5
	 * seconds), then it should assume that there are
	 * larger problems with the xHC and assert HCRST.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		/* we are about to kill xhci, give it one more chance */
		xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			      &xhci->op_regs->cmd_ring);
		udelay(1000);
		ret = xhci_handshake(&xhci->op_regs->cmd_ring,
				     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
		if (ret == 0)
			return 0;

		xhci_err(xhci, "Stopping the command ring failed, "
				"maybe the host is dead\n");
		del_timer(&xhci->cmd_timer);
		xhci->xhc_state |= XHCI_STATE_DYING;
		xhci_halt(xhci);
		return -ESHUTDOWN;
	}

	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");
	/* 4.6.9 the css flag is written to the stream context for streams */
	if (ep->ep_state & EP_HAS_STREAMS) {
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		hw_dequeue = le64_to_cpu(ctx->stream_ring);
	} else {
		struct xhci_ep_ctx *ep_ctx
			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
		hw_dequeue = le64_to_cpu(ep_ctx->deq);
	}

	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb). We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		       struct xhci_td *td, bool flip_cycle)
{
	struct xhci_segment *seg	= td->start_seg;
	union xhci_trb *trb		= td->first_trb;

	while (1) {
		if (trb_is_link(trb)) {
			/* unchain chained link TRBs */
			trb->link.control &= cpu_to_le32(~TRB_CHAIN);
		} else {
			trb->generic.field[0] = 0;
			trb->generic.field[1] = 0;
			trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
		}
		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		next_trb(xhci, ep_ring, &seg, &trb);
	}
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
				     struct xhci_td *cur_td, int status)
{
	struct urb	*urb		= cur_td->urb;
	struct urb_priv	*urb_priv	= urb->hcpriv;
	struct usb_hcd	*hcd		= bus_to_hcd(urb->dev->bus);

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
			if (xhci->quirks & XHCI_AMD_PLL_FIX)
				usb_amd_quirk_pll_enable();
		}
	}
	xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&xhci->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&xhci->lock);
}

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
		struct xhci_ring *ring, struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;

	if (!seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	/* for in transfers we need to copy the data from bounce to sg */
	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
			     seg->bounce_len, seg->bounce_offs);
	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
				ep->stopped_td->urb->stream_id, &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	ep->stopped_td = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (ep_ring && cur_td->bounce_seg)
			xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;

	while (!list_empty(&ring->td_list)) {
		cur_td = list_first_entry(&ring->td_list,
				struct xhci_td, td_list);
		list_del_init(&cur_td->td_list);
		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		if (cur_td->bounce_seg)
			xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id + 1);
			xhci_kill_ring_urbs(xhci,
					ep->stream_info->stream_rings[stream_id]);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}
	while (!list_empty(&ep->cancelled_td_list)) {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but no command pending, "
				"exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled. If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Calling usb_hc_died()");
	usb_hc_died(xhci_to_hcd(xhci));
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = GET_EP_CTX_STATE(ep_ctx);
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

cleanup:
	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
			return;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		struct xhci_command *command, u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		command->slot_id = slot_id;
	else
		command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;
	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion "
				"for disabled slot %u\n", slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd, *tmp_cmd;
	u32 cycle_state;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
				 cmd_list) {

		if (i_cmd->status != COMP_CMD_ABORT)
			continue;

		i_cmd->status = COMP_CMD_STOP;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);
		/* get cycle state from the original cmd trb */
		cycle_state = le32_to_cpu(
			i_cmd->command_trb->generic.field[3]) &	TRB_CYCLE;
		/* modify the command trb to no-op command */
		i_cmd->command_trb->generic.field[0] = 0;
		i_cmd->command_trb->generic.field[1] = 0;
		i_cmd->command_trb->generic.field[2] = 0;
		i_cmd->command_trb->generic.field[3] = cpu_to_le32(
			TRB_TYPE(TRB_CMD_NOOP) | cycle_state);

		/*
		 * The caller waiting on completion is woken when a command
		 * completion event is received for these no-op commands.
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
	return;
}


void xhci_handle_command_timeout(unsigned long data)
{
	struct xhci_hcd *xhci;
	int ret;
	unsigned long flags;
	u64 hw_ring_state;
	bool second_timeout = false;
	xhci = (struct xhci_hcd *) data;

	spin_lock_irqsave(&xhci->lock, flags);

	if (!xhci->current_cmd) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	/* mark this command to be cancelled */
	if (xhci->current_cmd->status == COMP_CMD_ABORT)
		second_timeout = true;
	xhci->current_cmd->status = COMP_CMD_ABORT;

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING))  {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Command timeout\n");
		ret = xhci_abort_cmd_ring(xhci);
		if (unlikely(ret == -ESHUTDOWN)) {
			xhci_err(xhci, "Abort command ring failed\n");
			xhci_cleanup_command_queue(xhci);
			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
			xhci_dbg(xhci, "xHCI host controller is dead.\n");
		}
		return;
	}

	/* command ring failed to restart, or host removed. Bail out */
	if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
		return;
	}

	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/*
	 * Check whether the completion event is for our internal kept
	 * command.
	 */
	if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
		xhci_warn(xhci,
			  "ERROR mismatched command completion event\n");
		return;
	}

	cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);

	del_timer(&xhci->cmd_timer);

	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_CMD_STOP) {
		xhci_handle_stopped_cmd_ring(xhci, cmd);
		return;
	}

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_CMD_ABORT) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_CMD_ABORT) {
			if (xhci->current_cmd == cmd)
				xhci->current_cmd = NULL;
			goto event_handled;
		}
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_CMD_STOP)
			cmd_comp_code = COMP_CMD_STOP;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
		break;
	}

	/* restart timer if this wasn't the last command */
	if (cmd->cmd_list.next != &xhci->cmd_list) {
		xhci->current_cmd = list_entry(cmd->cmd_list.next,
					       struct xhci_command, cmd_list);
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	} else if (xhci->current_cmd == cmd) {
		xhci->current_cmd = NULL;
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
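
/*
 * Worked example for the helper above (illustrative values only): with
 * port_array = { 0x03, 0x02, 0x03, 0x02 } and a USB 3.0 roothub, a Port
 * Status Change Event for hardware port_id == 3 sees one earlier
 * similar-speed (0x03) port, so the faked (zero-based) port number is 1.
 */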

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

S
Sarah Sharp 已提交
1511 1512 1513
static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
1514
	struct usb_hcd *hcd;
S
Sarah Sharp 已提交
1515
	u32 port_id;
1516
	u32 temp, temp1;
1517
	int max_ports;
1518
	int slot_id;
1519
	unsigned int faked_port_index;
1520
	u8 major_revision;
1521
	struct xhci_bus_state *bus_state;
M
Matt Evans 已提交
1522
	__le32 __iomem **port_array;
1523
	bool bogus_port_status = false;
S
Sarah Sharp 已提交
1524 1525

	/* Port status change events always have a successful completion code */
L
Lu Baolu 已提交
1526 1527 1528 1529
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
		xhci_warn(xhci,
			  "WARN: xHC returned failed port status event\n");

	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		inc_deq(xhci, xhci->event_ring);
		return;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];

	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
		hcd = xhci->shared_hcd;

	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed >= HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = readl(port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = readl(&xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED_ANY(temp)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else if (!test_bit(faked_port_index,
				     &bus_state->resuming_ports)) {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
			set_bit(faked_port_index, &bus_state->resuming_ports);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
			DEV_SUPERSPEED_ANY(temp)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state).  If so, let the driver know it's
	 * out of the RExit state.
	 */
	if (!DEV_SUPERSPEED_ANY(temp) &&
			test_and_clear_bit(faked_port_index,
				&bus_state->rexit_ports)) {
		complete(&bus_state->rexit_done[faked_port_index]);
		bogus_port_status = true;
		goto cleanup;
	}

	if (hcd->speed < HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set.  When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *start_seg,
		union xhci_trb	*start_trb,
		union xhci_trb	*end_trb,
		dma_addr_t	suspect_dma,
		bool		debug)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (debug)
			xhci_warn(xhci,
				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
				(unsigned long long)suspect_dma,
				(unsigned long long)start_dma,
				(unsigned long long)end_trb_dma,
				(unsigned long long)cur_seg->dma,
				(unsigned long long)end_seg_dma);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}

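/*
 * Recover a halted endpoint: queue a Reset Endpoint command to clear the
 * host-side halt, move the ring's dequeue pointer past the stalled TD, and
 * ring the command doorbell.  The class driver clears the device-side halt
 * later.
 */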
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *ep_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	struct xhci_command *command;
	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!command)
		return;

	ep->ep_state |= EP_HALTED;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, ep_index, td);

	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * clean up the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted. The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

/*
 * Finish TD processing: remove the TD from the endpoint's TD list and give
 * the URB back to the USB core once all of its TDs have completed.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	struct urb_priv	*urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP ||
			trb_comp_code == COMP_STOP_SHORT) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		return 0;
	}
	if (trb_comp_code == COMP_STALL ||
		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
						trb_comp_code)) {
		/* Issue a reset endpoint command to clear the host side
		 * halt, followed by a set dequeue command to move the
		 * dequeue pointer past the TD.
		 * The class driver clears the device side halt later.
		 */
		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
					ep_ring->stream_id, td, ep_trb);
	} else {
		/* Update ring dequeue pointer */
		while (ep_ring->dequeue != td->last_trb)
			inc_deq(xhci, ep_ring);
		inc_deq(xhci, ep_ring);
	}

td_cleanup:
	/* Clean up the endpoint's TD list */
	urb = td->urb;
	urb_priv = urb->hcpriv;

	/* if a bounce buffer was used to align this td then unmap it */
	if (td->bounce_seg)
		xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned).  Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
			  urb->transfer_buffer_length, urb->actual_length);
		urb->actual_length = 0;
		*status = 0;
	}
	list_del_init(&td->td_list);
	/* Was this TD slated to be cancelled but completed anyway? */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	inc_td_cnt(urb);
	/* Give back the URB when all the TDs are completed */
	if (last_td_in_urb(td)) {
		if ((urb->actual_length != urb->transfer_buffer_length &&
		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
		    (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
				 urb, urb->actual_length,
				 urb->transfer_buffer_length, *status);

		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			*status = 0;
		xhci_giveback_urb_in_irq(xhci, td, *status);
	}
	return 0;
}

/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
			   union xhci_trb *stop_trb)
{
	u32 sum;
	union xhci_trb *trb = ring->dequeue;
	struct xhci_segment *seg = ring->deq_seg;

	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
		if (!trb_is_noop(trb) && !trb_is_link(trb))
			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
	}
	return sum;
}

/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;
	u32 remaining, requested;
	bool on_data_stage;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	requested = td->urb->transfer_buffer_length;
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

	/* neither the setup TRB (ring dequeue) nor the status TRB (td->last_trb)
	 * means we are at the data stage
	 */
	on_data_stage = (ep_trb != ep_ring->dequeue && ep_trb != td->last_trb);

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (ep_trb != td->last_trb) {
			xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
				  on_data_stage ? "data" : "setup");
			*status = -ESHUTDOWN;
			break;
		}
		*status = 0;
		break;
	case COMP_SHORT_TX:
		*status = 0;
		break;
	case COMP_STOP_SHORT:
		if (on_data_stage)
			td->urb->actual_length = remaining;
		else
			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
		goto finish_td;
	case COMP_STOP:
		if (on_data_stage)
			td->urb->actual_length = requested - remaining;
		goto finish_td;
	case COMP_STOP_INVAL:
		goto finish_td;
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
						       ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
			 trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL:
		/* Did we transfer part of the data (middle) phase? */
		if (on_data_stage)
			td->urb->actual_length = requested - remaining;
		else if (!td->urb_length_set)
			td->urb->actual_length = 0;
		goto finish_td;
	}

	/* stopped at setup stage, no data transferred */
	if (ep_trb == ep_ring->dequeue)
		goto finish_td;

	/*
	 * if on data stage then update the actual_length of the URB and flag it
	 * as set, so it won't be overwritten in the event for the last TRB.
	 */
	if (on_data_stage) {
		td->urb_length_set = true;
		td->urb->actual_length = requested - remaining;
		xhci_dbg(xhci, "Waiting for status stage event\n");
		return 0;
	}

	/* at status stage */
	if (!td->urb_length_set)
		td->urb->actual_length = requested;

finish_td:
	return finish_td(xhci, td, ep_trb, event, ep, status, false);
}

/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool sum_trbs_for_length = false;
	u32 remaining, requested, ep_trb_len;
	int short_framestatus;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];
	requested = frame->length;
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
		-EREMOTEIO : 0;
	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (remaining) {
			frame->status = short_framestatus;
			if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
				sum_trbs_for_length = true;
			break;
		}
		frame->status = 0;
		break;
	case COMP_SHORT_TX:
		frame->status = short_framestatus;
		sum_trbs_for_length = true;
		break;
	case COMP_BW_OVER:
		frame->status = -ECOMM;
		break;
	case COMP_BUFF_OVER:
	case COMP_BABBLE:
		frame->status = -EOVERFLOW;
		break;
	case COMP_DEV_ERR:
	case COMP_STALL:
		frame->status = -EPROTO;
		break;
	case COMP_TX_ERR:
		frame->status = -EPROTO;
		if (ep_trb != td->last_trb)
			return 0;
		break;
	case COMP_STOP:
		sum_trbs_for_length = true;
		break;
	case COMP_STOP_SHORT:
		/* field normally containing residue now contains transferred */
		frame->status = short_framestatus;
		requested = remaining;
		break;
	case COMP_STOP_INVAL:
		requested = 0;
		remaining = 0;
		break;
	default:
		sum_trbs_for_length = true;
		frame->status = -1;
		break;
	}

	if (sum_trbs_for_length)
		frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
			ep_trb_len - remaining;
	else
		frame->actual_length = requested;

	td->urb->actual_length += frame->actual_length;

	return finish_td(xhci, td, ep_trb, event, ep, status, false);
}

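/*
 * Give back a missed isoc TD: mark its frame descriptor -EXDEV with no data
 * transferred and advance the ring's dequeue pointer past the skipped TD.
 */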
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring);
	inc_deq(xhci, ep_ring);

	return finish_td(xhci, td, NULL, event, ep, status, true);
}

/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	u32 trb_comp_code;
	u32 remaining, requested, ep_trb_len;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	requested = td->urb->transfer_buffer_length;

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* handle success with untransferred data as short packet */
		if (ep_trb != td->last_trb || remaining) {
			xhci_warn(xhci, "WARN Successful completion on short TX\n");
			xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
				 td->urb->ep->desc.bEndpointAddress,
				 requested, remaining);
		}
		*status = 0;
		break;
	case COMP_SHORT_TX:
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
			 td->urb->ep->desc.bEndpointAddress,
			 requested, remaining);
		*status = 0;
		break;
	case COMP_STOP_SHORT:
		td->urb->actual_length = remaining;
		goto finish_td;
	case COMP_STOP_INVAL:
		/* stopped on ep trb with invalid length, exclude it */
		ep_trb_len	= 0;
		remaining	= 0;
		break;
	default:
		/* do nothing */
		break;
	}

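	/*
	 * If the event points at the last TRB of the TD, the residue covers
	 * the whole TD; otherwise sum the lengths of the TRBs before the one
	 * the event stopped on and add its completed part.
	 */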
	if (ep_trb == td->last_trb)
		td->urb->actual_length = requested - remaining;
	else
		td->urb->actual_length =
			sum_trb_lengths(xhci, ep_ring, ep_trb) +
			ep_trb_len - remaining;
finish_td:
	if (remaining > requested) {
		xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
			  remaining);
		td->urb->actual_length = 0;
	}
	return finish_td(xhci, td, ep_trb, event, ep, status, false);
}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
	__releases(&xhci->lock)
	__acquires(&xhci->lock)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t ep_trb_dma;
	struct xhci_segment *ep_seg;
	union xhci_trb *ep_trb;
	int status = -EINPROGRESS;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int td_num = 0;
	bool handling_skipped_tds = false;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring ||  GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
				"or incorrect stream ring\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	ep_trb_dma = le64_to_cpu(event->buffer);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
			break;
		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
			trb_comp_code = COMP_SHORT_TX;
		else
			xhci_warn_ratelimited(xhci,
					"WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STOP_SHORT:
		xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
		break;
	case COMP_STALL:
		xhci_dbg(xhci, "Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_dbg(xhci, "Transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_dbg(xhci, "Babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	case COMP_BW_OVER:
		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
		break;
	case COMP_BUFF_OVER:
		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
		break;
	case COMP_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_DEV_ERR:
		xhci_warn(xhci, "WARN: detect an incompatible device");
		status = -EPROTO;
		break;
	case COMP_MISSED_INT:
		/*
		 * When encounter missed service error, one or more isoc tds
		 * may be missed by xHC.
		 * Set skip flag of the ep_ring; Complete the missed tds as
		 * short transfer when process the ep_ring next time.
		 */
		ep->skip = true;
		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
		goto cleanup;
	case COMP_PING_ERR:
		ep->skip = true;
		xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
		goto cleanup;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
			  trb_comp_code);
		goto cleanup;
	}

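	/*
	 * This loop runs once for a normal event; it repeats only when
	 * ep->skip is set, giving back each missed isoc TD as a short
	 * transfer until the TD matching this event is reached (see the
	 * comment at the bottom of the loop).
	 */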
	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * A stopped endpoint may generate an extra completion
			 * event if the device was suspended.  Don't print
			 * warnings.
			 */
			if (!(trb_comp_code == COMP_STOP ||
						trb_comp_code == COMP_STOP_INVAL)) {
				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
						ep_index);
				xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
						(le32_to_cpu(event->flags) &
						 TRB_TYPE_BITMASK)>>10);
				xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
			}
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip "
						"flag set. Clear skip flag.\n");
			}
			goto cleanup;
		}
		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
						"Clear skip flag.\n");
			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, ep_trb_dma, false);

		/*
		 * Skip the Force Stopped Event.  The event_trb (event_dma) of
		 * the FSE is not in the current TD pointed to by
		 * ep_ring->dequeue, because the hardware dequeue pointer is
		 * still at the previous TRB of the current TD.  The previous
		 * TRB may be a Link TRB or the last TRB of the previous TD.
		 * The command completion handler will take care of the rest.
		 */
		if (!ep_seg && (trb_comp_code == COMP_STOP ||
				   trb_comp_code == COMP_STOP_INVAL)) {
			goto cleanup;
		}

		if (!ep_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD ep_index %d "
					"comp_code %u\n", ep_index,
					trb_comp_code);
				trb_in_td(xhci, ep_ring->deq_seg,
					  ep_ring->dequeue, td->last_trb,
2456
					  ep_trb_dma, true);
2457 2458 2459
				return -ESHUTDOWN;
			}

			skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_TX)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
			ep->skip = false;
		}
		ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
						sizeof(*ep_trb)];
		/*
		 * No-op TRB should not trigger interrupts.
		 * If ep_trb is a no-op TRB, it means the
		 * corresponding TD has been cancelled. Just ignore
		 * the TD.
		 */
		if (trb_is_noop(ep_trb)) {
			xhci_dbg(xhci, "ep_trb is a no-op TRB. Skip it\n");
			goto cleanup;
		}

		/* update the urb's actual_length and give back to the core */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			process_isoc_td(xhci, td, ep_trb, event, ep, &status);
		else
			process_bulk_intr_td(xhci, td, ep_trb, event, ep,
					     &status);
cleanup:
		handling_skipped_tds = ep->skip &&
			trb_comp_code != COMP_MISSED_INT &&
			trb_comp_code != COMP_PING_ERR;

		/*
		 * Do not update event ring dequeue pointer if we're in a loop
		 * processing missed tds.
		 */
		if (!handling_skipped_tds)
			inc_deq(xhci, xhci->event_ring);

	/*
	 * If ep->skip is set, there are missed TDs on the endpoint ring
	 * that need to be taken care of.  Process them as short transfers
	 * until we reach the TD pointed to by the event.
	 */
	} while (handling_skipped_tds);

	return 0;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done.  In future, <0 returns should indicate error code.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
2524 2525
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	/* Event ring hasn't been allocated yet. */
	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci_err(xhci, "ERROR event ring not ready\n");
		return -ENOMEM;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    xhci->event_ring->cycle_state)
		return 0;

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();
	/* FIXME: Handle more event types. */
	switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret >= 0)
			update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_DEV_NOTE):
		handle_device_notification(xhci, event);
		break;
	default:
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
		    TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci_warn(xhci, "ERROR unknown event type %d\n",
				  TRB_FIELD_TO_TYPE(
				  le32_to_cpu(event->event_cmd.flags)));
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring);

2585 2586 2587 2588
	/* Are there more items on the event ring?  Caller will call us again to
	 * check.
	 */
	return 1;
2589
}
2590 2591 2592 2593 2594 2595 2596 2597 2598

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 status;
	u64 temp_64;
	union xhci_trb *event_ring_deq;
	dma_addr_t deq;

	spin_lock(&xhci->lock);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = readl(&xhci->op_regs->status);
	if (status == 0xffffffff)
		goto hw_died;

	if (!(status & STS_EINT)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		spin_unlock(&xhci->lock);
		return IRQ_HANDLED;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	writel(status, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	if (hcd->irq) {
		u32 irq_pending;
		/* Acknowledge the PCI interrupt */
		irq_pending = readl(&xhci->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		writel(irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING ||
	    xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
				&xhci->ir_set->erst_dequeue);
		spin_unlock(&xhci->lock);

		return IRQ_HANDLED;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event "
					"ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer: keep the low DESI/EHB
		 * flag bits and replace the pointer bits.
		 */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);

	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}

irqreturn_t xhci_msi_irq(int irq, void *hcd)
{
	return xhci_irq(hcd);
}

/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);
	inc_enq(xhci, ring, more_trbs_coming);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	unsigned int num_trbs_needed;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

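	/*
	 * Expand the ring until there is room for num_trbs; the command
	 * ring is fixed-size and cannot be expanded.
	 */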
	while (1) {
		if (room_on_ring(xhci, ep_ring, num_trbs))
			break;

		if (ep_ring == xhci->cmd_ring) {
			xhci_err(xhci, "Do not support expand command ring\n");
			return -ENOMEM;
		}

		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
				"ERROR no room on ep ring, try ring expansion");
		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
					mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	while (trb_is_link(ep_ring->enqueue)) {
		/* If we're not dealing with 0.95 hardware or isoc rings
		 * on AMD 0.96 host, clear the chain bit.
		 */
		if (!xhci_link_trb_quirk(xhci) &&
		    !(ep_ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)))
			ep_ring->enqueue->link.control &=
				cpu_to_le32(~TRB_CHAIN);
		else
			ep_ring->enqueue->link.control |=
				cpu_to_le32(TRB_CHAIN);
		wmb();
		ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(ep_ring->enqueue))
			ep_ring->cycle_state ^= 1;
		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;
	}
	return 0;
}

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td	*td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	urb_priv->td[td_index] = td;

	return 0;
}

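/*
 * A single TRB may not cross a 64 KB boundary, so count how many TRBs a
 * buffer needs.  For example (illustrative numbers), a 100000-byte buffer
 * starting 512 bytes into a 64 KB-aligned region needs
 * DIV_ROUND_UP(100000 + 512, 65536) = 2 TRBs.
 */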
static unsigned int count_trbs(u64 addr, u64 len)
{
	unsigned int num_trbs;

	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}

static inline unsigned int count_trbs_needed(struct urb *urb)
{
	return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
}

static unsigned int count_sg_trbs_needed(struct urb *urb)
{
	struct scatterlist *sg;
	unsigned int i, len, full_len, num_trbs = 0;

	full_len = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		len = sg_dma_len(sg);
		num_trbs += count_trbs(sg_dma_address(sg), len);
		len = min_t(unsigned int, len, full_len);
		full_len -= len;
		if (full_len == 0)
			break;
	}

	return num_trbs;
}

static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
{
	u64 addr, len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	len = urb->iso_frame_desc[i].length;

	return count_trbs(addr, len);
}

static void check_trb_math(struct urb *urb, int running_total)
{
	if (unlikely(running_total != urb->transfer_buffer_length))
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

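/*
 * Hand the queued TRBs over to the hardware: write the saved cycle bit into
 * the first TRB last, so the controller cannot see a partially written TD,
 * then ring the endpoint doorbell.
 */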
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}

static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
						struct xhci_ep_ctx *ep_ctx)
{
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;

	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
}

/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx;

	ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
	check_interval(xhci, urb, ep_ctx);

	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/*
 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
 * packets remaining in the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * For xHCI 0.96 and older, TD size field should be the remaining bytes
 * including this TRB, right shifted by 10
 *
 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
 * This is taken care of in the TRB_TD_SIZE() macro
 *
 * The last TRB in a TD must have the TD size set to zero.
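 *
 * Worked example (illustrative numbers): a 3000-byte TD on an endpoint with
 * wMaxPacketSize 512 has total_packet_count = DIV_ROUND_UP(3000, 512) = 6.
 * On a 1.0 host, the first TRB, carrying 1024 bytes (transferred = 0), gets
 * TD size = 6 - (0 + 1024) / 512 = 4: four packets remain after it.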
 */
static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
			      int trb_buff_len, unsigned int td_total_len,
			      struct urb *urb, bool more_trbs_coming)
{
	u32 maxp, total_packet_count;

	/* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
	if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
		return ((td_total_len - transferred) >> 10);

	/* One TRB with a zero-length data packet. */
	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
	    trb_buff_len == td_total_len)
		return 0;

	/* for MTK xHCI, TD size doesn't include this TRB */
	if (xhci->quirks & XHCI_MTK_HOST)
		trb_buff_len = 0;

	maxp = usb_endpoint_maxp(&urb->ep->desc);
	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);

	/* Queueing functions don't count the current TRB into transferred */
	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}

static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
			 u32 *trb_buff_len, struct xhci_segment *seg)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int unalign;
	unsigned int max_pkt;
	u32 new_buff_len;

	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
	unalign = (enqd_len + *trb_buff_len) % max_pkt;

	/* we got lucky, last normal TRB data on segment is packet aligned */
	if (unalign == 0)
		return 0;

	xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
		 unalign, *trb_buff_len);

	/* is the last normal TRB alignable by splitting it */
	if (*trb_buff_len > unalign) {
		*trb_buff_len -= unalign;
		xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
		return 0;
	}

	/*
	 * We want enqd_len + trb_buff_len to sum up to a number which is
	 * divisible by the endpoint's wMaxPacketSize. IOW:
	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
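	 *
	 * For example (illustrative numbers): with max_pkt = 1024 and
	 * enqd_len = 3000, 3000 % 1024 = 952, so new_buff_len below is
	 * 1024 - 952 = 72, and 3000 + 72 = 3072 = 3 * 1024 is packet aligned.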
	 */
	new_buff_len = max_pkt - (enqd_len % max_pkt);

	if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
		new_buff_len = (urb->transfer_buffer_length - enqd_len);

	/* create a max max_pkt sized bounce buffer pointed to by last trb */
	if (usb_urb_dir_out(urb)) {
		sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
				   seg->bounce_buf, new_buff_len, enqd_len);
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_TO_DEVICE);
	} else {
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_FROM_DEVICE);
	}

	if (dma_mapping_error(dev, seg->bounce_dma)) {
		/* try without aligning. Some host controllers survive */
		xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
		return 0;
	}
	*trb_buff_len = new_buff_len;
	seg->bounce_len = new_buff_len;
	seg->bounce_offs = enqd_len;

	xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);

	return 1;
}

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_generic_trb *start_trb;
	struct scatterlist *sg = NULL;
	bool more_trbs_coming = true;
	bool need_zero_pkt = false;
	bool first_trb = true;
	unsigned int num_trbs;
	unsigned int start_cycle, num_sgs = 0;
	unsigned int enqd_len, block_len, trb_buff_len, full_len;
	int sent_len, ret;
	u32 field, length_field, remainder;
	u64 addr, send_addr;

	ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ring)
		return -EINVAL;

	full_len = urb->transfer_buffer_length;
	/* If we have scatter/gather list, we use it. */
	if (urb->num_sgs) {
		num_sgs = urb->num_mapped_sgs;
		sg = urb->sg;
		addr = (u64) sg_dma_address(sg);
		block_len = sg_dma_len(sg);
		num_trbs = count_sg_trbs_needed(urb);
	} else {
		num_trbs = count_trbs_needed(urb);
		addr = (u64) urb->transfer_dma;
		block_len = full_len;
	}
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (unlikely(ret < 0))
		return ret;

	urb_priv = urb->hcpriv;

	/* Deal with URB_ZERO_PACKET - need one more td/trb */
	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
		need_zero_pkt = true;

3119 3120
	td = urb_priv->td[0];

3121 3122 3123 3124 3125
	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
3126 3127
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;
3128
	send_addr = addr;
3129

	/* Queue the TRBs, even if they are zero-length */
	for (enqd_len = 0; first_trb || enqd_len < full_len;
			enqd_len += trb_buff_len) {
		field = TRB_TYPE(TRB_NORMAL);

		/* TRB buffer should not cross 64KB boundaries */
		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
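		/*
		 * Illustrative example: if addr were 0x2ff00, the next 64KB
		 * boundary is 0x30000, so TRB_BUFF_LEN_UP_TO_BOUNDARY() caps
		 * this TRB at 0x100 bytes; the rest of the block is carried
		 * by subsequent TRBs.
		 */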

		if (enqd_len + trb_buff_len > full_len)
			trb_buff_len = full_len - enqd_len;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else
			field |= ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (enqd_len + trb_buff_len < full_len) {
			field |= TRB_CHAIN;
			if (trb_is_link(ring->enqueue + 1)) {
				if (xhci_align_td(xhci, urb, enqd_len,
						  &trb_buff_len,
						  ring->enq_seg)) {
					send_addr = ring->enq_seg->bounce_dma;
					/* assuming TD won't span 2 segs */
					td->bounce_seg = ring->enq_seg;
				}
			}
		}
		if (enqd_len + trb_buff_len >= full_len) {
			field &= ~TRB_CHAIN;
			field |= TRB_IOC;
			more_trbs_coming = false;
			td->last_trb = ring->enqueue;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
					      full_len, urb, more_trbs_coming);
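		/*
		 * Note: the TD size value computed above is how many packets
		 * remain in this TD after the current TRB; the field is only
		 * 5 bits wide, so xhci_td_remainder() caps it at 31.
		 */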

		length_field = TRB_LEN(trb_buff_len) |
			TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);

		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
				lower_32_bits(send_addr),
				upper_32_bits(send_addr),
				length_field,
				field);

		addr += trb_buff_len;
		sent_len = trb_buff_len;

		while (sg && sent_len >= block_len) {
			/* New sg entry */
			--num_sgs;
			sent_len -= block_len;
			if (num_sgs != 0) {
				sg = sg_next(sg);
				block_len = sg_dma_len(sg);
				addr = (u64) sg_dma_address(sg);
				addr += sent_len;
			}
		}
		block_len -= sent_len;
		send_addr = addr;
	}

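	/*
	 * URB_ZERO_PACKET: queue one extra TD containing a single zero-length
	 * TRB, so the device sees a short packet that terminates the transfer.
	 */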
	if (need_zero_pkt) {
		ret = prepare_transfer(xhci, xhci->devs[slot_id],
				       ep_index, urb->stream_id,
				       1, urb, 1, mem_flags);
		urb_priv->td[1]->last_trb = ring->enqueue;
		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
	}

	check_trb_math(urb, enqd_len);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field, remainder;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB.
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
	if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);
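
	/*
	 * For example (illustrative values), a standard GET_DESCRIPTOR(Device)
	 * setup packet (bRequestType 0x80, bRequest 0x06, wValue 0x0100,
	 * wIndex 0, wLength 18) is packed above as field1 = 0x01000680 and
	 * field2 = 0x00120000.
	 */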

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	remainder = xhci_td_remainder(xhci, 0,
				   urb->transfer_buffer_length,
				   urb->transfer_buffer_length,
				   urb, 1);

	length_field = TRB_LEN(urb->transfer_buffer_length) |
		TRB_TD_SIZE(remainder) |
		TRB_INTR_TARGET(0);

	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}

/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD.  Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst.  Basically, for everything but SuperSpeed devices, this field will be
 * zero.  Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
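	/*
	 * Worked example (hypothetical): 11 packets with bMaxBurst = 3 move
	 * in bursts of up to 4 packets, so 3 bursts are needed and the
	 * zero-based result is DIV_ROUND_UP(11, 4) - 1 = 2.
	 */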
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}

/*
 * Returns the number of packets in the last "burst" of packets.  This field is
 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	if (urb->dev->speed >= USB_SPEED_SUPER) {
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
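		/*
		 * Worked example (hypothetical): 11 packets with bMaxBurst = 3
		 * leave residue = 11 % 4 = 3, so the last burst carries 3
		 * packets and the zero-based value returned is 2.
		 */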
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	}
	if (total_packet_count == 0)
		return 0;
	return total_packet_count - 1;
}

/*
 * Calculate the Frame ID field of an isochronous TRB, which identifies the
 * target frame that the Interval associated with this Isochronous Transfer
 * Descriptor will start on. Refer to section 4.11.2.5 in the xHCI 1.1 spec.
 *
 * Returns the actual frame id on success, negative value on error.
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
		struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 *
	 * If bit [3] of IST is cleared to '0', software can add a TRB no
	 * later than IST[2:0] Microframes before that TRB is scheduled to
	 * be executed.
	 * If bit [3] of IST is set to '1', software can add a TRB no later
	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
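	/*
	 * e.g. (illustrative): HCSPARAMS2 reporting IST = 0x9 has bit [3] set
	 * with IST[2:0] = 1, i.e. a threshold of 1 Frame, which the shift
	 * above converts to 8 microframes.
	 */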

	/* Software shall not schedule an Isoch TD with a Frame ID value that
	 * is less than the Start Frame ID or greater than the End Frame ID,
	 * where:
	 *
	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
	 *
	 * Both the End Frame ID and Start Frame ID values are calculated
	 * in microframes. When software determines the valid Frame ID value,
	 * the End Frame ID value should be rounded down to the nearest Frame
	 * boundary, and the Start Frame ID value should be rounded up to the
	 * nearest Frame boundary.
	 */
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	start_frame_id = roundup(current_frame_id + ist + 1, 8);
	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

	start_frame &= 0x7ff;
	start_frame_id = (start_frame_id >> 3) & 0x7ff;
	end_frame_id = (end_frame_id >> 3) & 0x7ff;

	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
		 __func__, index, readl(&xhci->run_regs->microframe_index),
		 start_frame_id, end_frame_id, start_frame);

	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
				start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if ((start_frame > end_frame_id &&
				start_frame < start_frame_id))
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (index == 0) {
		if (ret == -EINVAL || start_frame == start_frame_id) {
			start_frame = start_frame_id + 1;
			if (urb->dev->speed == USB_SPEED_LOW ||
					urb->dev->speed == USB_SPEED_FULL)
				urb->start_frame = start_frame;
			else
				urb->start_frame = start_frame << 3;
			ret = 0;
		}
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
				start_frame, current_frame_id, index,
				start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		return ret;
	}

	return start_frame;
}

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;
	struct xhci_virt_ep *xep;
	int frame_id;

	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}
	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the TRBs for each TD, even if they are zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_pkt_count, max_pkt;
		unsigned int burst_count, last_burst_pkt_count;
		u32 sia_frame_id;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

		/* A zero-length transfer still involves at least one packet. */
		if (total_pkt_count == 0)
			total_pkt_count++;
		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
							urb, total_pkt_count);

		trbs_per_td = count_isoc_trbs_needed(urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}
		td = urb_priv->td[i];

		/* use SIA as default, if frame id is used overwrite it */
		sia_frame_id = TRB_SIA;
		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
		    HCC_CFC(xhci->hcc_params)) {
			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
			if (frame_id >= 0)
				sia_frame_id = TRB_FRAME_ID(frame_id);
		}
		/*
		 * Set isoc specific data for the first TRB in a TD.
		 * Prevent HW from getting the TRBs by keeping the cycle state
		 * inverted in the first TD's isoc TRB.
		 */
		field = TRB_TYPE(TRB_ISOC) |
			TRB_TLBPC(last_burst_pkt_count) |
			sia_frame_id |
			(i ? ep_ring->cycle_state : !start_cycle);

		/* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
		if (!xep->use_extended_tbc)
			field |= TRB_TBC(burst_count);

		/* fill the rest of the TRB fields, and remaining normal TRBs */
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;

			/* only first TRB is isoc, overwrite otherwise */
			if (!first_trb)
				field = TRB_TYPE(TRB_NORMAL) |
					ep_ring->cycle_state;

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Set the chain bit for all except the last TRB  */
			if (j < trbs_per_td - 1) {
				more_trbs_coming = true;
				field |= TRB_CHAIN;
			} else {
				more_trbs_coming = false;
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				/* set BEI, except for the last TD */
				if (xhci->hci_version >= 0x100 &&
				    !(xhci->quirks & XHCI_AVOID_BEI) &&
				    i < num_tds - 1)
					field |= TRB_BEI;
			}
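			/*
			 * Note: a TRB with BEI set still generates a transfer
			 * event, but the event is held back from interrupting,
			 * so one interrupt can cover all TDs of the URB.
			 */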
			/* Calculate TRB length */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			remainder = xhci_td_remainder(xhci, running_total,
						   trb_buff_len, td_len,
						   urb, more_trbs_coming);

			length_field = TRB_LEN(trb_buff_len) |
				TRB_INTR_TARGET(0);

			/* xhci 1.1 with ETE uses TD Size field for TBC */
			if (first_trb && xep->use_extended_tbc)
				length_field |= TRB_TD_SIZE_TBC(burst_count);
			else
				length_field |= TRB_TD_SIZE(remainder);
			first_trb = false;

			queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* store the next frame id */
	if (HCC_CFC(xhci->hcc_params))
		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit. That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them.  td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

/*
 * Check the transfer ring to guarantee there is enough room for the urb.
 * Update ISO URB start_frame and interval.
 * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
 * Contiguous Frame ID is not supported by HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int num_tds, num_trbs, i;
	int ret;
	struct xhci_virt_ep *xep;
	int ist;

	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	/*
	 * Check interval value. This should be done before we start to
	 * calculate the start frame value.
	 */
	check_interval(xhci, urb, ep_ctx);

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
			urb->start_frame = xep->next_frame_id;
			goto skip_start_over;
		}
	}

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;
	/*
	 * Round up to the next frame and consider the time before the trb
	 * really gets scheduled by hardware.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
	start_frame += ist + XHCI_CFC_DELAY;
	start_frame = roundup(start_frame, 8);

	/*
	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
	 * is greater than 8 microframes.
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL) {
		start_frame = roundup(start_frame, urb->interval << 3);
		urb->start_frame = start_frame >> 3;
	} else {
		start_frame = roundup(start_frame, urb->interval);
		urb->start_frame = start_frame;
	}
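
	/*
	 * e.g. (illustrative): a full-speed URB with interval = 4 frames has
	 * start_frame rounded up to a multiple of 4 << 3 = 32 microframes,
	 * and urb->start_frame is then stored in frames (>> 3).
	 */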

skip_start_over:
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
 */
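
/*
 * For example (illustrative only; not taken from this file), a No Op
 * command TRB could be queued through this helper as:
 *
 *	queue_command(xhci, cmd, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
 */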
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
		(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
		return -ESHUTDOWN;
	}

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	/* if there are no other commands queued we start the timeout timer */
	if (xhci->cmd_list.next == &cmd->cmd_list &&
	    !timer_pending(&xhci->cmd_timer)) {
		xhci->current_cmd = cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 trb_sct = 0;
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;
	struct xhci_command *cmd;
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
		deq_state->new_deq_seg,
		(unsigned long long)deq_state->new_deq_seg->dma,
		deq_state->new_deq_ptr,
		(unsigned long long)xhci_trb_virt_to_dma(
			deq_state->new_deq_seg, deq_state->new_deq_ptr),
		deq_state->new_cycle_state);

	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
				    deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
		return;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!cmd) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
		return;
	}

	ep->queued_deq_seg = deq_state->new_deq_seg;
	ep->queued_deq_ptr = deq_state->new_deq_ptr;
	if (stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
		upper_32_bits(addr), trb_stream_id,
		trb_slot_id | trb_ep_index | type, false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return;
	}

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}