/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */
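
/*
 * Illustrative sketch of the consumer rules above; this is not driver code
 * and process_trb() is a hypothetical helper.  A consumer owns a TRB while
 * the TRB's cycle bit matches the ring's cycle state:
 *
 *	union xhci_trb *trb = ring->dequeue;
 *
 *	while ((le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) ==
 *	       ring->cycle_state) {
 *		process_trb(trb);
 *		inc_deq(xhci, ring);	(may toggle ring->cycle_state)
 *		trb = ring->dequeue;
 *	}
 *
 * Once the producer wraps past a link TRB with the toggle bit set, it starts
 * writing the opposite cycle bit, so TRBs left over from the previous lap
 * fail this ownership test.
 */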

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
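
/*
 * For example, with seg->dma == 0x1000 (an address made up for
 * illustration), the third TRB in the segment, seg->trbs[2], maps to
 * 0x1000 + 2 * sizeof(union xhci_trb) == 0x1020, since each TRB is 16
 * bytes.
 */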

static bool trb_is_noop(union xhci_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->td_cnt == urb_priv->length;
}

static void inc_td_cnt(struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->td_cnt++;
}

static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
	if (trb_is_link(trb)) {
		/* unchain chained link TRBs */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* Preserve only the cycle bit of this TRB */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	ring->deq_updates++;

	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			return;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}
	return;
}
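
/*
 * Example walk (illustrative): on a two-segment event ring, calling
 * inc_deq() from the last TRB of segment 1 simply moves the dequeue pointer
 * to the first TRB of segment 2, while calling it from the last TRB of
 * segment 2 also toggles cycle_state, because last_trb_on_ring() sees that
 * seg->next wraps back to first_seg there.
 */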

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (!trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB */
	while (trb_is_link(next)) {

		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet. We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}
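
/*
 * Illustrative scenario for the loop above: the TRB just enqueued sits
 * right before a link TRB.  If that TRB was mid-TD (chain bit set), the
 * chain bit is carried into the link TRB and the link TRB is handed to the
 * hardware by flipping its cycle bit, so the HW follows the link within the
 * same TD.  If the TRB ended its TD and more_trbs_coming is false, the link
 * TRB is left alone here and handed over later by prepare_ring().
 */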

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}
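
/*
 * Typical use (sketch; assumes the caller holds xhci->lock, as the queueing
 * paths in this driver do):
 *
 *	if (!room_on_ring(xhci, ep_ring, num_trbs))
 *		return -ENOMEM;		(or try to expand the ring first)
 *
 * The extra num_trbs_in_deq_seg term keeps the enqueue pointer from
 * advancing into the segment the HW may still be draining.
 */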

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
	return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
	return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
					cmd_list);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

		if (i_cmd->status != COMP_COMMAND_ABORTED)
			continue;

		i_cmd->status = COMP_STOPPED;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);

		trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

		/*
		 * the caller waiting for completion is notified when a command
		 * completion event is received for these no-op commands
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
}

/* Must be called with xhci->lock held, releases and re-acquires the lock */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	reinit_completion(&xhci->cmd_ring_stop_completion);

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
	 * time the completion of all xHCI commands, including
	 * the Command Abort operation. If software doesn't see
	 * CRR negated in a timely manner (e.g. longer than 5
	 * seconds), then it should assume that there are
	 * larger problems with the xHC and assert HCRST.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		xhci_err(xhci,
			 "Stop command ring failed, maybe the host is dead\n");
		xhci->xhc_state |= XHCI_STATE_DYING;
		xhci_halt(xhci);
		return -ESHUTDOWN;
	}
	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Wait 2 secs (arbitrary
	 * number) to handle those cases after negation of CMD_RING_RUNNING.
	 */
	spin_unlock_irqrestore(&xhci->lock, flags);
	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
					  msecs_to_jiffies(2000));
	spin_lock_irqsave(&xhci->lock, flags);
	if (!ret) {
		xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
	} else {
		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
	}
	return 0;
}
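
/*
 * Abort sequence sketch: set the CMD_RING_ABORT bit in CRCR, poll up to
 * five seconds for CMD_RING_RUNNING to clear, then (with the lock dropped)
 * wait up to two more seconds for the Command Ring Stopped completion
 * event.  If that event never arrives, the command queue is cleaned up here
 * instead of in the stopped-event path.
 */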

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
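
/*
 * For example, an endpoint set up with num_streams == 4 exposes stream
 * rings 1..3: stream ID 0 is rejected above as invalid, and IDs >= 4 fall
 * through to the out-of-range warning.
 */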

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");
	/* 4.6.9 the css flag is written to the stream context for streams */
	if (ep->ep_state & EP_HAS_STREAMS) {
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		hw_dequeue = le64_to_cpu(ctx->stream_ring);
	} else {
		struct xhci_ep_ctx *ep_ctx
			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
		hw_dequeue = le64_to_cpu(ep_ctx->deq);
	}

	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb). We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}
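
/*
 * Worked example (illustrative): the HW stopped mid-TD, so hw_dequeue
 * points at a TRB inside cur_td.  The loop above first matches hw_dequeue
 * (cycle_found), keeps walking until it has passed cur_td->last_trb
 * (td_last_trb_found), and leaves new_deq on the TRB after the TD,
 * toggling new_cycle_state once for every toggling link TRB crossed after
 * the cycle was found.
 */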

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		       struct xhci_td *td, bool flip_cycle)
{
	struct xhci_segment *seg	= td->start_seg;
	union xhci_trb *trb		= td->first_trb;

	while (1) {
		trb_to_noop(trb, TRB_TR_NOOP);

		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		next_trb(xhci, ep_ring, &seg, &trb);
	}
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_STOP_CMD_PENDING;
	/* Can't del_timer_sync in interrupt */
	del_timer(&ep->stop_cmd_timer);
}

/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
				     struct xhci_td *cur_td, int status)
{
	struct urb	*urb		= cur_td->urb;
	struct urb_priv	*urb_priv	= urb->hcpriv;
	struct usb_hcd	*hcd		= bus_to_hcd(urb->dev->bus);

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs	== 0) {
			if (xhci->quirks & XHCI_AMD_PLL_FIX)
				usb_amd_quirk_pll_enable();
		}
	}
	xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&xhci->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&xhci->lock);
}

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
		struct xhci_ring *ring, struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;

	if (!ring || !seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	/* for in transfers we need to copy the data from bounce to sg */
	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
			     seg->bounce_len, seg->bounce_offs);
	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];
	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
			struct xhci_td, cancelled_td_list);

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}

	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
				ep->stopped_td->urb->stream_id, &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	ep->stopped_td = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}
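
/*
 * Timeline sketch (illustrative): URBs A and B are queued on one endpoint
 * and both are cancelled while A is executing.  After the Stop Endpoint
 * command completes, A == ep->stopped_td, so it takes path 1 above (a Set
 * TR Dequeue Pointer command past its last TRB), while B takes path 2 (its
 * TRBs become no-ops).  Both URBs are then given back from the
 * cancelled_td_list loop at the end of this function.
 */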

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;

	list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
		list_del_init(&cur_td->td_list);

		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id + 1);
			xhci_kill_ring_urbs(xhci,
					ep->stream_info->stream_rings[stream_id]);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}

	list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
			cancelled_td_list) {
		list_del_init(&cur_td->cancelled_td_list);
		inc_td_cnt(cur_td->urb);

		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 *
 * Instead we use a combination of that flag and checking if a new timer is
 * pending.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	/* bail out if cmd completed but raced with stop ep watchdog timer. */
	if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
	    timer_pending(&ep->stop_cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */

	xhci->xhc_state |= XHCI_STATE_DYING;
	ep->ep_state &= ~EP_STOP_CMD_PENDING;

	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled. If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Calling usb_hc_died()");
	usb_hc_died(xhci_to_hcd(xhci));
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}
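
/*
 * Concrete case for the initial link-TRB hop in the function above
 * (illustrative): after two back-to-back stalls, the dequeue pointer can be
 * left parked on the link TRB at the end of a segment, so the hop steps
 * into the next segment before the while loop starts comparing against
 * queued_deq_ptr.
 */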

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CONTEXT_STATE_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = GET_EP_CTX_STATE(ep_ctx);
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_SLOT_NOT_ENABLED_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

cleanup:
	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
			return;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		struct xhci_command *command, u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		command->slot_id = slot_id;
	else
		command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;
	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion "
				"for disabled slot %u\n", slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}

void xhci_handle_command_timeout(struct work_struct *work)
{
	struct xhci_hcd *xhci;
	int ret;
	unsigned long flags;
	u64 hw_ring_state;

	xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);

	spin_lock_irqsave(&xhci->lock, flags);

	/*
	 * If timeout work is pending, or current_cmd is NULL, it means we
	 * raced with command completion. Command is handled so just return.
	 */
	if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	/* mark this command to be cancelled */
	xhci->current_cmd->status = COMP_COMMAND_ABORTED;

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING))  {
		/* Prevent new doorbell, and start command abort */
		xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
		xhci_dbg(xhci, "Command timeout\n");
		ret = xhci_abort_cmd_ring(xhci, flags);
		if (unlikely(ret == -ESHUTDOWN)) {
			xhci_err(xhci, "Abort command ring failed\n");
			xhci_cleanup_command_queue(xhci);
			spin_unlock_irqrestore(&xhci->lock, flags);
			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
			xhci_dbg(xhci, "xHCI host controller is dead.\n");

			return;
		}

		goto time_out_completed;
	}

	/* host removed. Bail out */
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		xhci_dbg(xhci, "host removed, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);

		goto time_out_completed;
	}

	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);

time_out_completed:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;

	trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);

	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/*
	 * Check whether the completion event is for our internal kept
	 * command.
	 */
	if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
		xhci_warn(xhci,
			  "ERROR mismatched command completion event\n");
		return;
	}

	cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);

	cancel_delayed_work(&xhci->cmd_timer);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_STOPPED) {
		complete_all(&xhci->cmd_ring_stop_completion);
		return;
	}

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_COMMAND_ABORTED) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_COMMAND_ABORTED) {
			if (xhci->current_cmd == cmd)
				xhci->current_cmd = NULL;
			goto event_handled;
		}
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_STOPPED)
			cmd_comp_code = COMP_STOPPED;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
		break;
	}

	/* restart timer if this wasn't the last command */
	if (!list_is_singular(&xhci->cmd_list)) {
		xhci->current_cmd = list_first_entry(&cmd->cmd_list,
						struct xhci_command, cmd_list);
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	} else if (xhci->current_cmd == cmd) {
		xhci->current_cmd = NULL;
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
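
/*
 * Example with a made-up topology: if port_array reads {0x03, 0x02, 0x02,
 * 0x03}, hardware ports 1 and 4 hang off the USB 3.0 roothub and ports 2
 * and 3 off the USB 2.0 one.  For hw port_id == 4 on the USB 3.0 hcd, the
 * loop counts one earlier similar-speed port (port 1), so this returns 1,
 * i.e. the second zero-based USB 3.0 port.
 */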

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
		xhci_warn(xhci,
			  "WARN: xHC returned failed port status event\n");

	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		inc_deq(xhci, xhci->event_ring);
		return;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];
P
Peter Chen 已提交
1541 1542 1543

	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
1544
	if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
P
Peter Chen 已提交
1545 1546
		hcd = xhci->shared_hcd;

1547 1548 1549 1550
	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
1551
		bogus_port_status = true;
1552
		goto cleanup;
1553
	}
1554
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed >= HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = readl(port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = readl(&xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED_ANY(temp)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else if (!test_bit(faked_port_index,
				     &bus_state->resuming_ports)) {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
			set_bit(faked_port_index, &bus_state->resuming_ports);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
			DEV_SUPERSPEED_ANY(temp)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state).  If so, let the driver know it's
	 * out of the RExit state.
	 */
	if (!DEV_SUPERSPEED_ANY(temp) &&
			test_and_clear_bit(faked_port_index,
				&bus_state->rexit_ports)) {
		complete(&bus_state->rexit_done[faked_port_index]);
		bogus_port_status = true;
		goto cleanup;
	}

	if (hcd->speed < HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set.  When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns 0.
 */
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *start_seg,
		union xhci_trb	*start_trb,
		union xhci_trb	*end_trb,
		dma_addr_t	suspect_dma,
		bool		debug)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (debug)
			xhci_warn(xhci,
				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
				(unsigned long long)suspect_dma,
				(unsigned long long)start_dma,
				(unsigned long long)end_trb_dma,
				(unsigned long long)cur_seg->dma,
				(unsigned long long)end_seg_dma);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}
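
/*
 * Illustrative example (not part of the original source): with one 256-TRB
 * segment starting at DMA 0x1000 (16-byte TRBs, so TRBs at 0x1000..0x1FF0),
 * a TD that starts at 0x1F00 and wraps to end at 0x1100 hits the "wrapped
 * around to the top" case above: a suspect DMA of 0x1FD0 matches the tail
 * range [start_dma, end_seg_dma], 0x1050 matches the head range
 * [cur_seg->dma, end_trb_dma], and 0x1800 matches neither, so the search
 * moves on (here, returning NULL once it loops back to start_seg).
 */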

static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *ep_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	struct xhci_command *command;
	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!command)
		return;

	ep->ep_state |= EP_HALTED;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, ep_index, td);

	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * clean up the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
			trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
			trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted. The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
		struct xhci_ring *ep_ring, int *status)
{
	struct urb_priv	*urb_priv;
	struct urb *urb = NULL;

	/* Clean up the endpoint's TD list */
	urb = td->urb;
	urb_priv = urb->hcpriv;

	/* if a bounce buffer was used to align this td then unmap it */
	xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned).  Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
			  urb->transfer_buffer_length, urb->actual_length);
		urb->actual_length = 0;
		*status = 0;
	}
	list_del_init(&td->td_list);
	/* Was this TD slated to be cancelled but completed anyway? */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	inc_td_cnt(urb);
	/* Giveback the urb when all the tds are completed */
	if (last_td_in_urb(td)) {
		if ((urb->actual_length != urb->transfer_buffer_length &&
		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
		    (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
				 urb, urb->actual_length,
				 urb->transfer_buffer_length, *status);

		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			*status = 0;
		xhci_giveback_urb_in_irq(xhci, td, *status);
	}

	return 0;
}

static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	u32 trb_comp_code;
	int ep_index;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
			trb_comp_code == COMP_STOPPED ||
			trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		return 0;
	}
	if (trb_comp_code == COMP_STALL_ERROR ||
		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
						trb_comp_code)) {
		/* Issue a reset endpoint command to clear the host side
		 * halt, followed by a set dequeue command to move the
		 * dequeue pointer past the TD.
		 * The class driver clears the device side halt later.
		 */
		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
					ep_ring->stream_id, td, ep_trb);
	} else {
		/* Update ring dequeue pointer */
		while (ep_ring->dequeue != td->last_trb)
			inc_deq(xhci, ep_ring);
		inc_deq(xhci, ep_ring);
	}

td_cleanup:
	return xhci_td_cleanup(xhci, td, ep_ring, status);
}

/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
			   union xhci_trb *stop_trb)
{
	u32 sum;
	union xhci_trb *trb = ring->dequeue;
	struct xhci_segment *seg = ring->deq_seg;

	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
		if (!trb_is_noop(trb) && !trb_is_link(trb))
			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
	}
	return sum;
}
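
/*
 * Illustrative example (not part of the original source): for a TD queued as
 * three normal TRBs of 1024, 1024 and 512 bytes, an event pointing at the
 * third TRB makes sum_trb_lengths() walk the first two and return 2048; any
 * link or no-op TRBs encountered on the way contribute nothing to the sum.
 */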

/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;
	u32 remaining, requested;
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	requested = td->urb->transfer_buffer_length;
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (trb_type != TRB_STATUS) {
			xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
				  (trb_type == TRB_DATA) ? "data" : "setup");
			*status = -ESHUTDOWN;
			break;
		}
		*status = 0;
		break;
	case COMP_SHORT_PACKET:
		*status = 0;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
			td->urb->actual_length = remaining;
		else
			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
		goto finish_td;
	case COMP_STOPPED:
		switch (trb_type) {
		case TRB_SETUP:
			td->urb->actual_length = 0;
			goto finish_td;
		case TRB_DATA:
		case TRB_NORMAL:
			td->urb->actual_length = requested - remaining;
			goto finish_td;
		default:
			xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
				  trb_type);
			goto finish_td;
		}
	case COMP_STOPPED_LENGTH_INVALID:
		goto finish_td;
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
						       ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
			 trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL_ERROR:
		/* Did we transfer part of the data (middle) phase? */
		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
			td->urb->actual_length = requested - remaining;
		else if (!td->urb_length_set)
			td->urb->actual_length = 0;
		goto finish_td;
	}

	/* stopped at setup stage, no data transferred */
	if (trb_type == TRB_SETUP)
		goto finish_td;

	/*
	 * if on data stage then update the actual_length of the URB and flag it
	 * as set, so it won't be overwritten in the event for the last TRB.
	 */
	if (trb_type == TRB_DATA ||
		trb_type == TRB_NORMAL) {
		td->urb_length_set = true;
		td->urb->actual_length = requested - remaining;
		xhci_dbg(xhci, "Waiting for status stage event\n");
		return 0;
	}

	/* at status stage */
	if (!td->urb_length_set)
		td->urb->actual_length = requested;

finish_td:
	return finish_td(xhci, td, ep_trb, event, ep, status, false);
}

/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool sum_trbs_for_length = false;
	u32 remaining, requested, ep_trb_len;
	int short_framestatus;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];
	requested = frame->length;
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
		-EREMOTEIO : 0;

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (remaining) {
			frame->status = short_framestatus;
			if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
				sum_trbs_for_length = true;
			break;
		}
		frame->status = 0;
		break;
	case COMP_SHORT_PACKET:
		frame->status = short_framestatus;
		sum_trbs_for_length = true;
		break;
	case COMP_BANDWIDTH_OVERRUN_ERROR:
		frame->status = -ECOMM;
		break;
	case COMP_ISOCH_BUFFER_OVERRUN:
	case COMP_BABBLE_DETECTED_ERROR:
		frame->status = -EOVERFLOW;
		break;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
	case COMP_STALL_ERROR:
		frame->status = -EPROTO;
		break;
	case COMP_USB_TRANSACTION_ERROR:
		frame->status = -EPROTO;
		if (ep_trb != td->last_trb)
			return 0;
		break;
	case COMP_STOPPED:
		sum_trbs_for_length = true;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		/* field normally containing residue now contains transferred */
		frame->status = short_framestatus;
		requested = remaining;
		break;
	case COMP_STOPPED_LENGTH_INVALID:
		requested = 0;
		remaining = 0;
		break;
	default:
		sum_trbs_for_length = true;
		frame->status = -1;
		break;
	}

	if (sum_trbs_for_length)
		frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
			ep_trb_len - remaining;
	else
		frame->actual_length = requested;

	td->urb->actual_length += frame->actual_length;

	return finish_td(xhci, td, ep_trb, event, ep, status, false);
}

static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring);
	inc_deq(xhci, ep_ring);

	return finish_td(xhci, td, NULL, event, ep, status, true);
}

/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	u32 trb_comp_code;
	u32 remaining, requested, ep_trb_len;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	requested = td->urb->transfer_buffer_length;

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* handle success with untransferred data as short packet */
		if (ep_trb != td->last_trb || remaining) {
			xhci_warn(xhci, "WARN Successful completion on short TX\n");
			xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
				 td->urb->ep->desc.bEndpointAddress,
				 requested, remaining);
		}
		*status = 0;
		break;
	case COMP_SHORT_PACKET:
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
			 td->urb->ep->desc.bEndpointAddress,
			 requested, remaining);
		*status = 0;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		td->urb->actual_length = remaining;
		goto finish_td;
	case COMP_STOPPED_LENGTH_INVALID:
		/* stopped on ep trb with invalid length, exclude it */
		ep_trb_len	= 0;
		remaining	= 0;
		break;
	default:
		/* do nothing */
		break;
	}

	if (ep_trb == td->last_trb)
		td->urb->actual_length = requested - remaining;
	else
		td->urb->actual_length =
			sum_trb_lengths(xhci, ep_ring, ep_trb) +
			ep_trb_len - remaining;
finish_td:
	if (remaining > requested) {
		xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
			  remaining);
		td->urb->actual_length = 0;
	}
	return finish_td(xhci, td, ep_trb, event, ep, status, false);
}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t ep_trb_dma;
	struct xhci_segment *ep_seg;
	union xhci_trb *ep_trb;
	int status = -EINPROGRESS;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int td_num = 0;
	bool handling_skipped_tds = false;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring ||  GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
				"or incorrect stream ring\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	ep_trb_dma = le64_to_cpu(event->buffer);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
			break;
		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
			trb_comp_code = COMP_SHORT_PACKET;
		else
			xhci_warn_ratelimited(xhci,
					"WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
	case COMP_SHORT_PACKET:
		break;
	case COMP_STOPPED:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOPPED_LENGTH_INVALID:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STOPPED_SHORT_PACKET:
		xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
		break;
	case COMP_STALL_ERROR:
		xhci_dbg(xhci, "Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERROR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_TRANSACTION_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
		xhci_dbg(xhci, "Transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE_DETECTED_ERROR:
		xhci_dbg(xhci, "Babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DATA_BUFFER_ERROR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	case COMP_BANDWIDTH_OVERRUN_ERROR:
		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
		break;
	case COMP_ISOCH_BUFFER_OVERRUN:
		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
		break;
	case COMP_RING_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for an IN Isoch endpoint or a Ring
		 * Underrun Event for an OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_RING_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
		xhci_warn(xhci, "WARN: detect an incompatible device");
		status = -EPROTO;
		break;
	case COMP_MISSED_SERVICE_ERROR:
		/*
		 * When we encounter a missed service error, one or more isoc
		 * tds may have been missed by the xHC.
		 * Set the skip flag of the ep_ring; complete the missed tds as
		 * short transfers when processing the ep_ring next time.
		 */
		ep->skip = true;
		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
		goto cleanup;
	case COMP_NO_PING_RESPONSE_ERROR:
		ep->skip = true;
		xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
		goto cleanup;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
			  trb_comp_code);
		goto cleanup;
	}

	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * A stopped endpoint may generate an extra completion
			 * event if the device was suspended.  Don't print
			 * warnings.
			 */
			if (!(trb_comp_code == COMP_STOPPED ||
				trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
						ep_index);
				xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
						(le32_to_cpu(event->flags) &
						 TRB_TYPE_BITMASK)>>10);
				xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
			}
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip "
						"flag set. Clear skip flag.\n");
			}
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
						"Clear skip flag.\n");
			goto cleanup;
		}

		td = list_first_entry(&ep_ring->td_list, struct xhci_td,
				      td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, ep_trb_dma, false);

		/*
		 * Skip the Force Stopped Event.  The event_trb (event_dma) of
		 * the FSE is not in the current TD pointed to by
		 * ep_ring->dequeue, because the hardware dequeue pointer is
		 * still at the previous TRB of the current TD.  The previous
		 * TRB may be a Link TRB or the last TRB of the previous TD.
		 * The command completion handler will take care of the rest.
		 */
		if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
			   trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
			goto cleanup;
		}

		if (!ep_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD ep_index %d "
					"comp_code %u\n", ep_index,
					trb_comp_code);
				trb_in_td(xhci, ep_ring->deq_seg,
					  ep_ring->dequeue, td->last_trb,
					  ep_trb_dma, true);
				return -ESHUTDOWN;
			}

			skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_PACKET)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
			ep->skip = false;
		}

		ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
						sizeof(*ep_trb)];

		trace_xhci_handle_transfer(ep_ring,
				(struct xhci_generic_trb *) ep_trb);

		/*
		 * No-op TRB should not trigger interrupts.
		 * If ep_trb is a no-op TRB, it means the
		 * corresponding TD has been cancelled. Just ignore
		 * the TD.
		 */
		if (trb_is_noop(ep_trb)) {
			xhci_dbg(xhci, "ep_trb is a no-op TRB. Skip it\n");
			goto cleanup;
		}

		/* update the urb's actual_length and give back to the core */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			process_isoc_td(xhci, td, ep_trb, event, ep, &status);
		else
			process_bulk_intr_td(xhci, td, ep_trb, event, ep,
					     &status);
cleanup:
		handling_skipped_tds = ep->skip &&
			trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
			trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;

		/*
		 * Do not update event ring dequeue pointer if we're in a loop
		 * processing missed tds.
		 */
		if (!handling_skipped_tds)
			inc_deq(xhci, xhci->event_ring);

	/*
	 * If ep->skip is set, it means there are missed tds on the
	 * endpoint ring that need to be taken care of.
	 * Process them as short transfers until we reach the td pointed
	 * to by the event.
	 */
	} while (handling_skipped_tds);

	return 0;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done.  In future, <0 returns should indicate error code.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	/* Event ring hasn't been allocated yet. */
	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci_err(xhci, "ERROR event ring not ready\n");
		return -ENOMEM;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    xhci->event_ring->cycle_state)
		return 0;

	trace_xhci_handle_event(xhci->event_ring, &event->generic);

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();
	/* FIXME: Handle more event types. */
	switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret >= 0)
			update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_DEV_NOTE):
		handle_device_notification(xhci, event);
		break;
	default:
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
		    TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci_warn(xhci, "ERROR unknown event type %d\n",
				  TRB_FIELD_TO_TYPE(
				  le32_to_cpu(event->event_cmd.flags)));
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring);

	/* Are there more items on the event ring?  Caller will call us again to
	 * check.
	 */
	return 1;
}
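
/*
 * Illustrative note (not part of the original source): on a freshly
 * initialized event ring the consumer cycle state is 1 and every TRB is
 * zeroed, so the TRB_CYCLE ownership test above fails and
 * xhci_handle_event() returns 0 at once.  Once the xHC writes an event with
 * its cycle bit set to 1, the test passes and the event is consumed; after
 * the dequeue pointer wraps past the segment end, the consumer cycle state
 * flips and the ownership test inverts accordingly.
 */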

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	union xhci_trb *event_ring_deq;
	irqreturn_t ret = IRQ_NONE;
	dma_addr_t deq;
	u64 temp_64;
	u32 status;

	spin_lock(&xhci->lock);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = readl(&xhci->op_regs->status);
	if (status == 0xffffffff) {
		ret = IRQ_HANDLED;
		goto out;
	}

	if (!(status & STS_EINT))
		goto out;

	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
		ret = IRQ_HANDLED;
		goto out;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	writel(status, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	if (hcd->irq) {
		u32 irq_pending;
		/* Acknowledge the PCI interrupt */
		irq_pending = readl(&xhci->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		writel(irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING ||
	    xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
				&xhci->ir_set->erst_dequeue);
		ret = IRQ_HANDLED;
		goto out;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event "
					"ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
	ret = IRQ_HANDLED;

out:
	spin_unlock(&xhci->lock);

	return ret;
}

irqreturn_t xhci_msi_irq(int irq, void *hcd)
{
	return xhci_irq(hcd);
}

/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);

	trace_xhci_queue_trb(ring, trb);

	inc_enq(xhci, ring, more_trbs_coming);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	unsigned int num_trbs_needed;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

	while (1) {
		if (room_on_ring(xhci, ep_ring, num_trbs))
			break;

		if (ep_ring == xhci->cmd_ring) {
			xhci_err(xhci, "Do not support expand command ring\n");
			return -ENOMEM;
		}

		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
				"ERROR no room on ep ring, try ring expansion");
		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
					mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	while (trb_is_link(ep_ring->enqueue)) {
		/* If we're not dealing with 0.95 hardware or isoc rings
		 * on AMD 0.96 host, clear the chain bit.
		 */
		if (!xhci_link_trb_quirk(xhci) &&
		    !(ep_ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)))
			ep_ring->enqueue->link.control &=
				cpu_to_le32(~TRB_CHAIN);
		else
			ep_ring->enqueue->link.control |=
				cpu_to_le32(TRB_CHAIN);

		wmb();
		ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(ep_ring->enqueue))
			ep_ring->cycle_state ^= 1;

		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;
	}
	return 0;
}
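
/*
 * Illustrative note (not part of the original source): if the enqueue
 * pointer sits on a link TRB when prepare_ring() runs, the loop above hands
 * the link TRB the current cycle bit (via the TRB_CYCLE XOR) so the xHC will
 * follow it, then advances enqueue to the first TRB of the next segment.
 * When the link TRB carries the toggle flag, the producer cycle state is
 * flipped so newly queued TRBs remain distinguishable from stale ones.
 */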

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td	*td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	return 0;
}

static unsigned int count_trbs(u64 addr, u64 len)
{
	unsigned int num_trbs;

	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}
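
/*
 * Illustrative example (not part of the original source): with
 * TRB_MAX_BUFF_SIZE = 64KB, a 512-byte buffer starting 256 bytes before a
 * 64KB boundary needs DIV_ROUND_UP(512 + 65280, 65536) = 2 TRBs, because a
 * single TRB's buffer must not cross the boundary; the same 512 bytes
 * placed at the start of a 64KB block need only 1.
 */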

static inline unsigned int count_trbs_needed(struct urb *urb)
{
	return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
}

static unsigned int count_sg_trbs_needed(struct urb *urb)
{
	struct scatterlist *sg;
	unsigned int i, len, full_len, num_trbs = 0;

	full_len = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		len = sg_dma_len(sg);
		num_trbs += count_trbs(sg_dma_address(sg), len);
		len = min_t(unsigned int, len, full_len);
		full_len -= len;
		if (full_len == 0)
			break;
	}

	return num_trbs;
}

static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
{
	u64 addr, len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	len = urb->iso_frame_desc[i].length;

	return count_trbs(addr, len);
}

static void check_trb_math(struct urb *urb, int running_total)
{
	if (unlikely(running_total != urb->transfer_buffer_length))
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
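
/*
 * Illustrative note (not part of the original source): the queueing
 * functions write every TRB of a TD with the ring's current cycle state
 * except the first one, whose cycle bit is deliberately left inverted (see
 * the start_cycle handling in xhci_queue_bulk_tx() below).  Only here, after
 * the whole TD is in place, is the first TRB's cycle bit corrected, handing
 * the entire TD to the hardware atomically before the doorbell rings.
 */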

static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
						struct xhci_ep_ctx *ep_ctx)
{
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;

	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;

	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
}
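
/*
 * Illustrative example (not part of the original source): a full-speed
 * interrupt URB with urb->interval = 4 frames is compared as 32 microframes.
 * If the endpoint context encodes 32 microframes the two agree; if it
 * encodes, say, 16, urb->interval is overwritten with 16 and converted back
 * to 2 frames for the LS/FS device.
 */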

/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx;

	ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
	check_interval(xhci, urb, ep_ctx);

	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/*
 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
 * packets remaining in the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * For xHCI 0.96 and older, TD size field should be the remaining bytes
 * including this TRB, right shifted by 10
 *
 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
 * This is taken care of in the TRB_TD_SIZE() macro
 *
 * The last TRB in a TD must have the TD size set to zero.
 */
static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
			      int trb_buff_len, unsigned int td_total_len,
			      struct urb *urb, bool more_trbs_coming)
{
	u32 maxp, total_packet_count;

	/* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
	if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
		return ((td_total_len - transferred) >> 10);

	/* One TRB with a zero-length data packet. */
	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
	    trb_buff_len == td_total_len)
		return 0;

	/* for MTK xHCI, TD size doesn't include this TRB */
	if (xhci->quirks & XHCI_MTK_HOST)
		trb_buff_len = 0;

	maxp = usb_endpoint_maxp(&urb->ep->desc);
	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);

	/* Queueing functions don't count the current TRB into transferred */
	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}
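
/*
 * Illustrative example (not part of the original source), for a 1.0 host:
 * a 3072-byte bulk TD on a 512-byte wMaxPacketSize endpoint has
 * total_packet_count = DIV_ROUND_UP(3072, 512) = 6.  For the first TRB
 * (transferred = 0, trb_buff_len = 1024) the remainder is
 * 6 - (1024 / 512) = 4; for the last TRB the !more_trbs_coming test makes it
 * 0, as the spec requires.  A 0.96 host instead reports
 * (3072 - transferred) >> 10.
 */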

static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
			 u32 *trb_buff_len, struct xhci_segment *seg)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int unalign;
	unsigned int max_pkt;
	u32 new_buff_len;

	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
	unalign = (enqd_len + *trb_buff_len) % max_pkt;

	/* we got lucky, last normal TRB data on segment is packet aligned */
	if (unalign == 0)
		return 0;

	xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
		 unalign, *trb_buff_len);

	/* is the last normal TRB alignable by splitting it */
	if (*trb_buff_len > unalign) {
		*trb_buff_len -= unalign;
		xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
		return 0;
	}

	/*
	 * We want enqd_len + trb_buff_len to sum up to a number which is
	 * divisible by the endpoint's wMaxPacketSize. IOW:
	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
	 */
	new_buff_len = max_pkt - (enqd_len % max_pkt);

	if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
		new_buff_len = (urb->transfer_buffer_length - enqd_len);

	/* create a max max_pkt sized bounce buffer pointed to by last trb */
	if (usb_urb_dir_out(urb)) {
		sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
				   seg->bounce_buf, new_buff_len, enqd_len);
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_TO_DEVICE);
	} else {
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_FROM_DEVICE);
	}

	if (dma_mapping_error(dev, seg->bounce_dma)) {
		/* try without aligning. Some host controllers survive */
		xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
		return 0;
	}
	*trb_buff_len = new_buff_len;
	seg->bounce_len = new_buff_len;
	seg->bounce_offs = enqd_len;

	xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);

	return 1;
}
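
/*
 * Illustrative example (not part of the original source): with
 * max_pkt = 1024, enqd_len = 3000 and *trb_buff_len = 600, the TD would end
 * the segment at byte 3600, i.e. unalign = 3600 % 1024 = 528.  Since
 * 600 > 528, the TRB is simply shortened to 72 bytes so the segment boundary
 * lands on a packet boundary.  Had *trb_buff_len been 500 (not splittable),
 * the 72 bytes up to the boundary (1024 - 3000 % 1024) would be staged
 * through the segment's bounce buffer instead.
 */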

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_generic_trb *start_trb;
	struct scatterlist *sg = NULL;
	bool more_trbs_coming = true;
	bool need_zero_pkt = false;
	bool first_trb = true;
	unsigned int num_trbs;
	unsigned int start_cycle, num_sgs = 0;
	unsigned int enqd_len, block_len, trb_buff_len, full_len;
	int sent_len, ret;
	u32 field, length_field, remainder;
	u64 addr, send_addr;

	ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ring)
		return -EINVAL;

	full_len = urb->transfer_buffer_length;
	/* If we have scatter/gather list, we use it. */
	if (urb->num_sgs) {
		num_sgs = urb->num_mapped_sgs;
		sg = urb->sg;
		addr = (u64) sg_dma_address(sg);
		block_len = sg_dma_len(sg);
		num_trbs = count_sg_trbs_needed(urb);
	} else {
		num_trbs = count_trbs_needed(urb);
		addr = (u64) urb->transfer_dma;
		block_len = full_len;
	}
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (unlikely(ret < 0))
		return ret;

	urb_priv = urb->hcpriv;

	/* Deal with URB_ZERO_PACKET - need one more td/trb */
	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
		need_zero_pkt = true;

	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;
	send_addr = addr;

	/* Queue the TRBs, even if they are zero-length */
	for (enqd_len = 0; first_trb || enqd_len < full_len;
			enqd_len += trb_buff_len) {
		field = TRB_TYPE(TRB_NORMAL);

		/* TRB buffer should not cross 64KB boundaries */
		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);

		if (enqd_len + trb_buff_len > full_len)
			trb_buff_len = full_len - enqd_len;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else
			field |= ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (enqd_len + trb_buff_len < full_len) {
			field |= TRB_CHAIN;
			if (trb_is_link(ring->enqueue + 1)) {
				if (xhci_align_td(xhci, urb, enqd_len,
						  &trb_buff_len,
						  ring->enq_seg)) {
					send_addr = ring->enq_seg->bounce_dma;
					/* assuming TD won't span 2 segs */
					td->bounce_seg = ring->enq_seg;
				}
			}
		}
		if (enqd_len + trb_buff_len >= full_len) {
			field &= ~TRB_CHAIN;
			field |= TRB_IOC;
			more_trbs_coming = false;
			td->last_trb = ring->enqueue;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
					      full_len, urb, more_trbs_coming);

		length_field = TRB_LEN(trb_buff_len) |
			TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);

		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
				lower_32_bits(send_addr),
				upper_32_bits(send_addr),
				length_field,
				field);

		addr += trb_buff_len;
		sent_len = trb_buff_len;

		while (sg && sent_len >= block_len) {
			/* New sg entry */
			--num_sgs;
			sent_len -= block_len;
			if (num_sgs != 0) {
				sg = sg_next(sg);
				block_len = sg_dma_len(sg);
				addr = (u64) sg_dma_address(sg);
				addr += sent_len;
			}
		}
		block_len -= sent_len;
		send_addr = addr;
	}

	if (need_zero_pkt) {
		ret = prepare_transfer(xhci, xhci->devs[slot_id],
				       ep_index, urb->stream_id,
				       1, urb, 1, mem_flags);
		urb_priv->td[1]->last_trb = ring->enqueue;
		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
	}

	check_trb_math(urb, enqd_len);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

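/*
 * Note on the 64KB rule used above (illustrative values): a TRB buffer
 * must not cross a 64KB boundary, so for addr = 0x1f400 the next
 * boundary is 0x20000 and TRB_BUFF_LEN_UP_TO_BOUNDARY() limits this TRB
 * to 0xc00 (3072) bytes; the rest of the block continues in the next TRB.
 */
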
/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB.
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
	if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	if (urb->transfer_buffer_length > 0) {
		u32 length_field, remainder;

		remainder = xhci_td_remainder(xhci, 0,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				urb, 1);
		length_field = TRB_LEN(urb->transfer_buffer_length) |
				TRB_TD_SIZE(remainder) |
				TRB_INTR_TARGET(0);
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}

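/*
 * Worked example of the setup TRB packing above (a standard
 * GET_DESCRIPTOR(Device) request, shown for illustration): with
 * bmRequestType = 0x80, bRequest = 0x06, wValue = 0x0100, wIndex = 0 and
 * wLength = 18, the immediate data becomes
 * field1 = 0x80 | (0x06 << 8) | (0x0100 << 16) = 0x01000680 and
 * field2 = 0x0000 | (18 << 16) = 0x00120000.
 */
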
/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD.  Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst.  Basically, for everything but SuperSpeed devices, this field will be
 * zero.  Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}

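/*
 * Worked example (illustrative values): a SuperSpeed endpoint with
 * bMaxBurst = 3 moves up to 4 packets per burst, so a TD of
 * total_packet_count = 10 needs DIV_ROUND_UP(10, 4) = 3 bursts and the
 * zero-based TBC value returned is 3 - 1 = 2.
 */
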
/*
 * Returns the number of packets in the last "burst" of packets.  This field is
 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	if (urb->dev->speed >= USB_SPEED_SUPER) {
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	}
	if (total_packet_count == 0)
		return 0;
	return total_packet_count - 1;
}

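/*
 * Worked example (illustrative values): with bMaxBurst = 3 and
 * total_packet_count = 10, residue = 10 % 4 = 2, so the last burst
 * carries 2 packets and the zero-based TLBPC returned is 1.  With
 * total_packet_count = 8, residue is 0, the last burst is full, and
 * max_burst = 3 is returned.
 */
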
/*
 * Calculates the Frame ID field of the isochronous TRB, which identifies the
 * target frame that the Interval associated with this Isochronous Transfer
 * Descriptor will start on.  Refer to section 4.11.2.5 in the xHCI 1.1 spec.
 *
 * Returns the actual frame id on success, negative value on error.
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
		struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 *
	 * If bit [3] of IST is cleared to '0', software can add a TRB no
	 * later than IST[2:0] Microframes before that TRB is scheduled to
	 * be executed.
	 * If bit [3] of IST is set to '1', software can add a TRB no later
	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;

	/* Software shall not schedule an Isoch TD with a Frame ID value that
	 * is less than the Start Frame ID or greater than the End Frame ID,
	 * where:
	 *
	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
	 *
	 * Both the End Frame ID and Start Frame ID values are calculated
	 * in microframes.  When software determines the valid Frame ID value,
	 * the End Frame ID value should be rounded down to the nearest Frame
	 * boundary, and the Start Frame ID value should be rounded up to the
	 * nearest Frame boundary.
	 */
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	start_frame_id = roundup(current_frame_id + ist + 1, 8);
	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

	start_frame &= 0x7ff;
	start_frame_id = (start_frame_id >> 3) & 0x7ff;
	end_frame_id = (end_frame_id >> 3) & 0x7ff;

	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
		 __func__, index, readl(&xhci->run_regs->microframe_index),
		 start_frame_id, end_frame_id, start_frame);

	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
				start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if ((start_frame > end_frame_id &&
				start_frame < start_frame_id))
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (index == 0) {
		if (ret == -EINVAL || start_frame == start_frame_id) {
			start_frame = start_frame_id + 1;
			if (urb->dev->speed == USB_SPEED_LOW ||
					urb->dev->speed == USB_SPEED_FULL)
				urb->start_frame = start_frame;
			else
				urb->start_frame = start_frame << 3;
			ret = 0;
		}
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
				start_frame, current_frame_id, index,
				start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		return ret;
	}

	return start_frame;
}

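/*
 * Worked example of the IST decoding above (illustrative values): an
 * HCSPARAMS2 IST field of 0b1001 has bit [3] set, so IST[2:0] = 1 is
 * expressed in frames and ist becomes 1 << 3 = 8 microframes; an IST
 * field of 0b0100 has bit [3] clear and yields ist = 4 microframes.
 */
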
/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;
	struct xhci_virt_ep *xep;
	int frame_id;

	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}
	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the TRBs for each TD, even if they are zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_pkt_count, max_pkt;
		unsigned int burst_count, last_burst_pkt_count;
		u32 sia_frame_id;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

		/* A zero-length transfer still involves at least one packet. */
		if (total_pkt_count == 0)
			total_pkt_count++;
		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
							urb, total_pkt_count);

		trbs_per_td = count_isoc_trbs_needed(urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}
		td = urb_priv->td[i];

		/* use SIA as default, if frame id is used overwrite it */
		sia_frame_id = TRB_SIA;
		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
		    HCC_CFC(xhci->hcc_params)) {
			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
			if (frame_id >= 0)
				sia_frame_id = TRB_FRAME_ID(frame_id);
		}
		/*
		 * Set isoc specific data for the first TRB in a TD.
		 * Prevent HW from getting the TRBs by keeping the cycle state
		 * inverted in the first TD's isoc TRB.
		 */
		field = TRB_TYPE(TRB_ISOC) |
			TRB_TLBPC(last_burst_pkt_count) |
			sia_frame_id |
			(i ? ep_ring->cycle_state : !start_cycle);

		/* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
		if (!xep->use_extended_tbc)
			field |= TRB_TBC(burst_count);

		/* fill the rest of the TRB fields, and remaining normal TRBs */
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;

			/* only first TRB is isoc, overwrite otherwise */
			if (!first_trb)
				field = TRB_TYPE(TRB_NORMAL) |
					ep_ring->cycle_state;

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Set the chain bit for all except the last TRB */
			if (j < trbs_per_td - 1) {
				more_trbs_coming = true;
				field |= TRB_CHAIN;
			} else {
				more_trbs_coming = false;
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				/* set BEI, except for the last TD */
				if (xhci->hci_version >= 0x100 &&
				    !(xhci->quirks & XHCI_AVOID_BEI) &&
				    i < num_tds - 1)
					field |= TRB_BEI;
			}
			/* Calculate TRB length */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			remainder = xhci_td_remainder(xhci, running_total,
						   trb_buff_len, td_len,
						   urb, more_trbs_coming);

			length_field = TRB_LEN(trb_buff_len) |
				TRB_INTR_TARGET(0);

			/* xhci 1.1 with ETE uses TD Size field for TBC */
			if (first_trb && xep->use_extended_tbc)
				length_field |= TRB_TD_SIZE_TBC(burst_count);
			else
				length_field |= TRB_TD_SIZE(remainder);
			first_trb = false;

			queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* store the next frame id */
	if (HCC_CFC(xhci->hcc_params))
		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit.  That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them.  td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

/*
 * Check the transfer ring to guarantee there is enough room for the urb.
 * Update ISO URB start_frame and interval.
 * Update the interval as xhci_queue_intr_tx does.  Use the xhci frame_index
 * to update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or if
 * Contiguous Frame ID is not supported by the HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int num_tds, num_trbs, i;
	int ret;
	struct xhci_virt_ep *xep;
	int ist;

	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check fails.
	 */
	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	/*
	 * Check interval value. This should be done before we start to
	 * calculate the start frame value.
	 */
	check_interval(xhci, urb, ep_ctx);

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
			urb->start_frame = xep->next_frame_id;
			goto skip_start_over;
		}
	}

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;
	/*
	 * Round up to the next frame and consider the time before the trb
	 * really gets scheduled by the hardware.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
	start_frame += ist + XHCI_CFC_DELAY;
	start_frame = roundup(start_frame, 8);

	/*
	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
	 * is greater than 8 microframes.
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL) {
		start_frame = roundup(start_frame, urb->interval << 3);
		urb->start_frame = start_frame >> 3;
	} else {
		start_frame = roundup(start_frame, urb->interval);
		urb->start_frame = start_frame;
	}

skip_start_over:
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

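/*
 * Worked example of the start frame rounding above (illustrative
 * values): for a full-speed endpoint with urb->interval = 4 frames, the
 * ESIT is 4 << 3 = 32 microframes.  A start_frame of 808 microframes
 * (after the IST and XHCI_CFC_DELAY adjustments) is rounded up to 832,
 * and urb->start_frame = 832 >> 3 = 104 frames.
 */
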
/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
		(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
		return -ESHUTDOWN;
	}

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;

	/* if there are no other commands queued we start the timeout timer */
	if (list_empty(&xhci->cmd_list)) {
		xhci->current_cmd = cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	}

	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

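/*
 * Usage note for queue_command() (restating the logic above): ordinary
 * commands ask prepare_ring() for cmd_ring_reserved_trbs + 1 slots, so
 * the reservation stays untouched; a command_must_succeed command asks
 * only for the reserved count and may therefore consume a reserved slot.
 */
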
/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}


/* Set Transfer Ring Dequeue Pointer command */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 trb_sct = 0;
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;
	struct xhci_command *cmd;
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
		deq_state->new_deq_seg,
		(unsigned long long)deq_state->new_deq_seg->dma,
		deq_state->new_deq_ptr,
		(unsigned long long)xhci_trb_virt_to_dma(
			deq_state->new_deq_seg, deq_state->new_deq_ptr),
		deq_state->new_cycle_state);

	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
				    deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
		return;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!cmd) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
		return;
	}

	ep->queued_deq_seg = deq_state->new_deq_seg;
	ep->queued_deq_ptr = deq_state->new_deq_ptr;
	if (stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
		upper_32_bits(addr), trb_stream_id,
		trb_slot_id | trb_ep_index | type, false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return;
	}

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}