/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *   updates event ring dequeue pointer.  HC is the consumer for the command and
 *   endpoint rings; it generates events on the event ring for these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
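
/*
 * Illustrative sketch (hedged; not called anywhere in this file): the
 * consumer-side ownership test from the "Consumer rules" above reduces
 * to comparing a TRB's cycle bit against the ring's cycle state.
 */
static inline bool trb_owned_by_consumer(struct xhci_ring *ring,
					 union xhci_trb *trb)
{
	/* the control word is the fourth dword of every TRB */
	return (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
	       ring->cycle_state;
}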

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

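/*
 * Example (illustrative, hedged): a typical use is translating the
 * software dequeue pointer into the DMA address the controller reports
 * in events:
 *
 *	dma_addr_t deq_dma = xhci_trb_virt_to_dma(ring->deq_seg,
 *						  ring->dequeue);
 */
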
static bool trb_is_noop(union xhci_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->num_tds_done == urb_priv->num_tds;
}

static void inc_td_cnt(struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->num_tds_done++;
}

static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
	if (trb_is_link(trb)) {
		/* unchain chained link TRBs */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* Preserve only the cycle bit of this TRB */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			return;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}

	trace_xhci_inc_deq(ring);

	return;
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (!trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	/* Update the enqueue pointer further if that was a link TRB */
	while (trb_is_link(next)) {

		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet. We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}

	trace_xhci_inc_enq(ring);
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}
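
/*
 * Note (illustrative, hedged): combined with the "one free TRB" rule in
 * the header comment, a TD of N TRBs needs num_trbs_free >= N plus enough
 * headroom that the enqueue pointer stays out of the dequeue segment.
 * When this check fails, the caller (prepare_ring()) attempts to expand
 * the ring rather than overwrite unprocessed TRBs.
 */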

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
	return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
	return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
					cmd_list);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

		if (i_cmd->status != COMP_COMMAND_ABORTED)
			continue;

		i_cmd->status = COMP_COMMAND_RING_STOPPED;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);

		trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

		/*
		 * caller waiting for completion is called when command
		 *  completion event is received for these no-op commands
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
}

/* Must be called with xhci->lock held, releases and acquires lock back */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	reinit_completion(&xhci->cmd_ring_stop_completion);

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
	 * completion of the Command Abort operation. If CRR is not negated in 5
	 * seconds then driver handles it as if host died (-ENODEV).
	 * In the future we should distinguish between -ENODEV and -ETIMEDOUT
	 * and try to recover a -ETIMEDOUT with a host controller reset.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
		xhci_halt(xhci);
		xhci_hc_died(xhci);
		return ret;
	}
	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Wait 2 secs (arbitrary
	 * number) to handle those cases after negation of CMD_RING_RUNNING.
	 */
	spin_unlock_irqrestore(&xhci->lock, flags);
	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
					  msecs_to_jiffies(2000));
	spin_lock_irqsave(&xhci->lock, flags);
	if (!ret) {
		xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
	} else {
		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
	}
	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}


/*
 * Get the hw dequeue pointer xHC stopped on, either directly from the
 * endpoint context, or if streams are in use from the stream context.
 * The lowest four bits of the returned hw_dequeue carry the cycle state
 * and possible stream context type.
 */
static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
			   unsigned int ep_index, unsigned int stream_id)
{
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_stream_ctx *st_ctx;
	struct xhci_virt_ep *ep;

	ep = &vdev->eps[ep_index];

	if (ep->ep_state & EP_HAS_STREAMS) {
		st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
		return le64_to_cpu(st_ctx->stream_ring);
	}
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	return le64_to_cpu(ep_ctx->deq);
}

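/*
 * Example (illustrative, hedged): callers split the value returned by
 * xhci_get_hw_deq() into the consumer cycle state (bit 0) and the
 * 16-byte aligned dequeue DMA address:
 *
 *	u64 hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index, stream_id);
 *	int cycle = hw_deq & 0x1;
 *	dma_addr_t deq_dma = hw_deq & ~0xf;
 */
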
/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, stream id, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");

	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;
	state->stream_id = stream_id;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb). We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		       struct xhci_td *td, bool flip_cycle)
{
	struct xhci_segment *seg	= td->start_seg;
	union xhci_trb *trb		= td->first_trb;

	while (1) {
		trb_to_noop(trb, TRB_TR_NOOP);

		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		next_trb(xhci, ep_ring, &seg, &trb);
	}
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_STOP_CMD_PENDING;

	/* Can't del_timer_sync in interrupt */
	del_timer(&ep->stop_cmd_timer);
}

/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
				     struct xhci_td *cur_td, int status)
{
	struct urb	*urb		= cur_td->urb;
	struct urb_priv	*urb_priv	= urb->hcpriv;
	struct usb_hcd	*hcd		= bus_to_hcd(urb->dev->bus);

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
			if (xhci->quirks & XHCI_AMD_PLL_FIX)
				usb_amd_quirk_pll_enable();
		}
	}
	xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&xhci->lock);
	trace_xhci_urb_giveback(urb);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&xhci->lock);
}

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
		struct xhci_ring *ring, struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;

	if (!ring || !seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	/* for in transfers we need to copy the data from bounce to sg */
	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
			     seg->bounce_len, seg->bounce_offs);
	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_virt_device *vdev;
	u64 hw_deq;
	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));

	vdev = xhci->devs[slot_id];
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	trace_xhci_handle_cmd_stop_ep(ep_ctx);

	ep = &xhci->devs[slot_id]->eps[ep_index];
	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
			struct xhci_td, cancelled_td_list);

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index,
					 cur_td->urb->stream_id);
		hw_deq &= ~0xf;

		if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb,
			      cur_td->last_trb, hw_deq, false)) {
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
						    cur_td->urb->stream_id,
						    cur_td, &deq_state);
		} else {
			td_to_noop(xhci, ep_ring, cur_td, false);
		}

remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}

	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
					     &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;

	list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
		list_del_init(&cur_td->td_list);

		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 1; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			ring = ep->stream_info->stream_rings[stream_id];
			if (!ring)
				continue;

			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id);
			xhci_kill_ring_urbs(xhci, ring);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}

	list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
			cancelled_td_list) {
		list_del_init(&cur_td->cancelled_td_list);
		inc_td_cnt(cur_td->urb);

		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/*
 * host controller died, register read returns 0xffffffff
 * Complete pending commands, mark them ABORTED.
 * URBs need to be given back as usb core might be waiting with device locks
 * held for the URBs to finish during device disconnect, blocking host remove.
 *
 * Call with xhci->lock held.
 * lock is released and re-acquired while giving back urb.
 */
void xhci_hc_died(struct xhci_hcd *xhci)
{
	int i, j;

	if (xhci->xhc_state & XHCI_STATE_DYING)
		return;

	xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
	xhci->xhc_state |= XHCI_STATE_DYING;

	xhci_cleanup_command_queue(xhci);

	/* return any pending urbs, remove may be waiting for them */
	for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}

	/* inform usb core hc died if PCI remove isn't already handling it */
	if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
		usb_hc_died(xhci_to_hcd(xhci));
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and checking if a new timer is
 * pending.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	/* bail out if cmd completed but raced with stop ep watchdog timer.*/
	if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
	    timer_pending(&ep->stop_cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	ep->ep_state &= ~EP_STOP_CMD_PENDING;

	xhci_halt(xhci);

	/*
	 * handle a stop endpoint cmd timeout as if host died (-ENODEV).
	 * In the future we could distinguish between -ENODEV and -ETIMEDOUT
	 * and try to recover a -ETIMEDOUT with a host controller reset
	 */
	xhci_hc_died(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
	trace_xhci_handle_cmd_set_deq(slot_ctx);
	trace_xhci_handle_cmd_set_deq_ep(ep_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CONTEXT_STATE_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = GET_EP_CTX_STATE(ep_ctx);
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_SLOT_NOT_ENABLED_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

cleanup:
	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	struct xhci_virt_device *vdev;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	vdev = xhci->devs[slot_id];
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	trace_xhci_handle_cmd_reset_ep(ep_ctx);

	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command)
			return;

		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		struct xhci_command *command, u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		command->slot_id = slot_id;
	else
		command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_slot_ctx *slot_ctx;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_handle_cmd_disable_slot(slot_ctx);

	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
	trace_xhci_handle_cmd_config_ep(ep_ctx);

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;

	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_addr_dev(slot_ctx);
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;

	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_reset_dev(slot_ctx);

	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion "
				"for disabled slot %u\n", slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}

void xhci_handle_command_timeout(struct work_struct *work)
{
	struct xhci_hcd *xhci;
	unsigned long flags;
	u64 hw_ring_state;

	xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);

	spin_lock_irqsave(&xhci->lock, flags);

	/*
	 * If timeout work is pending, or current_cmd is NULL, it means we
	 * raced with command completion. Command is handled so just return.
	 */
	if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	/* mark this command to be cancelled */
	xhci->current_cmd->status = COMP_COMMAND_ABORTED;

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (hw_ring_state == ~(u64)0) {
		xhci_hc_died(xhci);
		goto time_out_completed;
	}

	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING))  {
		/* Prevent new doorbell, and start command abort */
		xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
		xhci_dbg(xhci, "Command timeout\n");
		xhci_abort_cmd_ring(xhci, flags);
		goto time_out_completed;
	}

	/* host removed. Bail out */
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		xhci_dbg(xhci, "host removed, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);

		goto time_out_completed;
	}

	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);

time_out_completed:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;

	trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);

	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/*
	 * Check whether the completion event is for our internal kept
	 * command.
	 */
	if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
		xhci_warn(xhci,
			  "ERROR mismatched command completion event\n");
		return;
	}

	cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);

	cancel_delayed_work(&xhci->cmd_timer);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
		complete_all(&xhci->cmd_ring_stop_completion);
		return;
	}

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_COMMAND_ABORTED) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_COMMAND_ABORTED) {
			if (xhci->current_cmd == cmd)
				xhci->current_cmd = NULL;
			goto event_handled;
		}
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		xhci_handle_cmd_addr_dev(xhci, slot_id);
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_COMMAND_RING_STOPPED)
			cmd_comp_code = COMP_COMMAND_RING_STOPPED;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
		break;
	}

	/* restart timer if this wasn't the last command */
	if (!list_is_singular(&xhci->cmd_list)) {
		xhci->current_cmd = list_first_entry(&cmd->cmd_list,
						struct xhci_command, cmd_list);
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	} else if (xhci->current_cmd == cmd) {
		xhci->current_cmd = NULL;
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
1554
	struct usb_device *udev;
1555

1556
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
1557
	if (!xhci->devs[slot_id]) {
1558 1559
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
1560 1561 1562 1563 1564 1565 1566 1567
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
1568 1569
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 portsc, cmd_reg;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
		xhci_warn(xhci,
			  "WARN: xHC returned failed port status event\n");

	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		inc_deq(xhci, xhci->event_ring);
		return;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];

	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
		hcd = xhci->shared_hcd;

	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed >= HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);
	portsc = readl(port_array[faked_port_index]);

	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if (hcd->speed >= HCD_USB3 && (portsc & PORT_PLS_MASK) == XDEV_INACTIVE)
		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);

	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		cmd_reg = readl(&xhci->op_regs->command);
		if (!(cmd_reg & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED_ANY(portsc)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else if (!test_bit(faked_port_index,
				     &bus_state->resuming_ports)) {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
			set_bit(faked_port_index, &bus_state->resuming_ports);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
			DEV_SUPERSPEED_ANY(portsc)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state).  If so, let the driver know it's
	 * out of the RExit state.
	 */
	if (!DEV_SUPERSPEED_ANY(portsc) &&
			test_and_clear_bit(faked_port_index,
				&bus_state->rexit_ports)) {
		complete(&bus_state->rexit_done[faked_port_index]);
		bogus_port_status = true;
		goto cleanup;
	}

	if (hcd->speed < HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set.  When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *start_seg,
		union xhci_trb	*start_trb,
		union xhci_trb	*end_trb,
		dma_addr_t	suspect_dma,
		bool		debug)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (debug)
			xhci_warn(xhci,
				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
				(unsigned long long)suspect_dma,
				(unsigned long long)start_dma,
				(unsigned long long)end_trb_dma,
				(unsigned long long)cur_seg->dma,
				(unsigned long long)end_seg_dma);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}
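
/*
 * Example of the search above, with made-up DMA addresses and the usual
 * 64-TRB segments (16 bytes per TRB): a segment at 0x1000 spans
 * 0x1000-0x13f0.  With start_trb at 0x1020 and end_trb at 0x1100 in that
 * segment, suspect_dma 0x1080 falls inside [0x1020, 0x1100] and the segment
 * is returned; 0x1400 matches nothing here, so the walk continues with
 * cur_seg->next.
 */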

static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *ep_trb,
		enum xhci_ep_reset_type reset_type)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	struct xhci_command *command;
	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!command)
		return;

	ep->ep_state |= EP_HALTED;

	xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);

	if (reset_type == EP_HARD_RESET)
		xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td);

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
			trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
			trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted. The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
		struct xhci_ring *ep_ring, int *status)
{
	struct urb_priv	*urb_priv;
	struct urb *urb = NULL;

	/* Clean up the endpoint's TD list */
	urb = td->urb;
	urb_priv = urb->hcpriv;

	/* if a bounce buffer was used to align this td then unmap it */
	xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned).  Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
			  urb->transfer_buffer_length, urb->actual_length);
		urb->actual_length = 0;
		*status = 0;
	}
	list_del_init(&td->td_list);
	/* Was this TD slated to be cancelled but completed anyway? */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	inc_td_cnt(urb);
	/* Giveback the urb when all the tds are completed */
	if (last_td_in_urb(td)) {
		if ((urb->actual_length != urb->transfer_buffer_length &&
		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
		    (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
				 urb, urb->actual_length,
				 urb->transfer_buffer_length, *status);

		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			*status = 0;
		xhci_giveback_urb_in_irq(xhci, td, *status);
	}

	return 0;
}

static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	u32 trb_comp_code;
	int ep_index;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
			trb_comp_code == COMP_STOPPED ||
			trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		return 0;
	}
	if (trb_comp_code == COMP_STALL_ERROR ||
		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
						trb_comp_code)) {
		/* Issue a reset endpoint command to clear the host side
		 * halt, followed by a set dequeue command to move the
		 * dequeue pointer past the TD.
		 * The class driver clears the device side halt later.
		 */
		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
					ep_ring->stream_id, td, ep_trb,
					EP_HARD_RESET);
	} else {
		/* Update ring dequeue pointer */
		while (ep_ring->dequeue != td->last_trb)
			inc_deq(xhci, ep_ring);
		inc_deq(xhci, ep_ring);
	}

	return xhci_td_cleanup(xhci, td, ep_ring, status);
}

/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
			   union xhci_trb *stop_trb)
{
	u32 sum;
	union xhci_trb *trb = ring->dequeue;
	struct xhci_segment *seg = ring->deq_seg;

	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
		if (!trb_is_noop(trb) && !trb_is_link(trb))
			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
	}
	return sum;
}
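
/*
 * For instance, if a TD queued TRBs of 512, 512 and 100 bytes and stop_trb
 * is the third one, sum_trb_lengths() returns 1024; link and no-op TRBs on
 * the way contribute nothing.  (Illustrative numbers only.)
 */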

/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;
	u32 remaining, requested;
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	requested = td->urb->transfer_buffer_length;
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (trb_type != TRB_STATUS) {
			xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
				  (trb_type == TRB_DATA) ? "data" : "setup");
			*status = -ESHUTDOWN;
			break;
		}
		*status = 0;
		break;
	case COMP_SHORT_PACKET:
		*status = 0;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
			td->urb->actual_length = remaining;
		else
			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
		goto finish_td;
	case COMP_STOPPED:
		switch (trb_type) {
		case TRB_SETUP:
			td->urb->actual_length = 0;
			goto finish_td;
		case TRB_DATA:
		case TRB_NORMAL:
			td->urb->actual_length = requested - remaining;
			goto finish_td;
		case TRB_STATUS:
			td->urb->actual_length = requested;
			goto finish_td;
		default:
			xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
				  trb_type);
			goto finish_td;
		}
	case COMP_STOPPED_LENGTH_INVALID:
		goto finish_td;
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
						       ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
			 trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL_ERROR:
		/* Did we transfer part of the data (middle) phase? */
		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
			td->urb->actual_length = requested - remaining;
		else if (!td->urb_length_set)
			td->urb->actual_length = 0;
		goto finish_td;
	}

	/* stopped at setup stage, no data transferred */
	if (trb_type == TRB_SETUP)
		goto finish_td;

	/*
	 * If we're on the data stage, update the URB's actual_length and flag
	 * it as set, so it won't be overwritten in the event for the last TRB.
	 */
	if (trb_type == TRB_DATA ||
		trb_type == TRB_NORMAL) {
		td->urb_length_set = true;
		td->urb->actual_length = requested - remaining;
		xhci_dbg(xhci, "Waiting for status stage event\n");
		return 0;
	}

	/* at status stage */
	if (!td->urb_length_set)
		td->urb->actual_length = requested;

finish_td:
	return finish_td(xhci, td, ep_trb, event, ep, status);
}
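
/*
 * Illustrative flow through process_ctrl_td() (made-up lengths): a 64-byte
 * control IN transfer that ends short after 18 bytes gets a Data Stage event
 * with remaining = 46, so actual_length becomes 64 - 46 = 18 and
 * urb_length_set is flagged; the later Status Stage event then leaves
 * actual_length alone instead of overwriting it with the full 64.
 */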

/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool sum_trbs_for_length = false;
	u32 remaining, requested, ep_trb_len;
	int short_framestatus;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->num_tds_done;
	frame = &td->urb->iso_frame_desc[idx];
	requested = frame->length;
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
		-EREMOTEIO : 0;

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (remaining) {
			frame->status = short_framestatus;
			if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
				sum_trbs_for_length = true;
			break;
		}
		frame->status = 0;
		break;
	case COMP_SHORT_PACKET:
		frame->status = short_framestatus;
		sum_trbs_for_length = true;
		break;
	case COMP_BANDWIDTH_OVERRUN_ERROR:
		frame->status = -ECOMM;
		break;
	case COMP_ISOCH_BUFFER_OVERRUN:
	case COMP_BABBLE_DETECTED_ERROR:
		frame->status = -EOVERFLOW;
		break;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
	case COMP_STALL_ERROR:
		frame->status = -EPROTO;
		break;
	case COMP_USB_TRANSACTION_ERROR:
		frame->status = -EPROTO;
		if (ep_trb != td->last_trb)
			return 0;
		break;
	case COMP_STOPPED:
		sum_trbs_for_length = true;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		/* field normally containing residue now contains transferred */
		frame->status = short_framestatus;
		requested = remaining;
		break;
	case COMP_STOPPED_LENGTH_INVALID:
		requested = 0;
		remaining = 0;
		break;
	default:
		sum_trbs_for_length = true;
		frame->status = -1;
		break;
	}

	if (sum_trbs_for_length)
		frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
			ep_trb_len - remaining;
	else
		frame->actual_length = requested;

	td->urb->actual_length += frame->actual_length;

	return finish_td(xhci, td, ep_trb, event, ep, status);
}

static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->num_tds_done;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring);
	inc_deq(xhci, ep_ring);

	return xhci_td_cleanup(xhci, td, ep_ring, status);
}

/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	u32 trb_comp_code;
	u32 remaining, requested, ep_trb_len;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	requested = td->urb->transfer_buffer_length;

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* handle success with untransferred data as short packet */
		if (ep_trb != td->last_trb || remaining) {
			xhci_warn(xhci, "WARN Successful completion on short TX\n");
			xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
				 td->urb->ep->desc.bEndpointAddress,
				 requested, remaining);
		}
		*status = 0;
		break;
	case COMP_SHORT_PACKET:
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
			 td->urb->ep->desc.bEndpointAddress,
			 requested, remaining);
		*status = 0;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		td->urb->actual_length = remaining;
		goto finish_td;
	case COMP_STOPPED_LENGTH_INVALID:
		/* stopped on ep trb with invalid length, exclude it */
		ep_trb_len	= 0;
		remaining	= 0;
		break;
	default:
		/* do nothing */
		break;
	}

	if (ep_trb == td->last_trb)
		td->urb->actual_length = requested - remaining;
	else
		td->urb->actual_length =
			sum_trb_lengths(xhci, ep_ring, ep_trb) +
			ep_trb_len - remaining;
finish_td:
	if (remaining > requested) {
		xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
			  remaining);
		td->urb->actual_length = 0;
	}
	return finish_td(xhci, td, ep_trb, event, ep, status);
}
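
/*
 * Example of the actual_length arithmetic above (assumed numbers): if the
 * event points at a 512-byte TRB that is not the TD's last TRB, with
 * remaining = 112 and no completed TRBs before it in the TD, then
 * actual_length = 0 + 512 - 112 = 400 bytes.
 */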

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t ep_trb_dma;
	struct xhci_segment *ep_seg;
	union xhci_trb *ep_trb;
	int status = -EINPROGRESS;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int td_num = 0;
	bool handling_skipped_tds = false;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	ep_trb_dma = le64_to_cpu(event->buffer);

	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
			 slot_id);
		goto err_out;
	}

	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
		xhci_err(xhci,
			 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
			 slot_id, ep_index);
		goto err_out;
	}

	/* Some transfer events don't always point to a trb, see xhci 4.17.4 */
	if (!ep_ring) {
		switch (trb_comp_code) {
		case COMP_STALL_ERROR:
		case COMP_USB_TRANSACTION_ERROR:
		case COMP_INVALID_STREAM_TYPE_ERROR:
		case COMP_INVALID_STREAM_ID_ERROR:
			xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0,
						     NULL, NULL, EP_SOFT_RESET);
			goto cleanup;
		case COMP_RING_UNDERRUN:
		case COMP_RING_OVERRUN:
			goto cleanup;
		default:
			xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
				 slot_id, ep_index);
			goto err_out;
		}
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
			break;
		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
			trb_comp_code = COMP_SHORT_PACKET;
		else
			xhci_warn_ratelimited(xhci,
					      "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
					      slot_id, ep_index);
		/* fall through */
	case COMP_SHORT_PACKET:
		break;
	/* Completion codes for endpoint stopped state */
	case COMP_STOPPED:
		xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
			 slot_id, ep_index);
		break;
	case COMP_STOPPED_LENGTH_INVALID:
		xhci_dbg(xhci,
			 "Stopped on No-op or Link TRB for slot %u ep %u\n",
			 slot_id, ep_index);
		break;
	case COMP_STOPPED_SHORT_PACKET:
		xhci_dbg(xhci,
			 "Stopped with short packet transfer detected for slot %u ep %u\n",
			 slot_id, ep_index);
		break;
	/* Completion codes for endpoint halted state */
	case COMP_STALL_ERROR:
		xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
			 ep_index);
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_SPLIT_TRANSACTION_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
		xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
			 slot_id, ep_index);
		status = -EPROTO;
		break;
	case COMP_BABBLE_DETECTED_ERROR:
		xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
			 slot_id, ep_index);
		status = -EOVERFLOW;
		break;
	/* Completion codes for endpoint error state */
	case COMP_TRB_ERROR:
		xhci_warn(xhci,
			  "WARN: TRB error for slot %u ep %u on endpoint\n",
			  slot_id, ep_index);
		status = -EILSEQ;
		break;
	/* completion codes not indicating endpoint state change */
	case COMP_DATA_BUFFER_ERROR:
		xhci_warn(xhci,
			  "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
			  slot_id, ep_index);
		status = -ENOSR;
		break;
	case COMP_BANDWIDTH_OVERRUN_ERROR:
		xhci_warn(xhci,
			  "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
			  slot_id, ep_index);
		break;
	case COMP_ISOCH_BUFFER_OVERRUN:
		xhci_warn(xhci,
			  "WARN: buffer overrun event for slot %u ep %u on endpoint\n",
			  slot_id, ep_index);
		break;
	case COMP_RING_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_RING_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_MISSED_SERVICE_ERROR:
		/*
		 * When a missed service error is encountered, one or more
		 * isoc TDs may have been missed by the xHC.
		 * Set the skip flag of the ep_ring; complete the missed TDs
		 * as short transfers the next time the ep_ring is processed.
		 */
		ep->skip = true;
		xhci_dbg(xhci,
			 "Miss service interval error for slot %u ep %u, set skip flag\n",
			 slot_id, ep_index);
		goto cleanup;
	case COMP_NO_PING_RESPONSE_ERROR:
		ep->skip = true;
		xhci_dbg(xhci,
			 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
			 slot_id, ep_index);
		goto cleanup;

	case COMP_INCOMPATIBLE_DEVICE_ERROR:
		/* needs disable slot command to recover */
		xhci_warn(xhci,
			  "WARN: detect an incompatible device for slot %u ep %u\n",
			  slot_id, ep_index);
		status = -EPROTO;
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci,
			  "ERROR Unknown event condition %u for slot %u ep %u, HC probably busted\n",
			  trb_comp_code, slot_id, ep_index);
		goto cleanup;
	}

	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * A stopped endpoint may generate an extra completion
			 * event if the device was suspended.  Don't print
			 * warnings.
			 */
			if (!(trb_comp_code == COMP_STOPPED ||
				trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
						ep_index);
			}
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
					 slot_id, ep_index);
			}
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
				 slot_id, ep_index);
			goto cleanup;
		}

		td = list_first_entry(&ep_ring->td_list, struct xhci_td,
				      td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, ep_trb_dma, false);

		/*
		 * Skip the Force Stopped Event.  The event_trb(event_dma) of
		 * FSE is not in the current TD pointed to by ep_ring->dequeue,
		 * because the hardware dequeue pointer is still at the
		 * previous TRB of the current TD.  The previous TRB may be a
		 * Link TRB or the last TRB of the previous TD.  The command
		 * completion handler will take care of the rest.
		 */
		if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
			   trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
			goto cleanup;
		}

		if (!ep_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD ep_index %d "
					"comp_code %u\n", ep_index,
					trb_comp_code);
				trb_in_td(xhci, ep_ring->deq_seg,
					  ep_ring->dequeue, td->last_trb,
					  ep_trb_dma, true);
				return -ESHUTDOWN;
			}

			skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_PACKET)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci,
				 "Found td. Clear skip flag for slot %u ep %u.\n",
				 slot_id, ep_index);
			ep->skip = false;
		}

		ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
						sizeof(*ep_trb)];

		trace_xhci_handle_transfer(ep_ring,
				(struct xhci_generic_trb *) ep_trb);

		/*
		 * No-op TRB should not trigger interrupts.
		 * If ep_trb is a no-op TRB, it means the
		 * corresponding TD has been cancelled. Just ignore
		 * the TD.
		 */
		if (trb_is_noop(ep_trb)) {
			xhci_dbg(xhci,
				 "ep_trb is a no-op TRB. Skip it for slot %u ep %u\n",
				 slot_id, ep_index);
			goto cleanup;
		}

		/* update the urb's actual_length and give back to the core */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			process_isoc_td(xhci, td, ep_trb, event, ep, &status);
		else
			process_bulk_intr_td(xhci, td, ep_trb, event, ep,
					     &status);
cleanup:
		handling_skipped_tds = ep->skip &&
			trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
			trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;

		/*
		 * Do not update event ring dequeue pointer if we're in a loop
		 * processing missed tds.
		 */
		if (!handling_skipped_tds)
			inc_deq(xhci, xhci->event_ring);

	/*
	 * If ep->skip is set, it means there are missed TDs on the
	 * endpoint ring that need to be taken care of.
	 * Process them as short transfers until we reach the TD pointed
	 * to by the event.
	 */
	} while (handling_skipped_tds);

	return 0;

err_out:
	xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
		 (unsigned long long) xhci_trb_virt_to_dma(
			 xhci->event_ring->deq_seg,
			 xhci->event_ring->dequeue),
		 lower_32_bits(le64_to_cpu(event->buffer)),
		 upper_32_bits(le64_to_cpu(event->buffer)),
		 le32_to_cpu(event->transfer_len),
		 le32_to_cpu(event->flags));
	return -ENODEV;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done.  In future, <0 returns should indicate error code.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	/* Event ring hasn't been allocated yet. */
	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci_err(xhci, "ERROR event ring not ready\n");
		return -ENOMEM;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    xhci->event_ring->cycle_state)
		return 0;

	trace_xhci_handle_event(xhci->event_ring, &event->generic);

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();
	/* FIXME: Handle more event types. */
	switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret >= 0)
			update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_DEV_NOTE):
		handle_device_notification(xhci, event);
		break;
	default:
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
		    TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci_warn(xhci, "ERROR unknown event type %d\n",
				  TRB_FIELD_TO_TYPE(
				  le32_to_cpu(event->event_cmd.flags)));
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring);

	/* Are there more items on the event ring?  Caller will call us again to
	 * check.
	 */
	return 1;
}

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	union xhci_trb *event_ring_deq;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	dma_addr_t deq;
	u64 temp_64;
	u32 status;

	spin_lock_irqsave(&xhci->lock, flags);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = readl(&xhci->op_regs->status);
	if (status == ~(u32)0) {
		xhci_hc_died(xhci);
		ret = IRQ_HANDLED;
		goto out;
	}

	if (!(status & STS_EINT))
		goto out;

	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
		ret = IRQ_HANDLED;
		goto out;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	writel(status, &xhci->op_regs->status);

	if (!hcd->msi_enabled) {
		u32 irq_pending;
		irq_pending = readl(&xhci->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		writel(irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING ||
	    xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
				&xhci->ir_set->erst_dequeue);
		ret = IRQ_HANDLED;
		goto out;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event "
					"ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
	ret = IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&xhci->lock, flags);

	return ret;
}

irqreturn_t xhci_msi_irq(int irq, void *hcd)
{
	return xhci_irq(hcd);
}

/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);

	trace_xhci_queue_trb(ring, trb);

	inc_enq(xhci, ring, more_trbs_coming);
}
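
/*
 * Sketch of a typical queue_trb() call for a single Normal TRB; the field
 * layout is a simplification of what the queueing functions below build,
 * and the values are illustrative only:
 *
 *	queue_trb(xhci, ring, false,
 *		  lower_32_bits(addr), upper_32_bits(addr),
 *		  TRB_LEN(len) | TRB_INTR_TARGET(0),
 *		  TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC);
 */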

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	unsigned int num_trbs_needed;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
		/* fall through */
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

	while (1) {
		if (room_on_ring(xhci, ep_ring, num_trbs))
			break;

		if (ep_ring == xhci->cmd_ring) {
			xhci_err(xhci, "Do not support expand command ring\n");
			return -ENOMEM;
		}

		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
				"ERROR no room on ep ring, try ring expansion");
		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
					mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	while (trb_is_link(ep_ring->enqueue)) {
		/* If we're not dealing with 0.95 hardware or isoc rings
		 * on AMD 0.96 host, clear the chain bit.
		 */
		if (!xhci_link_trb_quirk(xhci) &&
		    !(ep_ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)))
			ep_ring->enqueue->link.control &=
				cpu_to_le32(~TRB_CHAIN);
		else
			ep_ring->enqueue->link.control |=
				cpu_to_le32(TRB_CHAIN);

		wmb();
		ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(ep_ring->enqueue))
			ep_ring->cycle_state ^= 1;

		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;
	}
	return 0;
}
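
/*
 * Example of the link-TRB walk at the end of prepare_ring(): with a
 * two-segment ring whose enqueue has reached segment 2's link TRB (which
 * carries the toggle bit, since it points back to segment 1), the chain bit
 * is cleared on non-0.95 hardware, the link TRB's cycle bit is flipped so
 * the HC may follow it, ring->cycle_state toggles, and enqueue moves to
 * segment 1's first TRB.
 */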

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td	*td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = &urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	return 0;
}
static unsigned int count_trbs(u64 addr, u64 len)
{
	unsigned int num_trbs;

	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}
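
/*
 * Worked example, assuming the usual 64 KiB TRB_MAX_BUFF_SIZE: a 10000-byte
 * buffer starting 61440 bytes into a 64 KiB region crosses one boundary, so
 * count_trbs() returns DIV_ROUND_UP(61440 + 10000, 65536) = 2 TRBs.
 */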

static inline unsigned int count_trbs_needed(struct urb *urb)
{
	return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
}

static unsigned int count_sg_trbs_needed(struct urb *urb)
{
	struct scatterlist *sg;
	unsigned int i, len, full_len, num_trbs = 0;

	full_len = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		len = sg_dma_len(sg);
		num_trbs += count_trbs(sg_dma_address(sg), len);
		len = min_t(unsigned int, len, full_len);
		full_len -= len;
		if (full_len == 0)
			break;
	}

	return num_trbs;
}

static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
{
	u64 addr, len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	len = urb->iso_frame_desc[i].length;

	return count_trbs(addr, len);
}

static void check_trb_math(struct urb *urb, int running_total)
{
	if (unlikely(running_total != urb->transfer_buffer_length))
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}

static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
						struct xhci_ep_ctx *ep_ctx)
{
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;

	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;

	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
}
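
/*
 * Example (hypothetical endpoint): a full-speed interrupt URB submitted with
 * urb->interval = 4 frames is 32 microframes, while the endpoint context may
 * encode 16 microframes; check_interval() then overrides urb->interval to 16
 * and converts it back to 2 frames for the FS device.
 */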

/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx;

	ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
	check_interval(xhci, urb, ep_ctx);

	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/*
 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
 * packets remaining in the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * For xHCI 0.96 and older, TD size field should be the remaining bytes
 * including this TRB, right shifted by 10
 *
 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
 * This is taken care of in the TRB_TD_SIZE() macro
 *
 * The last TRB in a TD must have the TD size set to zero.
 */
static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
			      int trb_buff_len, unsigned int td_total_len,
			      struct urb *urb, bool more_trbs_coming)
{
	u32 maxp, total_packet_count;

	/* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
	if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
		return ((td_total_len - transferred) >> 10);

	/* One TRB with a zero-length data packet. */
	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
	    trb_buff_len == td_total_len)
		return 0;

	/* for MTK xHCI, TD size doesn't include this TRB */
	if (xhci->quirks & XHCI_MTK_HOST)
		trb_buff_len = 0;

	maxp = usb_endpoint_maxp(&urb->ep->desc);
	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);

	/* Queueing functions don't count the current TRB into transferred */
	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}
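
/*
 * Worked example for a 1.0 host (illustrative numbers): a 3072-byte bulk TD
 * with wMaxPacketSize = 512 has total_packet_count = 6.  For the TD's second
 * 1024-byte TRB (transferred = 1024, trb_buff_len = 1024, more TRBs coming),
 * the TD size field is 6 - (2048 / 512) = 2 packets remaining.
 */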

static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
			 u32 *trb_buff_len, struct xhci_segment *seg)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int unalign;
	unsigned int max_pkt;
	u32 new_buff_len;

	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
	unalign = (enqd_len + *trb_buff_len) % max_pkt;

	/* we got lucky, last normal TRB data on segment is packet aligned */
	if (unalign == 0)
		return 0;

	xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
		 unalign, *trb_buff_len);

	/* is the last normal TRB alignable by splitting it */
	if (*trb_buff_len > unalign) {
		*trb_buff_len -= unalign;
		xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
		return 0;
	}

	/*
	 * We want enqd_len + trb_buff_len to sum up to a number that is
	 * divisible by the endpoint's wMaxPacketSize. IOW:
	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
	 */
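	/*
	 * e.g. (editor's example) with max_pkt = 1024 and enqd_len = 3000:
	 * new_buff_len = 1024 - (3000 % 1024) = 72, so queueing 72 more bytes
	 * brings the TD fragment back to a packet boundary.
	 */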
	new_buff_len = max_pkt - (enqd_len % max_pkt);

	if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
		new_buff_len = (urb->transfer_buffer_length - enqd_len);

	/* create a bounce buffer (at most max_pkt bytes) pointed to by last trb */
	if (usb_urb_dir_out(urb)) {
		sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
				   seg->bounce_buf, new_buff_len, enqd_len);
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_TO_DEVICE);
	} else {
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_FROM_DEVICE);
	}

	if (dma_mapping_error(dev, seg->bounce_dma)) {
		/* try without aligning. Some host controllers survive */
		xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
		return 0;
	}
	*trb_buff_len = new_buff_len;
	seg->bounce_len = new_buff_len;
	seg->bounce_offs = enqd_len;

	xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);

	return 1;
}

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_generic_trb *start_trb;
	struct scatterlist *sg = NULL;
	bool more_trbs_coming = true;
	bool need_zero_pkt = false;
	bool first_trb = true;
	unsigned int num_trbs;
	unsigned int start_cycle, num_sgs = 0;
	unsigned int enqd_len, block_len, trb_buff_len, full_len;
	int sent_len, ret;
	u32 field, length_field, remainder;
	u64 addr, send_addr;

	ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ring)
		return -EINVAL;

	full_len = urb->transfer_buffer_length;
	/* If we have scatter/gather list, we use it. */
	if (urb->num_sgs) {
		num_sgs = urb->num_mapped_sgs;
		sg = urb->sg;
		addr = (u64) sg_dma_address(sg);
		block_len = sg_dma_len(sg);
		num_trbs = count_sg_trbs_needed(urb);
	} else {
		num_trbs = count_trbs_needed(urb);
		addr = (u64) urb->transfer_dma;
		block_len = full_len;
	}
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (unlikely(ret < 0))
		return ret;

	urb_priv = urb->hcpriv;

	/* Deal with URB_ZERO_PACKET - need one more td/trb */
	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
		need_zero_pkt = true;

	td = &urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;
	send_addr = addr;

	/* Queue the TRBs, even if they are zero-length */
	for (enqd_len = 0; first_trb || enqd_len < full_len;
			enqd_len += trb_buff_len) {
		field = TRB_TYPE(TRB_NORMAL);

		/* TRB buffer should not cross 64KB boundaries */
		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
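
		/*
		 * e.g. (editor's example) a buffer at DMA address 0x12345800
		 * has 0x10000 - 0x5800 = 0xa800 bytes left before the next
		 * 64KB boundary, so this TRB covers at most 0xa800 bytes.
		 */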
		if (enqd_len + trb_buff_len > full_len)
			trb_buff_len = full_len - enqd_len;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else
			field |= ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (enqd_len + trb_buff_len < full_len) {
			field |= TRB_CHAIN;
			if (trb_is_link(ring->enqueue + 1)) {
				if (xhci_align_td(xhci, urb, enqd_len,
						  &trb_buff_len,
						  ring->enq_seg)) {
					send_addr = ring->enq_seg->bounce_dma;
					/* assuming TD won't span 2 segs */
					td->bounce_seg = ring->enq_seg;
				}
			}
		}
		if (enqd_len + trb_buff_len >= full_len) {
			field &= ~TRB_CHAIN;
			field |= TRB_IOC;
			more_trbs_coming = false;
			td->last_trb = ring->enqueue;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
					      full_len, urb, more_trbs_coming);

		length_field = TRB_LEN(trb_buff_len) |
			TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);

		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
				lower_32_bits(send_addr),
				upper_32_bits(send_addr),
				length_field,
				field);

		addr += trb_buff_len;
		sent_len = trb_buff_len;

		while (sg && sent_len >= block_len) {
			/* New sg entry */
			--num_sgs;
			sent_len -= block_len;
			if (num_sgs != 0) {
				sg = sg_next(sg);
				block_len = sg_dma_len(sg);
				addr = (u64) sg_dma_address(sg);
				addr += sent_len;
			}
		}
		block_len -= sent_len;
		send_addr = addr;
	}

	if (need_zero_pkt) {
		ret = prepare_transfer(xhci, xhci->devs[slot_id],
				       ep_index, urb->stream_id,
				       1, urb, 1, mem_flags);
		urb_priv->td[1].last_trb = ring->enqueue;
		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
	}

	check_trb_math(urb, enqd_len);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = &urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
	if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}
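
	/*
	 * e.g. (editor's example) a standard GET_DESCRIPTOR request has
	 * bRequestType 0x80 (device-to-host), so a 1.0 host sets
	 * TRB_TX_TYPE(TRB_DATA_IN) in the setup TRB here.
	 */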

	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	if (urb->transfer_buffer_length > 0) {
		u32 length_field, remainder;

		remainder = xhci_td_remainder(xhci, 0,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				urb, 1);
		length_field = TRB_LEN(urb->transfer_buffer_length) |
				TRB_TD_SIZE(remainder) |
				TRB_INTR_TARGET(0);
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}

/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD.  Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst.  Basically, for everything but SuperSpeed devices, this field will be
 * zero.  Only xHCI 1.0 host controllers support this field.
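 *
 * e.g. (editor's example) bMaxBurst = 3 (bursts of up to 4 packets) and a
 * 10-packet TD give DIV_ROUND_UP(10, 4) - 1 = 2, i.e. three bursts, zero
 * based.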
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}

/*
 * Returns the number of packets in the last "burst" of packets.  This field is
 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
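 *
 * e.g. (editor's example) 10 packets with bMaxBurst = 3: 10 % 4 = 2 packets
 * remain in the last burst, so the zero-based value returned is 1.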
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	if (urb->dev->speed >= USB_SPEED_SUPER) {
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	}
	if (total_packet_count == 0)
		return 0;
	return total_packet_count - 1;
}

/*
 * The Frame ID field of the isochronous TRB identifies the target frame
 * on which the Interval associated with this Isochronous Transfer
 * Descriptor will start. Refer to 4.11.2.5 in the xHCI 1.1 spec.
 *
 * Returns the actual frame id on success, a negative value on error.
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
		struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 *
	 * If bit [3] of IST is cleared to '0', software can add a TRB no
	 * later than IST[2:0] Microframes before that TRB is scheduled to
	 * be executed.
	 * If bit [3] of IST is set to '1', software can add a TRB no later
	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
	 */
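	/*
	 * e.g. (editor's example) an IST field of 0xa has bit [3] set and
	 * IST[2:0] = 2, so ist becomes 2 << 3 = 16 microframes (2 frames).
	 */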
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;

	/* Software shall not schedule an Isoch TD with a Frame ID value that
	 * is less than the Start Frame ID or greater than the End Frame ID,
	 * where:
	 *
	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
	 *
	 * Both the End Frame ID and Start Frame ID values are calculated
	 * in microframes. When software determines the valid Frame ID value,
	 * the End Frame ID value should be rounded down to the nearest Frame
	 * boundary, and the Start Frame ID value should be rounded up to the
	 * nearest Frame boundary.
	 */
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	start_frame_id = roundup(current_frame_id + ist + 1, 8);
	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

	start_frame &= 0x7ff;
	start_frame_id = (start_frame_id >> 3) & 0x7ff;
	end_frame_id = (end_frame_id >> 3) & 0x7ff;

	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
		 __func__, index, readl(&xhci->run_regs->microframe_index),
		 start_frame_id, end_frame_id, start_frame);

	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
				start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if ((start_frame > end_frame_id &&
				start_frame < start_frame_id))
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (index == 0) {
		if (ret == -EINVAL || start_frame == start_frame_id) {
			start_frame = start_frame_id + 1;
			if (urb->dev->speed == USB_SPEED_LOW ||
					urb->dev->speed == USB_SPEED_FULL)
				urb->start_frame = start_frame;
			else
				urb->start_frame = start_frame << 3;
			ret = 0;
		}
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
				start_frame, current_frame_id, index,
				start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		return ret;
	}

	return start_frame;
}

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;
	struct xhci_virt_ep *xep;
	int frame_id;

	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}
	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the TRBs for each TD, even if they are zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_pkt_count, max_pkt;
		unsigned int burst_count, last_burst_pkt_count;
		u32 sia_frame_id;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

		/* A zero-length transfer still involves at least one packet. */
		if (total_pkt_count == 0)
			total_pkt_count++;
		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
							urb, total_pkt_count);

		trbs_per_td = count_isoc_trbs_needed(urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}
		td = &urb_priv->td[i];

		/* use SIA as default, if frame id is used overwrite it */
		sia_frame_id = TRB_SIA;
		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
		    HCC_CFC(xhci->hcc_params)) {
			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
			if (frame_id >= 0)
				sia_frame_id = TRB_FRAME_ID(frame_id);
		}
		/*
		 * Set isoc specific data for the first TRB in a TD.
		 * Prevent HW from getting the TRBs by keeping the cycle state
		 * inverted in the first TDs isoc TRB.
		 */
		field = TRB_TYPE(TRB_ISOC) |
			TRB_TLBPC(last_burst_pkt_count) |
			sia_frame_id |
			(i ? ep_ring->cycle_state : !start_cycle);

		/* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
		if (!xep->use_extended_tbc)
			field |= TRB_TBC(burst_count);

		/* fill the rest of the TRB fields, and remaining normal TRBs */
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;

			/* only first TRB is isoc, overwrite otherwise */
			if (!first_trb)
				field = TRB_TYPE(TRB_NORMAL) |
					ep_ring->cycle_state;

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Set the chain bit for all except the last TRB  */
			if (j < trbs_per_td - 1) {
				more_trbs_coming = true;
				field |= TRB_CHAIN;
			} else {
				more_trbs_coming = false;
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				/* set BEI, except for the last TD */
				if (xhci->hci_version >= 0x100 &&
				    !(xhci->quirks & XHCI_AVOID_BEI) &&
				    i < num_tds - 1)
					field |= TRB_BEI;
			}
			/* Calculate TRB length */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			remainder = xhci_td_remainder(xhci, running_total,
						   trb_buff_len, td_len,
						   urb, more_trbs_coming);

			length_field = TRB_LEN(trb_buff_len) |
				TRB_INTR_TARGET(0);

			/* xhci 1.1 with ETE uses TD Size field for TBC */
			if (first_trb && xep->use_extended_tbc)
				length_field |= TRB_TD_SIZE_TBC(burst_count);
			else
				length_field |= TRB_TD_SIZE(remainder);
			first_trb = false;

			queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* store the next frame id */
	if (HCC_CFC(xhci->hcc_params))
		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i].td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit. That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them.  td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0].last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0].first_trb;
	ep_ring->enq_seg = urb_priv->td[0].start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

/*
 * Check transfer ring to guarantee there is enough room for the urb.
 * Update ISO URB start_frame and interval.
 * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
 * Contiguous Frame ID is not supported by HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int num_tds, num_trbs, i;
	int ret;
	struct xhci_virt_ep *xep;
	int ist;

	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	/*
	 * Check interval value. This should be done before we start to
	 * calculate the start frame value.
	 */
	check_interval(xhci, urb, ep_ctx);

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
		if (GET_EP_CTX_STATE(ep_ctx) ==	EP_STATE_RUNNING) {
			urb->start_frame = xep->next_frame_id;
			goto skip_start_over;
		}
	}

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;
	/*
	 * Round up to the next frame and account for the time before the TRB
	 * really gets scheduled by the hardware.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
	start_frame += ist + XHCI_CFC_DELAY;
	start_frame = roundup(start_frame, 8);

	/*
	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
	 * is greater than 8 microframes.
	 */
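	/*
	 * e.g. (editor's example) a full-speed endpoint with urb->interval = 8
	 * frames has an ESIT of 8 * 8 = 64 microframes, so start_frame is
	 * rounded up to a 64-microframe boundary and stored back in frames
	 * (start_frame >> 3) below.
	 */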
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL) {
		start_frame = roundup(start_frame, urb->interval << 3);
		urb->start_frame = start_frame >> 3;
	} else {
		start_frame = roundup(start_frame, urb->interval);
		urb->start_frame = start_frame;
	}

skip_start_over:
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
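/*
 * e.g. (editor's note) with cmd_ring_reserved_trbs = 1, an ordinary command
 * asks prepare_ring() for room for 2 TRBs, keeping the one reserved slot
 * free for a command that must not fail.
 */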
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
		(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
		return -ESHUTDOWN;
	}

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;

	/* if there are no other commands queued we start the timeout timer */
	if (list_empty(&xhci->cmd_list)) {
		xhci->current_cmd = cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	}

	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
	u32 trb_sct = 0;
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;
	struct xhci_command *cmd;
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
		deq_state->new_deq_seg,
		(unsigned long long)deq_state->new_deq_seg->dma,
		deq_state->new_deq_ptr,
		(unsigned long long)xhci_trb_virt_to_dma(
			deq_state->new_deq_seg, deq_state->new_deq_ptr),
		deq_state->new_cycle_state);

	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
				    deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
		return;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!cmd)
		return;

	ep->queued_deq_seg = deq_state->new_deq_seg;
	ep->queued_deq_ptr = deq_state->new_deq_ptr;
	if (deq_state->stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
		upper_32_bits(addr), trb_stream_id,
		trb_slot_id | trb_ep_index | type, false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return;
	}

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index,
			enum xhci_ep_reset_type reset_type)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	if (reset_type == EP_SOFT_RESET)
		type |= TRB_TSP;

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}