// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates event ring dequeue pointer.  HC is the consumer for the command
 *    and endpoint rings; it generates events on the event ring for these.
 */
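
/*
 * Example (illustrative only, not part of the driver): per the consumer
 * rules above, ownership of a TRB is decided by comparing its cycle bit
 * with the ring's cycle state, roughly:
 *
 *	owned = ((le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) ==
 *		 ring->cycle_state);
 *
 * The event handling code below performs exactly this test before it
 * consumes an event TRB.
 */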

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

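/*
 * TRB type and position helpers: trb_is_noop()/trb_is_link() test the TRB
 * type field; the last_trb_* helpers detect segment and ring boundaries.
 */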
static bool trb_is_noop(union xhci_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->num_tds_done == urb_priv->num_tds;
}

static void inc_td_cnt(struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->num_tds_done++;
}

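/*
 * Turn a TRB into a no-op.  A link TRB is left in place and only has its
 * chain bit cleared; any other TRB is rewritten as a no-op of @noop_type,
 * preserving just its cycle bit so ownership is unchanged.
 */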
static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
	if (trb_is_link(trb)) {
		/* unchain chained link TRBs */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* Preserve only the cycle bit of this TRB */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			goto out;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		goto out;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}

out:
	trace_xhci_inc_deq(ring);

	return;
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (!trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	/* Update the enqueue pointer further if that was a link TRB */
	while (trb_is_link(next)) {

		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet. We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}

	trace_xhci_inc_enq(ring);
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;
	if (ring->num_trbs_free < num_trbs)
		return 0;

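	/*
	 * For transfer rings, which may be expanded at runtime, also make
	 * sure the enqueue pointer won't advance into the segment that
	 * holds the dequeue pointer; new segments are linked in behind the
	 * enqueue pointer, so this keeps expansion possible.
	 */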
	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

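/* (Re)arm the command watchdog used to detect a stalled command ring */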
static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
	return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
}

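/* Return the first (oldest) command on the command list, if any */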
static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
	return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
					cmd_list);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

		if (i_cmd->status != COMP_COMMAND_ABORTED)
			continue;

		i_cmd->status = COMP_COMMAND_RING_STOPPED;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);

		trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

		/*
		 * callers waiting for completion are notified when the command
		 * completion event is received for these no-op commands
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
}

/* Must be called with xhci->lock held, releases and re-acquires the lock */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	reinit_completion(&xhci->cmd_ring_stop_completion);

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
	 * completion of the Command Abort operation. If CRR is not negated in 5
	 * seconds then the driver handles it as if the host died (-ENODEV).
	 * In the future we should distinguish between -ENODEV and -ETIMEDOUT
	 * and try to recover a -ETIMEDOUT with a host controller reset.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
		xhci_halt(xhci);
		xhci_hc_died(xhci);
		return ret;
	}
	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Wait 2 secs (arbitrary
	 * number) to handle those cases after negation of CMD_RING_RUNNING.
	 */
	spin_unlock_irqrestore(&xhci->lock, flags);
	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
					  msecs_to_jiffies(2000));
	spin_lock_irqsave(&xhci->lock, flags);
	if (!ret) {
		xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
	} else {
		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
	}
	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/*
 * Get the hw dequeue pointer xHC stopped on, either directly from the
 * endpoint context, or if streams are in use from the stream context.
 * The returned hw_dequeue contains the lowest four bits with cycle state
 * and possible stream context type.
 */
static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
			   unsigned int ep_index, unsigned int stream_id)
{
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_stream_ctx *st_ctx;
	struct xhci_virt_ep *ep;

	ep = &vdev->eps[ep_index];

	if (ep->ep_state & EP_HAS_STREAMS) {
		st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
		return le64_to_cpu(st_ctx->stream_ring);
	}
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	return le64_to_cpu(ep_ctx->deq);
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, stream id, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");

	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;
	state->stream_id = stream_id;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb). We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		       struct xhci_td *td, bool flip_cycle)
{
	struct xhci_segment *seg	= td->start_seg;
	union xhci_trb *trb		= td->first_trb;

	while (1) {
		trb_to_noop(trb, TRB_TR_NOOP);

		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		next_trb(xhci, ep_ring, &seg, &trb);
	}
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_STOP_CMD_PENDING;

	/* Can't del_timer_sync in interrupt */
	del_timer(&ep->stop_cmd_timer);
}

/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
				     struct xhci_td *cur_td, int status)
{
	struct urb	*urb		= cur_td->urb;
	struct urb_priv	*urb_priv	= urb->hcpriv;
	struct usb_hcd	*hcd		= bus_to_hcd(urb->dev->bus);

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
			if (xhci->quirks & XHCI_AMD_PLL_FIX)
				usb_amd_quirk_pll_enable();
		}
	}
	xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&xhci->lock);
	trace_xhci_urb_giveback(urb);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&xhci->lock);
}

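/*
 * Unmap a TD's bounce buffer and, for IN transfers, copy the received data
 * back into the URB's scatter-gather list.
 */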
static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
		struct xhci_ring *ring, struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;
	size_t len;

	if (!ring || !seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	/* for IN transfers we need to copy the data from bounce to sg */
	len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
			     seg->bounce_len, seg->bounce_offs);
	if (len != seg->bounce_len)
		xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
				len, seg->bounce_len);
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_virt_device *vdev;
	u64 hw_deq;
	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));

	vdev = xhci->devs[slot_id];
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	trace_xhci_handle_cmd_stop_ep(ep_ctx);

	ep = &xhci->devs[slot_id]->eps[ep_index];
	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
			struct xhci_td, cancelled_td_list);

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index,
					 cur_td->urb->stream_id);
		hw_deq &= ~0xf;

		if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb,
			      cur_td->last_trb, hw_deq, false)) {
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
						    cur_td->urb->stream_id,
						    cur_td, &deq_state);
		} else {
			td_to_noop(xhci, ep_ring, cur_td, false);
		}

remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}

	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
					     &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

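/* Give back all TDs on a ring with -ESHUTDOWN; used when the host is dying */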
static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;

	list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
		list_del_init(&cur_td->td_list);

		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 1; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			ring = ep->stream_info->stream_rings[stream_id];
			if (!ring)
				continue;

			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id);
			xhci_kill_ring_urbs(xhci, ring);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}

	list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
			cancelled_td_list) {
		list_del_init(&cur_td->cancelled_td_list);
		inc_td_cnt(cur_td->urb);

		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/*
 * The host controller died; register reads return 0xffffffff.
 * Complete pending commands, mark them ABORTED.
 * URBs need to be given back as usb core might be waiting with device locks
 * held for the URBs to finish during device disconnect, blocking host remove.
 *
 * Call with xhci->lock held.
 * The lock is released and re-acquired while giving back URBs.
 */
void xhci_hc_died(struct xhci_hcd *xhci)
{
	int i, j;

	if (xhci->xhc_state & XHCI_STATE_DYING)
		return;

	xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
	xhci->xhc_state |= XHCI_STATE_DYING;

	xhci_cleanup_command_queue(xhci);

	/* return any pending urbs, remove may be waiting for them */
	for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}

	/* inform usb core hc died if PCI remove isn't already handling it */
	if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
		usb_hc_died(xhci_to_hcd(xhci));
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and checking if a new timer is
 * pending.
 */
void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
{
	struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
	struct xhci_hcd *xhci = ep->xhci;
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);

	/* bail out if cmd completed but raced with stop ep watchdog timer. */
	if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
	    timer_pending(&ep->stop_cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	ep->ep_state &= ~EP_STOP_CMD_PENDING;

	xhci_halt(xhci);

	/*
	 * handle a stop endpoint cmd timeout as if host died (-ENODEV).
	 * In the future we could distinguish between -ENODEV and -ETIMEDOUT
	 * and try to recover a -ETIMEDOUT with a host controller reset
	 */
	xhci_hc_died(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}

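/*
 * Walk the driver's copy of the ring dequeue pointer (and free-TRB count)
 * forward until it matches the dequeue pointer the xHC accepted in the Set
 * TR Dequeue Pointer command, reverting if that pointer is never found.
 */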
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
	trace_xhci_handle_cmd_set_deq(slot_ctx);
	trace_xhci_handle_cmd_set_deq_ep(ep_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CONTEXT_STATE_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = GET_EP_CTX_STATE(ep_ctx);
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_SLOT_NOT_ENABLED_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

cleanup:
	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	struct xhci_virt_device *vdev;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	vdev = xhci->devs[slot_id];
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	trace_xhci_handle_cmd_reset_ep(ep_ctx);

	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
		if (!command)
			return;

		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}
}
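/* Record the slot ID the xHC allocated in the command, or 0 on failure */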
static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		struct xhci_command *command, u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		command->slot_id = slot_id;
	else
		command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_slot_ctx *slot_ctx;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_handle_cmd_disable_slot(slot_ctx);

	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
	trace_xhci_handle_cmd_config_ep(ep_ctx);

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;

	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_addr_dev(slot_ctx);
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;

	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_reset_dev(slot_ctx);

	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion "
				"for disabled slot %u\n", slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;
	xhci->current_cmd = NULL;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}

void xhci_handle_command_timeout(struct work_struct *work)
{
	struct xhci_hcd *xhci;
	unsigned long flags;
	u64 hw_ring_state;

	xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);

	spin_lock_irqsave(&xhci->lock, flags);

	/*
	 * If timeout work is pending, or current_cmd is NULL, it means we
	 * raced with command completion. Command is handled so just return.
	 */
	if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	/* mark this command to be cancelled */
	xhci->current_cmd->status = COMP_COMMAND_ABORTED;

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (hw_ring_state == ~(u64)0) {
		xhci_hc_died(xhci);
		goto time_out_completed;
	}

	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING)) {
		/* Prevent new doorbell, and start command abort */
		xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
		xhci_dbg(xhci, "Command timeout\n");
		xhci_abort_cmd_ring(xhci, flags);
		goto time_out_completed;
	}

	/* host removed. Bail out */
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		xhci_dbg(xhci, "host removed, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);

		goto time_out_completed;
	}

	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);

time_out_completed:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;

	trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);

	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/*
	 * Check whether the completion event is for our internal kept
	 * command.
	 */
	if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
		xhci_warn(xhci,
			  "ERROR mismatched command completion event\n");
		return;
	}

	cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);

	cancel_delayed_work(&xhci->cmd_timer);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
		complete_all(&xhci->cmd_ring_stop_completion);
		return;
	}

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_COMMAND_ABORTED) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_COMMAND_ABORTED) {
			if (xhci->current_cmd == cmd)
				xhci->current_cmd = NULL;
			goto event_handled;
		}
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		xhci_handle_cmd_addr_dev(xhci, slot_id);
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		if (!cmd->completion)
			xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_COMMAND_RING_STOPPED)
			cmd_comp_code = COMP_COMMAND_RING_STOPPED;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
		break;
	}

	/* restart timer if this wasn't the last command */
	if (!list_is_singular(&xhci->cmd_list)) {
		xhci->current_cmd = list_first_entry(&cmd->cmd_list,
						struct xhci_command, cmd_list);
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	} else if (xhci->current_cmd == cmd) {
		xhci->current_cmd = NULL;
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

/*
 * Quirk handler for errata seen on the Cavium ThunderX2 processor xHCI
 * controller.
 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
 * if a connection to a USB 1 device is followed by another connection
 * to a USB 2 device.
 *
 * Reset the PHY after the USB device is disconnected if device speed
 * is less than HCD_USB3.
 * Retry the reset sequence a maximum of 4 times, checking the PLL lock status.
 */
static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 pll_lock_check;
	u32 retry_count = 4;

	do {
		/* Assert PHY reset */
		writel(0x6F, hcd->regs + 0x1048);
		udelay(10);
		/* De-assert the PHY reset */
		writel(0x7F, hcd->regs + 0x1048);
		udelay(200);
		pll_lock_check = readl(hcd->regs + 0x1070);
	} while (!(pll_lock_check & 0x1) && --retry_count);
}

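/*
 * Handle a Port Status Change Event: look up the xhci_port it refers to,
 * handle link state transitions such as device-initiated resume, and
 * notify the root hub.  bogus_port_status marks events that the USB core
 * should not be told about.
 */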
static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 portsc, cmd_reg;
	int max_ports;
	int slot_id;
	unsigned int hcd_portnum;
	struct xhci_bus_state *bus_state;
	bool bogus_port_status = false;
	struct xhci_port *port;

	/* Port status change events always have a successful completion code */
L
Lu Baolu 已提交
1567 1568 1569 1570
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
		xhci_warn(xhci,
			  "WARN: xHC returned failed port status event\n");

M
Matt Evans 已提交
1571
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
S
Sarah Sharp 已提交
1572 1573
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

1574 1575
	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
1576
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
P
Peter Chen 已提交
1577 1578
		inc_deq(xhci, xhci->event_ring);
		return;
1579 1580
	}

1581 1582 1583
	port = &xhci->hw_ports[port_id - 1];
	if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for invalid port %u\n", port_id);
1584
		bogus_port_status = true;
1585 1586 1587
		goto cleanup;
	}

1588 1589 1590 1591 1592 1593 1594
	/* We might get interrupts after shared_hcd is removed */
	if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
		xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
		bogus_port_status = true;
		goto cleanup;
	}

1595
	hcd = port->rhub->hcd;
1596
	bus_state = &xhci->bus_state[hcd_index(hcd)];
1597
	hcd_portnum = port->hcd_portnum;
1598
	portsc = readl(port->addr);
1599

1600
	trace_xhci_handle_port_status(hcd_portnum, portsc);
M
Mathias Nyman 已提交
1601

1602
	if (hcd->state == HC_STATE_SUSPENDED) {
1603 1604 1605 1606
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

1607
	if (hcd->speed >= HCD_USB3 && (portsc & PORT_PLS_MASK) == XDEV_INACTIVE)
1608
		bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
1609

1610
	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
1611 1612
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

1613 1614
		cmd_reg = readl(&xhci->op_regs->command);
		if (!(cmd_reg & CMD_RUN)) {
1615 1616 1617 1618
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

1619
		if (DEV_SUPERSPEED_ANY(portsc)) {
1620
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
1621 1622 1623 1624
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
1625
			bus_state->port_remote_wakeup |= 1 << hcd_portnum;
1626
			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1627
			xhci_set_link_state(xhci, port, XDEV_U0);
1628 1629 1630 1631 1632
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
1633
		} else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
1634
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
1635
			bus_state->resume_done[hcd_portnum] = jiffies +
1636
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
1637
			set_bit(hcd_portnum, &bus_state->resuming_ports);
1638 1639 1640 1641 1642
			/* Do the rest in GetPortStatus after resume time delay.
			 * Avoid polling roothub status before that so that a
			 * usb device auto-resume latency around ~40ms.
			 */
			set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1643
			mod_timer(&hcd->rh_timer,
1644
				  bus_state->resume_done[hcd_portnum]);
1645
			usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1646
			bogus_port_status = true;
1647 1648
		}
	}
1649

1650 1651 1652 1653 1654
	if ((portsc & PORT_PLC) &&
	    DEV_SUPERSPEED_ANY(portsc) &&
	    ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
	     (portsc & PORT_PLS_MASK) == XDEV_U1 ||
	     (portsc & PORT_PLS_MASK) == XDEV_U2)) {
1655
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1656
		/* We've just brought the device into U0/1/2 through either the
1657 1658 1659 1660 1661 1662
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
1663
		slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
1664 1665
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
1666 1667
		if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
			bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
1668
			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1669
			usb_wakeup_notification(hcd->self.root_hub,
1670
					hcd_portnum + 1);
1671 1672 1673
			bogus_port_status = true;
			goto cleanup;
		}
1674
	}
1675

1676 1677 1678 1679 1680
	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state).  If so, let the the driver know it's
	 * out of the RExit state.
	 */
1681
	if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
1682
			test_and_clear_bit(hcd_portnum,
1683
				&bus_state->rexit_ports)) {
1684
		complete(&bus_state->rexit_done[hcd_portnum]);
1685 1686 1687 1688
		bogus_port_status = true;
		goto cleanup;
	}

1689
	if (hcd->speed < HCD_USB3) {
1690
		xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1691 1692 1693 1694
		if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
		    (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
			xhci_cavium_reset_phy_quirk(xhci);
	}
1695

1696
cleanup:
S
Sarah Sharp 已提交
1697
	/* Update event ring dequeue pointer before dropping the lock */
A
Andiry Xu 已提交
1698
	inc_deq(xhci, xhci->event_ring);
S
Sarah Sharp 已提交
1699

1700 1701 1702 1703 1704 1705 1706
	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

1707 1708 1709 1710 1711 1712 1713 1714 1715
	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set.  When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
S
Sarah Sharp 已提交
1716 1717
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
1718
	usb_hcd_poll_rh_status(hcd);
S
Sarah Sharp 已提交
1719 1720 1721
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns 0.
 */
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *start_seg,
		union xhci_trb	*start_trb,
		union xhci_trb	*end_trb,
		dma_addr_t	suspect_dma,
		bool		debug)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (debug)
			xhci_warn(xhci,
				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
				(unsigned long long)suspect_dma,
				(unsigned long long)start_dma,
				(unsigned long long)end_trb_dma,
				(unsigned long long)cur_seg->dma,
				(unsigned long long)end_seg_dma);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}

static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *td,
		enum xhci_ep_reset_type reset_type)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	struct xhci_command *command;
	command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!command)
		return;

	ep->ep_state |= EP_HALTED;

	xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);

	if (reset_type == EP_HARD_RESET) {
		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
		xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td);
	}
	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
			trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
			trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted. The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
		struct xhci_ring *ep_ring, int *status)
{
	struct urb *urb = NULL;

	/* Clean up the endpoint's TD list */
	urb = td->urb;

	/* if a bounce buffer was used to align this td then unmap it */
	xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned).  Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
			  urb->transfer_buffer_length, urb->actual_length);
		urb->actual_length = 0;
		*status = 0;
	}
	list_del_init(&td->td_list);
	/* Was this TD slated to be cancelled but completed anyway? */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	inc_td_cnt(urb);
	/* Giveback the urb when all the tds are completed */
	if (last_td_in_urb(td)) {
		if ((urb->actual_length != urb->transfer_buffer_length &&
		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
		    (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
				 urb, urb->actual_length,
				 urb->transfer_buffer_length, *status);

		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			*status = 0;
		xhci_giveback_urb_in_irq(xhci, td, *status);
	}

	return 0;
}

static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	u32 trb_comp_code;
	int ep_index;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
			trb_comp_code == COMP_STOPPED ||
			trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		return 0;
	}
	if (trb_comp_code == COMP_STALL_ERROR ||
		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
						trb_comp_code)) {
		/* Issue a reset endpoint command to clear the host side
		 * halt, followed by a set dequeue command to move the
		 * dequeue pointer past the TD.
		 * The class driver clears the device side halt later.
		 */
		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
					ep_ring->stream_id, td, EP_HARD_RESET);
	} else {
		/* Update ring dequeue pointer */
		while (ep_ring->dequeue != td->last_trb)
			inc_deq(xhci, ep_ring);
		inc_deq(xhci, ep_ring);
	}

	return xhci_td_cleanup(xhci, td, ep_ring, status);
}

/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
			   union xhci_trb *stop_trb)
{
	u32 sum;
	union xhci_trb *trb = ring->dequeue;
	struct xhci_segment *seg = ring->deq_seg;

	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
		if (!trb_is_noop(trb) && !trb_is_link(trb))
			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
	}
	return sum;
}

/*
 * Process control tds, update urb status and actual_length.
 */
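/*
 * A control TD consists of a setup stage TRB (TRB_SETUP), an optional data
 * stage (a TRB_DATA TRB, possibly continued by TRB_NORMAL TRBs), and a
 * status stage TRB (TRB_STATUS); the TRB type found in the event tells
 * which stage the event completed.
 */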
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;
	u32 remaining, requested;
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	requested = td->urb->transfer_buffer_length;
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (trb_type != TRB_STATUS) {
			xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
				  (trb_type == TRB_DATA) ? "data" : "setup");
			*status = -ESHUTDOWN;
			break;
		}
		*status = 0;
		break;
	case COMP_SHORT_PACKET:
		*status = 0;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
			td->urb->actual_length = remaining;
		else
			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
		goto finish_td;
	case COMP_STOPPED:
		switch (trb_type) {
		case TRB_SETUP:
			td->urb->actual_length = 0;
			goto finish_td;
		case TRB_DATA:
		case TRB_NORMAL:
			td->urb->actual_length = requested - remaining;
			goto finish_td;
		case TRB_STATUS:
			td->urb->actual_length = requested;
			goto finish_td;
		default:
			xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
				  trb_type);
			goto finish_td;
		}
	case COMP_STOPPED_LENGTH_INVALID:
		goto finish_td;
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
						       ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
			 trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL_ERROR:
		/* Did we transfer part of the data (middle) phase? */
		if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
			td->urb->actual_length = requested - remaining;
		else if (!td->urb_length_set)
			td->urb->actual_length = 0;
		goto finish_td;
	}

	/* stopped at setup stage, no data transferred */
	if (trb_type == TRB_SETUP)
		goto finish_td;

	/*
	 * if on data stage then update the actual_length of the URB and flag it
	 * as set, so it won't be overwritten in the event for the last TRB.
	 */
	if (trb_type == TRB_DATA ||
		trb_type == TRB_NORMAL) {
		td->urb_length_set = true;
		td->urb->actual_length = requested - remaining;
		xhci_dbg(xhci, "Waiting for status stage event\n");
		return 0;
	}

	/* at status stage */
	if (!td->urb_length_set)
		td->urb->actual_length = requested;

finish_td:
	return finish_td(xhci, td, event, ep, status);
}

/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool sum_trbs_for_length = false;
	u32 remaining, requested, ep_trb_len;
	int short_framestatus;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->num_tds_done;
	frame = &td->urb->iso_frame_desc[idx];
	requested = frame->length;
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
		-EREMOTEIO : 0;

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (remaining) {
			frame->status = short_framestatus;
			if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
				sum_trbs_for_length = true;
			break;
		}
		frame->status = 0;
		break;
	case COMP_SHORT_PACKET:
		frame->status = short_framestatus;
		sum_trbs_for_length = true;
		break;
	case COMP_BANDWIDTH_OVERRUN_ERROR:
		frame->status = -ECOMM;
		break;
	case COMP_ISOCH_BUFFER_OVERRUN:
	case COMP_BABBLE_DETECTED_ERROR:
		frame->status = -EOVERFLOW;
		break;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
	case COMP_STALL_ERROR:
		frame->status = -EPROTO;
		break;
	case COMP_USB_TRANSACTION_ERROR:
		frame->status = -EPROTO;
		if (ep_trb != td->last_trb)
			return 0;
		break;
	case COMP_STOPPED:
		sum_trbs_for_length = true;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		/* field normally containing residue now contains transferred */
		frame->status = short_framestatus;
		requested = remaining;
		break;
	case COMP_STOPPED_LENGTH_INVALID:
		requested = 0;
		remaining = 0;
		break;
	default:
		sum_trbs_for_length = true;
		frame->status = -1;
		break;
	}

	/* frame actual length: bytes from the TRBs completed so far, plus the
	 * bytes the event TRB moved (its length minus the reported residue).
	 */
	if (sum_trbs_for_length)
		frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
			ep_trb_len - remaining;
	else
		frame->actual_length = requested;

	td->urb->actual_length += frame->actual_length;

	return finish_td(xhci, td, event, ep, status);
}

static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->num_tds_done;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring);
	inc_deq(xhci, ep_ring);

	return xhci_td_cleanup(xhci, td, ep_ring, status);
}

/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	u32 trb_comp_code;
	u32 remaining, requested, ep_trb_len;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	requested = td->urb->transfer_buffer_length;

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* handle success with untransferred data as short packet */
		if (ep_trb != td->last_trb || remaining) {
			xhci_warn(xhci, "WARN Successful completion on short TX\n");
			xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
				 td->urb->ep->desc.bEndpointAddress,
				 requested, remaining);
		}
		*status = 0;
		break;
	case COMP_SHORT_PACKET:
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
			 td->urb->ep->desc.bEndpointAddress,
			 requested, remaining);
		*status = 0;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		td->urb->actual_length = remaining;
		goto finish_td;
	case COMP_STOPPED_LENGTH_INVALID:
		/* stopped on ep trb with invalid length, exclude it */
		ep_trb_len	= 0;
		remaining	= 0;
		break;
	default:
		/* do nothing */
		break;
	}

	if (ep_trb == td->last_trb)
		td->urb->actual_length = requested - remaining;
	else
		td->urb->actual_length =
			sum_trb_lengths(xhci, ep_ring, ep_trb) +
			ep_trb_len - remaining;
finish_td:
	if (remaining > requested) {
		xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
			  remaining);
		td->urb->actual_length = 0;
	}
	return finish_td(xhci, td, event, ep, status);
}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t ep_trb_dma;
	struct xhci_segment *ep_seg;
	union xhci_trb *ep_trb;
	int status = -EINPROGRESS;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int td_num = 0;
	bool handling_skipped_tds = false;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	ep_trb_dma = le64_to_cpu(event->buffer);

	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
			 slot_id);
		goto err_out;
	}

	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
		xhci_err(xhci,
			 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
			  slot_id, ep_index);
		goto err_out;
	}

	/* Some transfer events don't always point to a trb, see xhci 4.17.4 */
	if (!ep_ring) {
		switch (trb_comp_code) {
		case COMP_STALL_ERROR:
		case COMP_USB_TRANSACTION_ERROR:
		case COMP_INVALID_STREAM_TYPE_ERROR:
		case COMP_INVALID_STREAM_ID_ERROR:
			xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0,
						     NULL, EP_SOFT_RESET);
			goto cleanup;
		case COMP_RING_UNDERRUN:
		case COMP_RING_OVERRUN:
		case COMP_STOPPED_LENGTH_INVALID:
			goto cleanup;
		default:
			xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
				 slot_id, ep_index);
			goto err_out;
		}
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
			break;
		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
			trb_comp_code = COMP_SHORT_PACKET;
		else
			xhci_warn_ratelimited(xhci,
					      "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
					      slot_id, ep_index);
		/* fallthrough */
	case COMP_SHORT_PACKET:
		break;
	/* Completion codes for endpoint stopped state */
	case COMP_STOPPED:
		xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
			 slot_id, ep_index);
		break;
	case COMP_STOPPED_LENGTH_INVALID:
		xhci_dbg(xhci,
			 "Stopped on No-op or Link TRB for slot %u ep %u\n",
			 slot_id, ep_index);
		break;
	case COMP_STOPPED_SHORT_PACKET:
		xhci_dbg(xhci,
			 "Stopped with short packet transfer detected for slot %u ep %u\n",
			 slot_id, ep_index);
		break;
	/* Completion codes for endpoint halted state */
	case COMP_STALL_ERROR:
		xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
			 ep_index);
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_SPLIT_TRANSACTION_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
		xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
			 slot_id, ep_index);
		status = -EPROTO;
		break;
	case COMP_BABBLE_DETECTED_ERROR:
		xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
			 slot_id, ep_index);
		status = -EOVERFLOW;
		break;
	/* Completion codes for endpoint error state */
	case COMP_TRB_ERROR:
		xhci_warn(xhci,
			  "WARN: TRB error for slot %u ep %u on endpoint\n",
			  slot_id, ep_index);
		status = -EILSEQ;
		break;
	/* completion codes not indicating endpoint state change */
	case COMP_DATA_BUFFER_ERROR:
		xhci_warn(xhci,
			  "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
			  slot_id, ep_index);
		status = -ENOSR;
		break;
	case COMP_BANDWIDTH_OVERRUN_ERROR:
		xhci_warn(xhci,
			  "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
			  slot_id, ep_index);
		break;
	case COMP_ISOCH_BUFFER_OVERRUN:
		xhci_warn(xhci,
			  "WARN: buffer overrun event for slot %u ep %u on endpoint",
			  slot_id, ep_index);
		break;
	case COMP_RING_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_RING_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_MISSED_SERVICE_ERROR:
		/*
		 * When encountering a missed service error, one or more isoc
		 * tds may have been missed by the xHC.
		 * Set the skip flag of the ep_ring; complete the missed tds as
		 * short transfers when processing the ep_ring next time.
		 */
		ep->skip = true;
		xhci_dbg(xhci,
			 "Miss service interval error for slot %u ep %u, set skip flag\n",
			 slot_id, ep_index);
		goto cleanup;
	case COMP_NO_PING_RESPONSE_ERROR:
		ep->skip = true;
		xhci_dbg(xhci,
			 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
			 slot_id, ep_index);
		goto cleanup;

	case COMP_INCOMPATIBLE_DEVICE_ERROR:
		/* needs disable slot command to recover */
		xhci_warn(xhci,
			  "WARN: detected an incompatible device for slot %u ep %u",
			  slot_id, ep_index);
		status = -EPROTO;
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci,
			  "ERROR Unknown event condition %u for slot %u ep %u, HC probably busted\n",
			  trb_comp_code, slot_id, ep_index);
		goto cleanup;
	}

	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * Don't print warnings if it's due to a stopped
			 * endpoint generating an extra completion event if the
			 * device was suspended, or an event for the last TRB of
			 * a short TD we already got a short event for.
			 * The short TD is already removed from the TD list.
			 */
			if (!(trb_comp_code == COMP_STOPPED ||
			      trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
			      ep_ring->last_td_was_short)) {
				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
						ep_index);
			}
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
					 slot_id, ep_index);
			}
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
				 slot_id, ep_index);
			goto cleanup;
		}

		td = list_first_entry(&ep_ring->td_list, struct xhci_td,
				      td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, ep_trb_dma, false);

		/*
		 * Skip the Force Stopped Event.  The event_trb (event_dma) of
		 * the FSE is not in the current TD pointed to by
		 * ep_ring->dequeue, because the hardware dequeue pointer is
		 * still at the previous TRB of the current TD.  The previous
		 * TRB may be a Link TRB or the last TRB of the previous TD.
		 * The command completion handler will take care of the rest.
		 */
		if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
			   trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
			goto cleanup;
		}

		if (!ep_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD ep_index %d "
					"comp_code %u\n", ep_index,
					trb_comp_code);
				trb_in_td(xhci, ep_ring->deq_seg,
					  ep_ring->dequeue, td->last_trb,
					  ep_trb_dma, true);
				return -ESHUTDOWN;
			}

			skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_PACKET)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci,
				 "Found td. Clear skip flag for slot %u ep %u.\n",
				 slot_id, ep_index);
			ep->skip = false;
		}

		ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
						sizeof(*ep_trb)];

		trace_xhci_handle_transfer(ep_ring,
				(struct xhci_generic_trb *) ep_trb);

		/*
		 * No-op TRB could trigger interrupts in a case where
		 * a URB was killed and a STALL_ERROR happens right
		 * after the endpoint ring stopped. Reset the halted
		 * endpoint. Otherwise, the endpoint remains stalled
		 * indefinitely.
		 */
		if (trb_is_noop(ep_trb)) {
			if (trb_comp_code == COMP_STALL_ERROR ||
			    xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
							      trb_comp_code))
				xhci_cleanup_halted_endpoint(xhci, slot_id,
							     ep_index,
							     ep_ring->stream_id,
							     td, EP_HARD_RESET);
			goto cleanup;
		}

		/* update the urb's actual_length and give back to the core */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			process_isoc_td(xhci, td, ep_trb, event, ep, &status);
		else
			process_bulk_intr_td(xhci, td, ep_trb, event, ep,
					     &status);
cleanup:
		handling_skipped_tds = ep->skip &&
			trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
			trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;

		/*
		 * Do not update event ring dequeue pointer if we're in a loop
		 * processing missed tds.
		 */
		if (!handling_skipped_tds)
			inc_deq(xhci, xhci->event_ring);

	/*
	 * If ep->skip is set, it means there are missed tds on the
	 * endpoint ring that need to be taken care of.
	 * Process them as short transfers until we reach the td pointed
	 * to by the event.
	 */
	} while (handling_skipped_tds);

	return 0;

err_out:
	xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
		 (unsigned long long) xhci_trb_virt_to_dma(
			 xhci->event_ring->deq_seg,
			 xhci->event_ring->dequeue),
		 lower_32_bits(le64_to_cpu(event->buffer)),
		 upper_32_bits(le64_to_cpu(event->buffer)),
		 le32_to_cpu(event->transfer_len),
		 le32_to_cpu(event->flags));
	return -ENODEV;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done.  In future, <0 returns should indicate error code.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	/* Event ring hasn't been allocated yet. */
	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci_err(xhci, "ERROR event ring not ready\n");
		return -ENOMEM;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    xhci->event_ring->cycle_state)
		return 0;

	trace_xhci_handle_event(xhci->event_ring, &event->generic);

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();
	/* FIXME: Handle more event types. */
	switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret >= 0)
			update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_DEV_NOTE):
		handle_device_notification(xhci, event);
		break;
	default:
		/* TRB types 48 and above are vendor defined */
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
		    TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci_warn(xhci, "ERROR unknown event type %d\n",
				  TRB_FIELD_TO_TYPE(
				  le32_to_cpu(event->event_cmd.flags)));
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring);

	/* Are there more items on the event ring?  Caller will call us again to
	 * check.
	 */
	return 1;
}

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	union xhci_trb *event_ring_deq;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	dma_addr_t deq;
	u64 temp_64;
	u32 status;

	spin_lock_irqsave(&xhci->lock, flags);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = readl(&xhci->op_regs->status);
	if (status == ~(u32)0) {
		xhci_hc_died(xhci);
		ret = IRQ_HANDLED;
		goto out;
	}

	if (!(status & STS_EINT))
		goto out;

	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
		ret = IRQ_HANDLED;
		goto out;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	writel(status, &xhci->op_regs->status);

	if (!hcd->msi_enabled) {
		u32 irq_pending;
		irq_pending = readl(&xhci->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		writel(irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING ||
	    xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
				&xhci->ir_set->erst_dequeue);
		ret = IRQ_HANDLED;
		goto out;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event "
					"ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer; keep the low-order
		 * flag bits of erst_dequeue and replace the address bits.
		 */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
	ret = IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&xhci->lock, flags);

	return ret;
}

irqreturn_t xhci_msi_irq(int irq, void *hcd)
{
	return xhci_irq(hcd);
}

/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
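/*
 * Note: the queueing functions below build field4 from the TRB type plus
 * the ring's cycle state (e.g. TRB_TYPE(TRB_NORMAL) | ring->cycle_state),
 * so the hardware only treats the TRB as valid once the cycle bit matches
 * its own cycle state.
 */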
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);

	trace_xhci_queue_trb(ring, trb);

	inc_enq(xhci, ring, more_trbs_coming);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	unsigned int num_trbs_needed;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
		/* fallthrough */
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

	while (1) {
		if (room_on_ring(xhci, ep_ring, num_trbs))
			break;

		if (ep_ring == xhci->cmd_ring) {
			xhci_err(xhci, "Command ring expansion not supported\n");
			return -ENOMEM;
		}

		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
				"ERROR no room on ep ring, try ring expansion");
		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
					mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	/* Advance the enqueue pointer past any link TRBs, updating the cycle
	 * state as we go.
	 */
	while (trb_is_link(ep_ring->enqueue)) {
		/* If we're not dealing with 0.95 hardware or isoc rings
		 * on AMD 0.96 host, clear the chain bit.
		 */
		if (!xhci_link_trb_quirk(xhci) &&
		    !(ep_ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)))
			ep_ring->enqueue->link.control &=
				cpu_to_le32(~TRB_CHAIN);
		else
			ep_ring->enqueue->link.control |=
				cpu_to_le32(TRB_CHAIN);

		wmb();
		ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(ep_ring->enqueue))
			ep_ring->cycle_state ^= 1;

		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;
	}
	return 0;
}

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td	*td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = &urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	return 0;
}

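/*
 * Worked example: TRB_MAX_BUFF_SIZE is 64KB, so a 0x200 byte buffer that
 * starts 0x100 bytes below a 64KB boundary (offset 0xFF00 into the 64KB
 * region) needs DIV_ROUND_UP(0x200 + 0xFF00, 0x10000) = 2 TRBs.
 */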
unsigned int count_trbs(u64 addr, u64 len)
{
	unsigned int num_trbs;

	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}

static inline unsigned int count_trbs_needed(struct urb *urb)
{
	return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
}

static unsigned int count_sg_trbs_needed(struct urb *urb)
{
	struct scatterlist *sg;
	unsigned int i, len, full_len, num_trbs = 0;

	full_len = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		len = sg_dma_len(sg);
		num_trbs += count_trbs(sg_dma_address(sg), len);
		len = min_t(unsigned int, len, full_len);
		full_len -= len;
		if (full_len == 0)
			break;
	}

	return num_trbs;
}

static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
{
	u64 addr, len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	len = urb->iso_frame_desc[i].length;

	return count_trbs(addr, len);
}

static void check_trb_math(struct urb *urb, int running_total)
{
	if (unlikely(running_total != urb->transfer_buffer_length))
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}

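/*
 * Compare the interval the driver put in the URB with the interval stored
 * in the endpoint context, and fix up the URB if they disagree.  Note the
 * units: the endpoint context interval is in microframes, while LS/FS URB
 * intervals are in frames, so e.g. a full-speed URB interval of 4 frames
 * is 32 microframes.
 */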
static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
						struct xhci_ep_ctx *ep_ctx)
{
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;

	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;

	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
}

/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx;

	ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
	check_interval(xhci, urb, ep_ctx);

	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/*
 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
 * packets remaining in the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * For xHCI 0.96 and older, TD size field should be the remaining bytes
 * including this TRB, right shifted by 10
 *
 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
 * This is taken care of in the TRB_TD_SIZE() macro
 *
 * The last TRB in a TD must have the TD size set to zero.
 */
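/*
 * Worked example of the 1.0+ rule: a 3000 byte TD on an endpoint with
 * wMaxPacketSize = 512 has total_packet_count = DIV_ROUND_UP(3000, 512) = 6.
 * When queueing the second 1024-byte TRB (transferred = 1024,
 * trb_buff_len = 1024), TD size = 6 - (2048 / 512) = 2 packets remaining.
 */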
static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
			      int trb_buff_len, unsigned int td_total_len,
			      struct urb *urb, bool more_trbs_coming)
{
	u32 maxp, total_packet_count;

	/* MTK xHCI 0.96 contains some features from 1.0 */
	if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
		return ((td_total_len - transferred) >> 10);

	/* One TRB with a zero-length data packet. */
	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
	    trb_buff_len == td_total_len)
		return 0;

	/* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
	if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
		trb_buff_len = 0;

	maxp = usb_endpoint_maxp(&urb->ep->desc);
	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);

	/* Queueing functions don't count the current TRB into transferred */
	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}

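/*
 * Keep the data queued for a TD aligned to the endpoint's wMaxPacketSize:
 * either shorten the last normal TRB so the TD so far ends on a packet
 * boundary, or fall back to a bounce buffer.  For example, with
 * max_pkt = 512 and 1300 bytes already enqueued, the bounce buffer holds
 * new_buff_len = 512 - (1300 % 512) = 236 bytes, so 1300 + 236 = 1536 is
 * a multiple of 512.
 */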
static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
			 u32 *trb_buff_len, struct xhci_segment *seg)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int unalign;
	unsigned int max_pkt;
	u32 new_buff_len;
	size_t len;

	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
	unalign = (enqd_len + *trb_buff_len) % max_pkt;

	/* we got lucky, last normal TRB data on segment is packet aligned */
	if (unalign == 0)
		return 0;

	xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
		 unalign, *trb_buff_len);

	/* is the last normal TRB alignable by splitting it */
	if (*trb_buff_len > unalign) {
		*trb_buff_len -= unalign;
		xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
		return 0;
	}

	/*
	 * We want enqd_len + trb_buff_len to sum up to a number which is
	 * divisible by the endpoint's wMaxPacketSize. IOW:
	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
	 */
	new_buff_len = max_pkt - (enqd_len % max_pkt);

	if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
		new_buff_len = (urb->transfer_buffer_length - enqd_len);

	/* create a max_pkt sized bounce buffer pointed to by last trb */
	if (usb_urb_dir_out(urb)) {
		len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
				   seg->bounce_buf, new_buff_len, enqd_len);
		/* compare against new_buff_len; seg->bounce_len is only
		 * assigned further down
		 */
		if (len != new_buff_len)
			xhci_warn(xhci,
				"WARN Wrong bounce buffer write length: %zu != %d\n",
				len, new_buff_len);
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_TO_DEVICE);
	} else {
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_FROM_DEVICE);
	}

	if (dma_mapping_error(dev, seg->bounce_dma)) {
		/* try without aligning. Some host controllers survive */
		xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
		return 0;
	}
	*trb_buff_len = new_buff_len;
	seg->bounce_len = new_buff_len;
	seg->bounce_offs = enqd_len;

	xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);

	return 1;
}

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_generic_trb *start_trb;
	struct scatterlist *sg = NULL;
	bool more_trbs_coming = true;
	bool need_zero_pkt = false;
	bool first_trb = true;
	unsigned int num_trbs;
	unsigned int start_cycle, num_sgs = 0;
	unsigned int enqd_len, block_len, trb_buff_len, full_len;
	int sent_len, ret;
	u32 field, length_field, remainder;
	u64 addr, send_addr;

	ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ring)
		return -EINVAL;

	full_len = urb->transfer_buffer_length;
	/* If we have scatter/gather list, we use it. */
	if (urb->num_sgs) {
		num_sgs = urb->num_mapped_sgs;
		sg = urb->sg;
		addr = (u64) sg_dma_address(sg);
		block_len = sg_dma_len(sg);
		num_trbs = count_sg_trbs_needed(urb);
	} else {
		num_trbs = count_trbs_needed(urb);
		addr = (u64) urb->transfer_dma;
		block_len = full_len;
	}
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (unlikely(ret < 0))
		return ret;

	urb_priv = urb->hcpriv;

	/* Deal with URB_ZERO_PACKET - need one more td/trb */
	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
		need_zero_pkt = true;

	td = &urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;
	send_addr = addr;

	/* Queue the TRBs, even if they are zero-length */
	for (enqd_len = 0; first_trb || enqd_len < full_len;
			enqd_len += trb_buff_len) {
		field = TRB_TYPE(TRB_NORMAL);

		/* TRB buffer should not cross 64KB boundaries */
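		/*
		 * e.g. for a (hypothetical) buffer fragment at DMA address
		 * 0x1f800, TRB_BUFF_LEN_UP_TO_BOUNDARY() yields 0x10000 -
		 * 0xf800 = 0x800, so at most 2048 bytes go into this TRB
		 * before the 64KB boundary forces a new one.
		 */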
		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);

		if (enqd_len + trb_buff_len > full_len)
			trb_buff_len = full_len - enqd_len;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else
			field |= ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (enqd_len + trb_buff_len < full_len) {
			field |= TRB_CHAIN;
			if (trb_is_link(ring->enqueue + 1)) {
				if (xhci_align_td(xhci, urb, enqd_len,
						  &trb_buff_len,
						  ring->enq_seg)) {
					send_addr = ring->enq_seg->bounce_dma;
					/* assuming TD won't span 2 segs */
					td->bounce_seg = ring->enq_seg;
				}
			}
		}
		if (enqd_len + trb_buff_len >= full_len) {
			field &= ~TRB_CHAIN;
			field |= TRB_IOC;
			more_trbs_coming = false;
			td->last_trb = ring->enqueue;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
					      full_len, urb, more_trbs_coming);

		length_field = TRB_LEN(trb_buff_len) |
			TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);

		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
				lower_32_bits(send_addr),
				upper_32_bits(send_addr),
				length_field,
				field);

		addr += trb_buff_len;
		sent_len = trb_buff_len;

		while (sg && sent_len >= block_len) {
			/* New sg entry */
			--num_sgs;
			sent_len -= block_len;
			if (num_sgs != 0) {
				sg = sg_next(sg);
				block_len = sg_dma_len(sg);
				addr = (u64) sg_dma_address(sg);
				addr += sent_len;
			}
		}
		block_len -= sent_len;
		send_addr = addr;
	}

	if (need_zero_pkt) {
		ret = prepare_transfer(xhci, xhci->devs[slot_id],
				       ep_index, urb->stream_id,
				       1, urb, 1, mem_flags);
		urb_priv->td[1].last_trb = ring->enqueue;
		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
	}

	check_trb_math(urb, enqd_len);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = &urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
	if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}
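
	/*
	 * The 8 byte setup packet is carried as immediate data in the first
	 * two TRB fields.  For example, a (hypothetical) GET_DESCRIPTOR
	 * request 80 06 00 01 00 00 12 00 packs as field[0] = 0x01000680
	 * (bRequestType | bRequest << 8 | wValue << 16) and field[1] =
	 * 0x00120000 (wIndex | wLength << 16).
	 */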

	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	if (urb->transfer_buffer_length > 0) {
		u32 length_field, remainder;

		remainder = xhci_td_remainder(xhci, 0,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				urb, 1);
		length_field = TRB_LEN(urb->transfer_buffer_length) |
				TRB_TD_SIZE(remainder) |
				TRB_INTR_TARGET(0);
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}

/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD.  Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst.  Basically, for everything but SuperSpeed devices, this field will be
 * zero.  Only xHCI 1.0 host controllers support this field.
 */
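/*
 * For example (hypothetical values): a SuperSpeed endpoint with
 * bMaxBurst = 3 moving 10 packets needs DIV_ROUND_UP(10, 3 + 1) = 3
 * bursts, so the zero based field below is 2.
 */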
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}

/*
 * Returns the number of packets in the last "burst" of packets.  This field is
 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
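/*
 * Continuing the example above (hypothetical values): 10 packets with
 * bMaxBurst = 3 leave a residue of 10 % 4 = 2 packets in the last burst,
 * encoded zero based as 1.  Were the residue 0, the last burst would hold
 * 4 packets and the field would be max_burst = 3.
 */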
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	if (urb->dev->speed >= USB_SPEED_SUPER) {
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	}
	if (total_packet_count == 0)
		return 0;
	return total_packet_count - 1;
}

/*
 * Calculates the Frame ID field of the isochronous TRB, which identifies the
 * target frame on which the Interval associated with this Isochronous
 * Transfer Descriptor will start.  Refer to 4.11.2.5 in the xHCI 1.1 spec.
 *
 * Returns actual frame id on success, negative value on error.
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
		struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 *
	 * If bit [3] of IST is cleared to '0', software can add a TRB no
	 * later than IST[2:0] Microframes before that TRB is scheduled to
	 * be executed.
	 * If bit [3] of IST is set to '1', software can add a TRB no later
	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
	 */
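	/*
	 * e.g. a (hypothetical) HCSPARAMS2 IST field of 0x9 has bit [3] set
	 * and IST[2:0] = 1, so ist below becomes 1 << 3 = 8, i.e. one frame
	 * expressed in microframes.
	 */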
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;

	/* Software shall not schedule an Isoch TD with a Frame ID value that
	 * is less than the Start Frame ID or greater than the End Frame ID,
	 * where:
	 *
	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
	 *
	 * Both the End Frame ID and Start Frame ID values are calculated
	 * in microframes. When software determines the valid Frame ID value,
	 * the End Frame ID value should be rounded down to the nearest Frame
	 * boundary, and the Start Frame ID value should be rounded up to the
	 * nearest Frame boundary.
	 */
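	/*
	 * For example (hypothetical register value): if MFINDEX reads 256
	 * and ist = 8, the valid window is roundup(256 + 8 + 1, 8) = 272
	 * through rounddown(256 + 895 * 8, 8) = 7416 microframes, i.e.
	 * Frame IDs 34 through 927.
	 */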
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	start_frame_id = roundup(current_frame_id + ist + 1, 8);
	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

	start_frame &= 0x7ff;
	start_frame_id = (start_frame_id >> 3) & 0x7ff;
	end_frame_id = (end_frame_id >> 3) & 0x7ff;

	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
		 __func__, index, readl(&xhci->run_regs->microframe_index),
		 start_frame_id, end_frame_id, start_frame);

	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
				start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if ((start_frame > end_frame_id &&
				start_frame < start_frame_id))
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (index == 0) {
		if (ret == -EINVAL || start_frame == start_frame_id) {
			start_frame = start_frame_id + 1;
			if (urb->dev->speed == USB_SPEED_LOW ||
					urb->dev->speed == USB_SPEED_FULL)
				urb->start_frame = start_frame;
			else
				urb->start_frame = start_frame << 3;
			ret = 0;
		}
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
				start_frame, current_frame_id, index,
				start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		return ret;
	}

	return start_frame;
}

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;
	struct xhci_virt_ep *xep;
	int frame_id;

	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}
	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the TRBs for each TD, even if they are zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_pkt_count, max_pkt;
		unsigned int burst_count, last_burst_pkt_count;
		u32 sia_frame_id;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

		/* A zero-length transfer still involves at least one packet. */
		if (total_pkt_count == 0)
			total_pkt_count++;
		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
							urb, total_pkt_count);

		trbs_per_td = count_isoc_trbs_needed(urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}
		td = &urb_priv->td[i];

		/* use SIA as default, if frame id is used overwrite it */
		sia_frame_id = TRB_SIA;
		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
		    HCC_CFC(xhci->hcc_params)) {
			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
			if (frame_id >= 0)
				sia_frame_id = TRB_FRAME_ID(frame_id);
		}
		/*
		 * Set isoc specific data for the first TRB in a TD.
		 * Prevent HW from getting the TRBs by keeping the cycle state
		 * inverted in the first TD's isoc TRB.
		 */
		field = TRB_TYPE(TRB_ISOC) |
			TRB_TLBPC(last_burst_pkt_count) |
			sia_frame_id |
			(i ? ep_ring->cycle_state : !start_cycle);

		/* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
		if (!xep->use_extended_tbc)
			field |= TRB_TBC(burst_count);

		/* fill the rest of the TRB fields, and remaining normal TRBs */
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;

			/* only first TRB is isoc, overwrite otherwise */
			if (!first_trb)
				field = TRB_TYPE(TRB_NORMAL) |
					ep_ring->cycle_state;

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Set the chain bit for all except the last TRB */
			if (j < trbs_per_td - 1) {
				more_trbs_coming = true;
				field |= TRB_CHAIN;
			} else {
				more_trbs_coming = false;
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				/* set BEI, except for the last TD */
				if (xhci->hci_version >= 0x100 &&
				    !(xhci->quirks & XHCI_AVOID_BEI) &&
				    i < num_tds - 1)
					field |= TRB_BEI;
			}
			/* Calculate TRB length */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			remainder = xhci_td_remainder(xhci, running_total,
						   trb_buff_len, td_len,
						   urb, more_trbs_coming);

			length_field = TRB_LEN(trb_buff_len) |
				TRB_INTR_TARGET(0);

			/* xhci 1.1 with ETE uses TD Size field for TBC */
			if (first_trb && xep->use_extended_tbc)
				length_field |= TRB_TD_SIZE_TBC(burst_count);
			else
				length_field |= TRB_TD_SIZE(remainder);
			first_trb = false;

			queue_trb(xhci, ep_ring, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* store the next frame id */
	if (HCC_CFC(xhci->hcc_params))
		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i].td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit.  That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them.  td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0].last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0].first_trb;
	ep_ring->enq_seg = urb_priv->td[0].start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

/*
 * Check transfer ring to guarantee there is enough room for the urb.
 * Update ISO URB start_frame and interval.
 * Update interval as xhci_queue_intr_tx does.  Use xhci frame_index to
 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
 * Contiguous Frame ID is not supported by HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int num_tds, num_trbs, i;
	int ret;
	struct xhci_virt_ep *xep;
	int ist;

	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	/*
	 * Check interval value. This should be done before we start to
	 * calculate the start frame value.
	 */
	check_interval(xhci, urb, ep_ctx);

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
			urb->start_frame = xep->next_frame_id;
			goto skip_start_over;
		}
	}

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;
	/*
	 * Round up to the next frame and consider the time before the TRB
	 * really gets scheduled by hardware.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
	start_frame += ist + XHCI_CFC_DELAY;
	start_frame = roundup(start_frame, 8);

	/*
	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
	 * is greater than 8 microframes.
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL) {
		start_frame = roundup(start_frame, urb->interval << 3);
		urb->start_frame = start_frame >> 3;
	} else {
		start_frame = roundup(start_frame, urb->interval);
		urb->start_frame = start_frame;
	}
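
	/*
	 * For example (hypothetical values): a full-speed endpoint with
	 * urb->interval = 4 frames and an adjusted start_frame of 304
	 * microframes rounds up to roundup(304, 4 << 3) = 320, giving
	 * urb->start_frame = 320 >> 3 = 40.
	 */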

skip_start_over:
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}

/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
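/*
 * For example (illustrative): with cmd_ring_reserved_trbs = 1, an ordinary
 * command asks prepare_ring() for room for 1 + 1 = 2 TRBs, while a
 * command_must_succeed command dips into the reserved spots and only asks
 * for 1.
 */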
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
		(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
		return -ESHUTDOWN;
	}

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;

	/* if there are no other commands queued we start the timeout timer */
	if (list_empty(&xhci->cmd_list)) {
		xhci->current_cmd = cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	}

	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
	u32 trb_sct = 0;
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;
	struct xhci_command *cmd;
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
		deq_state->new_deq_seg,
		(unsigned long long)deq_state->new_deq_seg->dma,
		deq_state->new_deq_ptr,
		(unsigned long long)xhci_trb_virt_to_dma(
			deq_state->new_deq_seg, deq_state->new_deq_ptr),
		deq_state->new_cycle_state);

	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
				    deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
			  deq_state->new_deq_seg, deq_state->new_deq_ptr);
		return;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!cmd)
		return;

	ep->queued_deq_seg = deq_state->new_deq_seg;
	ep->queued_deq_ptr = deq_state->new_deq_ptr;
	if (deq_state->stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
		upper_32_bits(addr), trb_stream_id,
		trb_slot_id | trb_ep_index | type, false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return;
	}

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index,
			enum xhci_ep_reset_type reset_type)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	if (reset_type == EP_SOFT_RESET)
		type |= TRB_TSP;

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}