/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
 *    least one free TRB in the ring.  This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
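
/*
 * Illustrative sketch of producer rule 2 above; this helper is
 * hypothetical and not used by the driver.  The producer fills in a
 * TRB, then hands it over by writing its ring cycle state into the
 * TRB's cycle bit, making sure the rest of the TRB is visible to the
 * consumer first.
 */
static inline void example_give_trb_to_consumer(struct xhci_ring *ring,
		union xhci_trb *trb)
{
	u32 field3 = trb->generic.field[3] & ~TRB_CYCLE;

	/* Order the writes that filled in the TRB before the cycle bit
	 * write that transfers ownership to the consumer.
	 */
	wmb();
	trb->generic.field[3] = field3 | (ring->cycle_state & TRB_CYCLE);
}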

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return trb->link.control & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}

static inline int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);
	unsigned long long addr;

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
	if (ring == xhci->event_ring)
		xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
	else if (ring == xhci->cmd_ring)
		xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
	else
		xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer, bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;
	unsigned long long addr;

	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				/*
				 * If the caller doesn't plan on enqueueing more
				 * TDs before ringing the doorbell, then we
				 * don't want to give the link TRB to the
				 * hardware just yet.  We'll give the link TRB
				 * back in prepare_ring() just before we enqueue
				 * the TD at the top of the ring.
				 */
				if (!chain && !more_trbs_coming)
					break;

				/* If we're not dealing with 0.95 hardware,
				 * carry over the chain bit of the previous TRB
				 * (which may mean the chain bit is cleared).
				 */
				if (!xhci_link_trb_quirk(xhci)) {
					next->link.control &= ~TRB_CHAIN;
					next->link.control |= chain;
				}
				/* Give this link TRB to the hardware */
				wmb();
				next->link.control ^= TRB_CYCLE;
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	if (ring == xhci->event_ring)
		xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
	else if (ring == xhci->cmd_ring)
		xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
	else
		xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;
	struct xhci_segment *cur_seg;
	unsigned int left_on_ring;

	/* If we are currently pointing to a link TRB, advance the
	 * enqueue pointer before checking for space */
	while (last_trb(xhci, ring, enq_seg, enq)) {
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	}

	/* Check if ring is empty */
	if (enq == ring->dequeue) {
		/* Can't use link trbs */
		left_on_ring = TRBS_PER_SEGMENT - 1;
		for (cur_seg = enq_seg->next; cur_seg != enq_seg;
				cur_seg = cur_seg->next)
			left_on_ring += TRBS_PER_SEGMENT - 1;

		/* Always need one TRB free in the ring. */
		left_on_ring -= 1;
		if (num_trbs > left_on_ring) {
			xhci_warn(xhci, "Not enough room on ring; "
					"need %u TRBs, %u TRBs left\n",
					num_trbs, left_on_ring);
			return 0;
		}
		return 1;
	}
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}
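
/*
 * Sketch of the simpler bookkeeping the FIXME above suggests, for
 * illustration only: if the ring carried a running count of free TRBs
 * (decremented on enqueue, incremented on dequeue), the room check
 * would reduce to a single comparison.  No num_trbs_free field exists
 * in struct xhci_ring here, so the count is passed in explicitly.
 */
static inline int room_on_ring_counted(unsigned int num_trbs_free,
		unsigned int num_trbs)
{
	/* Keep one TRB free so that enqueue == dequeue still means
	 * "ring empty" rather than "ring full".
	 */
	return num_trbs_free > num_trbs;
}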

void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}
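
/*
 * Sketch of how the pieces above combine when software drains the event
 * ring as its consumer.  The driver's real event handling lives in its
 * interrupt path, not in this hypothetical helper; this only illustrates
 * the pattern: consume TRBs while the cycle bit says they are ours, then
 * publish the new dequeue pointer to the controller.
 */
static inline void example_drain_event_ring(struct xhci_hcd *xhci)
{
	union xhci_trb *event = xhci->event_ring->dequeue;

	while ((event->event_cmd.flags & TRB_CYCLE) ==
			xhci->event_ring->cycle_state) {
		/* ... dispatch on the TRB type here ... */
		inc_deq(xhci, xhci->event_ring, true);
		event = xhci->event_ring->dequeue;
	}
	/* Tell the HC where SW stopped consuming, preserving the EHB bit. */
	xhci_set_hc_event_deq(xhci);
}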

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	u32 temp;

	xhci_dbg(xhci, "// Ding dong!\n");
	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

static void ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;
	unsigned int ep_state;
	u32 field;
	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];

	ep = &xhci->devs[slot_id]->eps[ep_index];
	ep_state = ep->ep_state;
	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
			&& !(ep_state & EP_HALTED)) {
		field = xhci_readl(xhci, db_addr) & DB_MASK;
		field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
		xhci_writel(xhci, field, db_addr);
		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
		 * isn't time-critical and we shouldn't make the CPU wait for
		 * the flush.
		 */
		xhci_readl(xhci, db_addr);
	}
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (!(list_empty(&ep->ring->td_list)))
			ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
	}
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb	*trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK) &&
				(generic_trb->field[3] & LINK_TOGGLE))
			*cycle_state = ~(*cycle_state) & 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return NULL;
	}
	return cur_seg;
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t addr;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	state->new_cycle_state = 0;
	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	state->new_cycle_state = 0x1 & ep_ctx->deq;

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();

	trb = &state->new_deq_ptr->generic;
	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
				(trb->field[3] & LINK_TOGGLE))
		state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
	xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
	ep_ring->dequeue = state->new_deq_ptr;
	ep_ring->deq_seg = state->new_deq_seg;
}

static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK)) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= ~TRB_CHAIN;
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= TRB_CYCLE;
			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);

	cur_td->urb->hcpriv = NULL;
	usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
	xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);

	spin_unlock(&xhci->lock);
	usb_hcd_giveback_urb(hcd, cur_td->urb, status);
	kfree(cur_td);
	spin_lock(&xhci->lock);
	xhci_dbg(xhci, "%s URB given back\n", adjective);
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->xhc_state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_virt_ep *temp_ep;
	struct xhci_ring *ring;
	struct xhci_td *cur_td;
	int ret, i, j;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock(&xhci->lock);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
				"exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock(&xhci->lock);

	ret = xhci_halt(xhci);

	spin_lock(&xhci->lock);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled, so we can set HC_STATE_HALT and notify the
		 * USB core.  But if we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++) {
			temp_ep = &xhci->devs[i]->eps[j];
			ring = temp_ep->ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
					"ep index %u\n", i, j);
			while (!list_empty(&ring->td_list)) {
				cur_td = list_first_entry(&ring->td_list,
						struct xhci_td,
						td_list);
				list_del(&cur_td->td_list);
				if (!list_empty(&cur_td->cancelled_td_list))
					list_del(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
			while (!list_empty(&temp_ep->cancelled_td_list)) {
				cur_td = list_first_entry(
						&temp_ep->cancelled_td_list,
						struct xhci_td,
						cancelled_td_list);
				list_del(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
		}
	}
	spin_unlock(&xhci->lock);
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;
	xhci_dbg(xhci, "Calling usb_hc_died()\n");
	usb_hc_died(xhci_to_hcd(xhci));
	xhci_dbg(xhci, "xHCI host controller is dead.\n");
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
	dev = xhci->devs[slot_id];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for "
				"freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(event->status)) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = ep_ctx->ep_info;
			ep_state &= EP_STATE_MASK;
			slot_state = slot_ctx->dev_state;
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
					GET_COMP_CODE(event->status));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
				ep_ctx->deq);
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void handle_reset_ep_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	int slot_id;
	unsigned int ep_index;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
			(unsigned int) GET_COMP_CODE(event->status));

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}

/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event)
{
	struct xhci_command *command;

	if (list_empty(&virt_dev->cmd_list))
		return 0;

	command = list_entry(virt_dev->cmd_list.next,
			struct xhci_command, cmd_list);
	if (xhci->cmd_ring->dequeue != command->command_trb)
		return 0;

	command->status =
		GET_COMP_CODE(event->status);
	list_del(&command->cmd_list);
	if (command->completion)
		complete(command->completion);
	else
		xhci_free_command(xhci, command);
	return 1;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(event->flags);
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = event->cmd_trb;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id])
			xhci_free_virt_device(xhci, slot_id);
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		/*
		 * Configure endpoint commands can come from the USB core
		 * configuration or alt setting changes, or because the HW
		 * needed an extra configure endpoint command after a reset
		 * endpoint command or streams were being configured.
		 * If the command was for a halted endpoint, the xHCI driver
		 * is not waiting on the configure endpoint command.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
		/* A usb_set_interface() call directly after clearing a halted
		 * condition may race on this quirky hardware.  Not worth
		 * worrying about, since this is prototype hardware.  Not sure
		 * if this will work for streams, but streams support was
		 * untested on this prototype.
		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
				ep_index != (unsigned int) -1 &&
				ctrl_ctx->add_flags - SLOT_FLAG ==
					ctrl_ctx->drop_flags) {
			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			if (!(ep_state & EP_HALTED))
				goto bandwidth_change;
			xhci_dbg(xhci, "Completed config ep cmd - "
					"last ep index = %d, state = %d\n",
					ep_index, ep_state);
			/* Clear internal halted state and restart ring(s) */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
			break;
		}
bandwidth_change:
		xhci_dbg(xhci, "Completed config ep cmd\n");
		xhci->devs[slot_id]->cmd_status =
			GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		++xhci->noops_handled;
		break;
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		slot_id = TRB_TO_SLOT_ID(
				xhci->cmd_ring->dequeue->generic.field[3]);
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	case TRB_TYPE(TRB_NEC_GET_FW):
		if (!(xhci->quirks & XHCI_NEC_HOST)) {
			xhci->error_bitmask |= 1 << 6;
			break;
		}
		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
				NEC_FW_MAJOR(event->status),
				NEC_FW_MINOR(event->status));
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]);
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 port_id;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	/* FIXME: core doesn't care about all port link state changes yet */
	port_id = GET_PORT_ID(event->generic.field[0]);
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);
	xhci_set_hc_event_deq(xhci);

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb	*start_trb,
		union xhci_trb	*end_trb,
		dma_addr_t	suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}

static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	ep->ep_state |= EP_HALTED;
	ep->stopped_td = td;
	ep->stopped_trb = event_trb;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;
	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted. The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = NULL;
	int status = -EINPROGRESS;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	xhci_dbg(xhci, "In %s\n", __func__);
	slot_id = TRB_TO_SLOT_ID(event->flags);
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(event->flags) - 1;
	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
				"or incorrect stream ring\n");
		return -ENODEV;
	}

	event_dma = event->buffer;
	/* This TRB should be in the TD at the head of this ring's TD list */
	xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
	if (list_empty(&ep_ring->td_list)) {
		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
				TRB_TO_SLOT_ID(event->flags), ep_index);
		xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
				(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
		xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
		urb = NULL;
		goto cleanup;
	}
	xhci_dbg(xhci, "%s - getting list entry\n", __func__);
	td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);

	/* Is this a TRB in the currently executing TD? */
	xhci_dbg(xhci, "%s - looking for TD\n", __func__);
	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
			td->last_trb, event_dma);
	xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
	if (!event_seg) {
		/* HC is busted, give up! */
		xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
		return -ESHUTDOWN;
	}
	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
	xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
			(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
	xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
			lower_32_bits(event->buffer));
	xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
			upper_32_bits(event->buffer));
	xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
			(unsigned int) event->transfer_len);
	xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
			(unsigned int) event->flags);

	/* Look for common error cases */
	trb_comp_code = GET_COMP_CODE(event->transfer_len);
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STALL:
		xhci_warn(xhci, "WARN: Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_warn(xhci, "WARN: babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
		urb = NULL;
		goto cleanup;
	}
	/* Now update the urb's actual_length and give back to the core */
	/* Was this a control transfer? */
	if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
		xhci_debug_trb(xhci, xhci->event_ring->dequeue);
		switch (trb_comp_code) {
		case COMP_SUCCESS:
			if (event_trb == ep_ring->dequeue) {
				xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
				status = -ESHUTDOWN;
			} else if (event_trb != td->last_trb) {
				xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
				status = -ESHUTDOWN;
			} else {
				xhci_dbg(xhci, "Successful control transfer!\n");
				status = 0;
			}
			break;
		case COMP_SHORT_TX:
			xhci_warn(xhci, "WARN: short transfer on control ep\n");
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				status = -EREMOTEIO;
			else
				status = 0;
			break;

		default:
			if (!xhci_requires_manual_halt_cleanup(xhci,
						ep_ctx, trb_comp_code))
				break;
			xhci_dbg(xhci, "TRB error code %u, "
					"halted endpoint index = %u\n",
					trb_comp_code, ep_index);
			/* else fall through */
		case COMP_STALL:
			/* Did we transfer part of the data (middle) phase? */
			if (event_trb != ep_ring->dequeue &&
					event_trb != td->last_trb)
				td->urb->actual_length =
					td->urb->transfer_buffer_length
					- TRB_LEN(event->transfer_len);
			else
				td->urb->actual_length = 0;

			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, 0, td, event_trb);
			goto td_cleanup;
		}
		/*
		 * Did we transfer any data, despite the errors that might have
		 * happened?  I.e. did we get past the setup stage?
		 */
		if (event_trb != ep_ring->dequeue) {
			/* The event was for the status stage */
			if (event_trb == td->last_trb) {
				if (td->urb->actual_length != 0) {
					/* Don't overwrite a previously set error code */
					if ((status == -EINPROGRESS ||
								status == 0) &&
							(td->urb->transfer_flags
							 & URB_SHORT_NOT_OK))
						/* Did we already see a short data stage? */
						status = -EREMOTEIO;
				} else {
					td->urb->actual_length =
						td->urb->transfer_buffer_length;
				}
			} else {
				/* Maybe the event was for the data stage? */
				if (trb_comp_code != COMP_STOP_INVAL) {
					/* We didn't stop on a link TRB in the middle */
					td->urb->actual_length =
						td->urb->transfer_buffer_length -
						TRB_LEN(event->transfer_len);
					xhci_dbg(xhci, "Waiting for status stage event\n");
					urb = NULL;
					goto cleanup;
				}
			}
		}
	} else {
		switch (trb_comp_code) {
		case COMP_SUCCESS:
			/* Double check that the HW transferred everything. */
			if (event_trb != td->last_trb) {
				xhci_warn(xhci, "WARN Successful completion "
						"on short TX\n");
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					status = -EREMOTEIO;
				else
					status = 0;
			} else {
				if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
					xhci_dbg(xhci, "Successful bulk "
							"transfer!\n");
				else
					xhci_dbg(xhci, "Successful interrupt "
							"transfer!\n");
				status = 0;
			}
			break;
		case COMP_SHORT_TX:
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				status = -EREMOTEIO;
			else
				status = 0;
			break;
		default:
			/* Others already handled above */
			break;
		}
		dev_dbg(&td->urb->dev->dev,
				"ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				TRB_LEN(event->transfer_len));
		/* Fast path - was this the last TRB in the TD for this URB? */
		if (event_trb == td->last_trb) {
			if (TRB_LEN(event->transfer_len) != 0) {
				td->urb->actual_length =
					td->urb->transfer_buffer_length -
					TRB_LEN(event->transfer_len);
				if (td->urb->transfer_buffer_length <
						td->urb->actual_length) {
					xhci_warn(xhci, "HC gave bad length "
							"of %d bytes left\n",
							TRB_LEN(event->transfer_len));
					td->urb->actual_length = 0;
					if (td->urb->transfer_flags &
							URB_SHORT_NOT_OK)
						status = -EREMOTEIO;
					else
						status = 0;
				}
				/* Don't overwrite a previously set error code */
				if (status == -EINPROGRESS) {
					if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
						status = -EREMOTEIO;
					else
						status = 0;
				}
			} else {
				td->urb->actual_length = td->urb->transfer_buffer_length;
				/* Ignore a short packet completion if the
				 * untransferred length was zero.
				 */
				if (status == -EREMOTEIO)
					status = 0;
			}
		} else {
			/* Slow path - walk the list, starting from the dequeue
			 * pointer, to get the actual length transferred.
			 */
			union xhci_trb *cur_trb;
			struct xhci_segment *cur_seg;

			td->urb->actual_length = 0;
			for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
					cur_trb != event_trb;
					next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
				if ((cur_trb->generic.field[3] &
				 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
				    (cur_trb->generic.field[3] &
				 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
					td->urb->actual_length +=
						TRB_LEN(cur_trb->generic.field[2]);
			}
			/* If the ring didn't stop on a Link or No-op TRB, add
			 * in the actual bytes transferred from the Normal TRB
			 */
			if (trb_comp_code != COMP_STOP_INVAL)
				td->urb->actual_length +=
					TRB_LEN(cur_trb->generic.field[2]) -
					TRB_LEN(event->transfer_len);
		}
	}
	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		ep->stopped_trb = event_trb;
	} else {
		if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD.  We can't do that here because
			 * the halt condition must be cleared first.  Let the
			 * USB class driver clear the stall later.
			 */
			ep->stopped_td = td;
			ep->stopped_trb = event_trb;
			ep->stopped_stream = ep_ring->stream_id;
		} else if (xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code)) {
			/* Other types of errors halt the endpoint, but the
			 * class driver doesn't call usb_reset_endpoint() unless
			 * the error is -EPIPE.  Clear the halted status in the
			 * xHCI hardware manually.
			 */
			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, ep_ring->stream_id, td, event_trb);
		} else {
			/* Update ring dequeue pointer */
			while (ep_ring->dequeue != td->last_trb)
				inc_deq(xhci, ep_ring, false);
			inc_deq(xhci, ep_ring, false);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;
		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned).  Play it safe and say we didn't
		 * transfer anything.
		 */
		if (urb->actual_length > urb->transfer_buffer_length) {
			xhci_warn(xhci, "URB transfer length is wrong, "
					"xHC issue? req. len = %u, "
					"act. len = %u\n",
					urb->transfer_buffer_length,
					urb->actual_length);
			urb->actual_length = 0;
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				status = -EREMOTEIO;
			else
				status = 0;
		}
		list_del(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list))
			list_del(&td->cancelled_td_list);

		/* Leave the TD around for the reset endpoint function to use
		 * (but only if it's not a control endpoint, since we already
		 * queued the Set TR dequeue pointer command for stalled
		 * control endpoints).
		 */
		if (usb_endpoint_xfer_control(&urb->ep->desc) ||
			(trb_comp_code != COMP_STALL &&
				trb_comp_code != COMP_BABBLE)) {
			kfree(td);
		}
		urb->hcpriv = NULL;
	}
cleanup:
	inc_deq(xhci, xhci->event_ring, true);
	xhci_set_hc_event_deq(xhci);

	/* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */
	if (urb) {
		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
		xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
				urb, urb->actual_length, status);
		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
		spin_lock(&xhci->lock);
	}
	return 0;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock while processing events (e.g. to pass up port status changes).
 */
void xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	xhci_dbg(xhci, "In %s\n", __func__);
	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((event->event_cmd.flags & TRB_CYCLE) !=
			xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return;
	}
	xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);

	/* FIXME: Handle more event types. */
	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
		handle_cmd_completion(xhci, &event->event_cmd);
		xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
		handle_port_status(xhci, event);
		xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
		ret = handle_tx_event(xhci, &event->trans_event);
		xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	default:
		if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci->error_bitmask |= 1 << 3;
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return;
	}

	if (update_ptrs) {
		/* Update SW and HC event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring, true);
		xhci_set_hc_event_deq(xhci);
	}
	/* Are there more items on the event ring? */
	xhci_handle_event(xhci);
}
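
/*
 * Worked example of the ownership check in xhci_handle_event above
 * (editor's illustration, not from the original file): with
 * event_ring->cycle_state = 1, a TRB whose TRB_CYCLE bit is 1 is owned by
 * the OS and gets processed.  Once the dequeue pointer wraps past the last
 * segment's link TRB, cycle_state toggles to 0, and the same bit pattern
 * (cycle bit = 1) now marks a stale TRB the HC has not rewritten yet.
 */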

/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer, bool more_trbs_coming,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = field1;
	trb->field[1] = field2;
	trb->field[2] = field3;
	trb->field[3] = field4;
	inc_enq(xhci, ring, consumer, more_trbs_coming);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	/* Make sure the endpoint has been added to xHC schedule */
	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}
	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
		/* FIXME allocate more room */
		xhci_err(xhci, "ERROR no room on ep ring\n");
		return -ENOMEM;
	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next;

		xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
		next = ring->enqueue;

		while (last_trb(xhci, ring, ring->enq_seg, next)) {

			/* If we're not dealing with 0.95 hardware,
			 * clear the chain bit.
			 */
			if (!xhci_link_trb_quirk(xhci))
				next->link.control &= ~TRB_CHAIN;
			else
				next->link.control |= TRB_CHAIN;

			wmb();
			next->link.control ^= (u32) TRB_CYCLE;

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt()) {
					xhci_dbg(xhci, "queue_trb: Toggle cycle "
						"state for ring %p = %i\n",
						ring, (unsigned int)ring->cycle_state);
				}
			}
			ring->enq_seg = ring->enq_seg->next;
			ring->enqueue = ring->enq_seg->trbs;
			next = ring->enqueue;
		}
	}

	return 0;
}
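
/*
 * Editor's note on the link-TRB handling in prepare_ring above (an
 * illustration, not original text): when the enqueue pointer rests on a
 * link TRB, prepare_ring clears its chain bit (or sets it on 0.95
 * hardware), flips the link TRB's cycle bit to hand it to the HC, and
 * advances enqueue to the first TRB of the next segment.  If that link TRB
 * sat on the last segment, the producer cycle state toggles, e.g. 1 -> 0,
 * before any new TRBs are queued.
 */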

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		struct xhci_td **td,
		gfp_t mem_flags)
{
	int ret;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring,
			ep_ctx->ep_info & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;
	*td = kzalloc(sizeof(struct xhci_td), mem_flags);
	if (!*td)
		return -ENOMEM;
	INIT_LIST_HEAD(&(*td)->td_list);
	INIT_LIST_HEAD(&(*td)->cancelled_td_list);

	ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
	if (unlikely(ret)) {
		kfree(*td);
		return ret;
	}

	(*td)->urb = urb;
	urb->hcpriv = (void *) (*td);
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&(*td)->td_list, &ep_ring->td_list);
	(*td)->start_seg = ep_ring->enq_seg;
	(*td)->first_trb = ep_ring->enqueue;

	return 0;
}

static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_sgs;
	temp = urb->transfer_buffer_length;

	xhci_dbg(xhci, "count sg list trbs: \n");
	num_trbs = 0;
	for_each_sg(urb->sg, sg, num_sgs, i) {
		unsigned int previous_total_trbs = num_trbs;
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg)) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
				i, (unsigned long long)sg_dma_address(sg),
				len, len, num_trbs - previous_total_trbs);

		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	xhci_dbg(xhci, "\n");
	if (!in_interrupt())
		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				num_trbs);
	return num_trbs;
}
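
/*
 * Worked example for the count above (editor's illustration): an sg entry
 * with dma = 0x3f800 and len = 0x12000 gets one TRB for the 0x800 bytes up
 * to the 64KB boundary (running_total = 0x800), then one TRB per further
 * 64KB chunk (0x800 < 0x12000, 0x10800 < 0x12000): three TRBs in total.
 */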

static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
				"TRBs, %d left\n", __func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb, struct xhci_td *td)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	start_trb->field[3] |= start_cycle;
	ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}

/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
			xhci->devs[slot_id]->out_ctx, ep_index);
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}
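
/*
 * Worked example (editor's illustration): a full-speed device whose driver
 * set urb->interval = 4 frames is compared as 4 * 8 = 32 microframes.  If
 * the endpoint context reports 16 microframes, urb->interval is overridden
 * to 16 and converted back to 16 / 8 = 2 frames for the LS/FS device.
 */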

/*
 * The TD size is the number of bytes remaining in the TD (including this TRB),
 * right shifted by 10.
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */
static u32 xhci_td_remainder(unsigned int remainder)
{
	u32 max = (1 << (21 - 17 + 1)) - 1;

	if ((remainder >> 10) >= max)
		return max << 17;
	else
		return (remainder >> 10) << 17;
}
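
/*
 * Example (editor's illustration): with 10240 bytes left in the TD,
 * 10240 >> 10 = 10 and the function returns 10 << 17.  Any remainder of
 * 31744 bytes (31 << 10) or more saturates the field at 31 << 17.
 */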

static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total;
	bool first_trb;
	u64 addr;
	bool more_trbs_coming;

	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_sgs;

	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, &td, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;
	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller than
	 *    the amount of memory allocated for this scatter-gather list.
	 * 3. TRBs buffers can't cross 64KB boundaries.
	 */
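	/* Illustration (editor's addition, not original): for an sg entry at
	 * dma 0x1f000 with sg_dma_len = 0x3000 and a 0x2000-byte URB, the
	 * boundary rule allows 0x10000 - 0xf000 = 0x1000 bytes, which is under
	 * both the entry length and the URB length, so the first TRB carries
	 * 0x1000 bytes.
	 */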
	sg = urb->sg;
	addr = (u64) sg_dma_address(sg);
	this_sg_len = sg_dma_len(sg);
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;
	xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
			trb_buff_len);

	first_trb = true;
	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 field = 0;
		u32 length_field = 0;
		u32 remainder = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb)
			first_trb = false;
		else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}
		xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
				"64KB boundary at %#x, end dma = %#x\n",
				(unsigned int) addr, trb_buff_len, trb_buff_len,
				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
				(unsigned int) addr + trb_buff_len);
		if (TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
					(unsigned int) addr + trb_buff_len);
		}
		remainder = xhci_td_remainder(urb->transfer_buffer_length -
				running_total);
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);
		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, false, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				/* We always want to know if the TRB was short,
				 * or we won't get an event when it completes.
				 * (Unless we use event data TRBs, which are a
				 * waste of space and HC resources.)
				 */
				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer --
		 * Are we done queueing all the TRBs for this sg entry?
		 */
		this_sg_len -= trb_buff_len;
		if (this_sg_len == 0) {
			--num_sgs;
			if (num_sgs == 0)
				break;
			sg = sg_next(sg);
			addr = (u64) sg_dma_address(sg);
			this_sg_len = sg_dma_len(sg);
		} else {
			addr += trb_buff_len;
		}

		trb_buff_len = TRB_MAX_BUFF_SIZE -
			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
		if (running_total + trb_buff_len > urb->transfer_buffer_length)
			trb_buff_len =
				urb->transfer_buffer_length - running_total;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb, td);
	return 0;
}

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	bool more_trbs_coming;
	int start_cycle;
	u32 field, length_field;

	int running_total, trb_buff_len, ret;
	u64 addr;

	if (urb->num_sgs)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
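	/* Worked example of the count above (editor's illustration):
	 * transfer_dma = 0x3ff00 with a 0x300-byte buffer leaves 0x100 bytes
	 * before the 64KB boundary (one TRB), and 0x100 < 0x300 adds one
	 * more, so num_trbs = 2.
	 */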

	if (!in_interrupt())
		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				(unsigned long long)urb->transfer_dma,
				num_trbs);

	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, &td, mem_flags);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/* How much data is in the first TRB? */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
	if (urb->transfer_buffer_length < trb_buff_len)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb)
			first_trb = false;
		else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}
		remainder = xhci_td_remainder(urb->transfer_buffer_length -
				running_total);
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);
		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, false, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				/* We always want to know if the TRB was short,
				 * or we won't get an event when it completes.
				 * (Unless we use event data TRBs, which are a
				 * waste of space and HC resources.)
				 */
				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb, td);
	return 0;
}

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	if (!in_interrupt())
		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
				slot_id, ep_index);
	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, &td, mem_flags);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	queue_trb(xhci, ep_ring, false, true,
			/* FIXME endianness is probably going to bite my ass here. */
			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
			setup->wIndex | setup->wLength << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			TRB_IDT | TRB_TYPE(TRB_SETUP));
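	/* Illustration (editor's addition, assuming a little-endian CPU): a
	 * GET_DESCRIPTOR(Device) setup packet with bRequestType = 0x80,
	 * bRequest = 0x06, wValue = 0x0100, wIndex = 0, wLength = 18 packs
	 * into field1 = 0x01000680 and field2 = 0x00120000 above.
	 */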

	/* If there's data, queue data TRBs */
	field = 0;
	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, false, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				/* Event on short tx */
				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
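	/* Summary of the rule above (editor's note):
	 *   IN data stage present  -> status is OUT (field = 0)
	 *   OUT data stage present -> status is IN  (field = TRB_DIR_IN)
	 *   no data stage          -> status is IN  (field = TRB_DIR_IN)
	 */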
	queue_trb(xhci, ep_ring, false, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb, td);
	return 0;
}

/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	if (!command_must_succeed)
		reserved_trbs++;

	if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) {
		if (!in_interrupt())
			xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return -ENOMEM;
	}
	queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}
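
/*
 * Example of the reservation check above (editor's illustration): with
 * cmd_ring_reserved_trbs = 2, an ordinary command must find room for
 * 2 + 1 = 3 TRBs before it is queued, while a must-succeed command checks
 * only for the 2 reserved slots and consumes one of them.
 */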

/* Queue a no-op command on the command ring */
static int queue_cmd_noop(struct xhci_hcd *xhci)
{
	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
}

/*
 * Place a no-op command on the command ring to test the command and
 * event ring.
 */
void *xhci_setup_one_noop(struct xhci_hcd *xhci)
{
	if (queue_cmd_noop(xhci) < 0)
		return NULL;
	xhci->noops_submitted++;
	return xhci_ring_cmd_db;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}

/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 type = TRB_TYPE(TRB_SET_DEQ);

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
			false);
}