/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */


/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	/* While the controller is stopped no TDs run, so fire the root-hub
	 * timer immediately instead of waiting for a hardware interrupt. */
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	/* Ask for an interrupt when the terminating TD is reached */
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

/* Cancel the interrupt request set by uhci_set_next_interrupt() */
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}

/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* The terminating skeleton QH always points back to the first
	 * FSBR QH.  Make the last async QH point to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 1;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = LINK_TO_QH(uhci->skel_term_qh);
}

/* Turn off Full-Speed Bandwidth Reclamation by breaking the loop in the
 * hardware schedule. */
static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* Remove the link from the last async QH to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 0;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = UHCI_PTR_TERM;
}

/* Mark an URB as wanting FSBR, unless the submitter opted out with
 * URB_NO_FSBR. */
static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	if (!(urb->transfer_flags & URB_NO_FSBR))
		urbp->fsbr = 1;
}

/* An FSBR-wanting URB is making progress: turn FSBR on, or cancel a
 * pending FSBR expiration. */
static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
	if (urbp->fsbr) {
		uhci->fsbr_is_wanted = 1;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
		else if (uhci->fsbr_expiring) {
			/* FSBR was about to expire; keep it alive */
			uhci->fsbr_expiring = 0;
			del_timer(&uhci->fsbr_timer);
		}
	}
}

/* Timer callback: FSBR's grace period has elapsed without renewal,
 * so turn it off. */
static void uhci_fsbr_timeout(unsigned long _uhci)
{
	struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	/* fsbr_expiring may have been cleared in the meantime by
	 * uhci_urbp_wants_fsbr(); only act if it is still set. */
	if (uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 0;
		uhci_fsbr_off(uhci);
	}
	spin_unlock_irqrestore(&uhci->lock, flags);
}


/* Allocate a Transfer Descriptor from the DMA pool and initialize it.
 * Returns NULL on allocation failure. */
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;			/* Not in any frame list yet */

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

/* Return a TD to the DMA pool.  The TD must already be unlinked from
 * both its URB's td_list and the frame list. */
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	/* Freeing a TD that is still linked somewhere is a driver bug */
	if (!list_empty(&td->list))
		dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

/* Fill in a TD's hardware-visible fields, converting each word to the
 * little-endian format the controller expects. */
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

/* Append a TD to its URB's private TD list */
static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

/* Detach a TD from its URB's private TD list */
static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		/* Append behind the last TD already in this frame */
		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		/* The new TD must be fully written out before the
		 * hardware can be pointed at it */
		wmb();
		ltd->link = LINK_TO_TD(td);
	} else {
		/* Frame is empty: the new TD becomes the frame's head */
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = LINK_TO_TD(td);
		uhci->frame_cpu[framenum] = td;
	}
}

/* Unlink a TD from the frame list, patching the hardware links of its
 * neighbors so the schedule remains intact. */
static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		/* td is the first TD in its frame */
		if (list_empty(&td->fl_list)) {
			/* ... and the only one: empty the frame */
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			/* Promote the next TD to frame head */
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = LINK_TO_TD(ntd);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		/* td is in the middle or at the end: bypass it */
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

/* Remove every TD linked into a given frame, pointing the frame past
 * the whole chain in one step. */
static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	if (ftd) {
		/* Point the frame at whatever follows the last TD */
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
		uhci->frame[framenum] = ltd->link;
		uhci->frame_cpu[framenum] = NULL;

		while (!list_empty(&ftd->fl_list))
			list_del_init(ftd->fl_list.prev);
	}
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}

/* Allocate and initialize a Queue Header.  With a non-NULL udev this
 * creates a normal endpoint QH (with a dummy TD for non-isochronous
 * endpoints); with udev == NULL it creates a skeleton QH.
 * Returns NULL on allocation failure. */
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
			qh->dummy_td = uhci_alloc_td(uhci);
			if (!qh->dummy_td) {
				dma_pool_free(uhci->qh_pool, qh, dma_handle);
				return NULL;
			}
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;

		/* Compute the bandwidth load for periodic endpoints,
		 * rounded up to whole microseconds */
		if (qh->type == USB_ENDPOINT_XFER_INT ||
				qh->type == USB_ENDPOINT_XFER_ISOC)
			qh->load = usb_calc_bus_time(udev->speed,
					usb_endpoint_dir_in(&hep->desc),
					qh->type == USB_ENDPOINT_XFER_ISOC,
					le16_to_cpu(hep->desc.wMaxPacketSize))
				/ 1000 + 1;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}

/* Release a QH (and its dummy TD, if any) back to the DMA pool.
 * Normal QHs must be idle; the queue must be empty. */
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		/* Break the endpoint's back-pointer to this QH */
		qh->hep->hcpriv = NULL;
		if (qh->dummy_td)
			uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM)
		goto done;
	qh->element = UHCI_PTR_TERM;

	/* Control pipes don't have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));

done:
	return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;	/* toggle > 1 means "already correct" */

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= __constant_cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	/* Record the endpoint's next toggle value in usbcore's state */
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}

/*
 * Link an Isochronous QH into its skeleton's list
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

	/* Isochronous QHs aren't linked by the hardware */
}

/*
 * Link a high-period interrupt QH into the schedule at the end of its
 * skeleton's list
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

	/* Splice the new QH in after its predecessor; the wmb() makes
	 * sure the QH is fully set up before the hardware can see it */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = LINK_TO_QH(qh);
}

/*
 * Link a period-1 interrupt or async QH into the schedule at the
 * correct spot in the async skeleton's list, and update the FSBR link
 */
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_new_qh;

	/* Find the predecessor QH for our new one and insert it in the list.
	 * The list of QHs is expected to be short, so linear search won't
	 * take too long. */
	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
		if (pqh->skel <= qh->skel)
			break;
	}
	list_add(&qh->node, &pqh->node);

	/* Link it into the schedule */
	qh->link = pqh->link;
	wmb();
	link_to_new_qh = LINK_TO_QH(qh);
	pqh->link = link_to_new_qh;

	/* If this is now the first FSBR QH, link the terminating skeleton
	 * QH to it. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_new_qh;
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = LINK_TO_TD(td);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the correct spot in the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_del(&qh->node);

	if (qh->skel == SKEL_ISO)
		link_iso(uhci, qh);
	else if (qh->skel < SKEL_ASYNC)
		link_interrupt(uhci, qh);
	else
		link_async(uhci, qh);
}

/*
 * Unlink a high-period interrupt QH from the schedule
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	/* Make the predecessor bypass this QH in the hardware chain */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();
}

/*
 * Unlink a period-1 interrupt or async QH from the schedule
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_next_qh = qh->link;

	/* Make the predecessor bypass this QH in the hardware chain */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = link_to_next_qh;

	/* If this was the old first FSBR QH, link the terminating skeleton
	 * QH to the next (new first FSBR) QH. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_next_qh;
	mb();
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	if (qh->skel == SKEL_ISO)
		;	/* Isochronous QHs aren't linked by the hardware */
	else if (qh->skel < SKEL_ASYNC)
		unlink_interrupt(uhci, qh);
	else
		unlink_async(uhci, qh);

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

A
Alan Stern 已提交
605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719
/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
	int best;
	int slot;

	/* Scan every schedule slot this (phase, period) pair occupies
	 * and remember the largest load seen. */
	best = uhci->load[phase];
	for (slot = phase + period; slot < MAX_PHASE; slot += period) {
		if (uhci->load[slot] > best)
			best = uhci->load[slot];
	}
	return best;
}

/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int minimax_load;

	/* Find the optimal phase (unless it is already set) and get
	 * its load value. */
	if (qh->phase >= 0)
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
	else {
		int phase, load;
		int max_phase = min_t(int, MAX_PHASE, qh->period);

		/* Pick the phase whose worst-case load is smallest */
		qh->phase = 0;
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
		for (phase = 1; phase < max_phase; ++phase) {
			load = uhci_highest_load(uhci, phase, qh->period);
			if (load < minimax_load) {
				minimax_load = load;
				qh->phase = phase;
			}
		}
	}

	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
	if (minimax_load + qh->load > 900) {
		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
				"period %d, phase %d, %d + %d us\n",
				qh->period, qh->phase, minimax_load, qh->load);
		return -ENOSPC;
	}
	return 0;
}

/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	/* Add this QH's load to every schedule slot it occupies */
	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] += load;
		uhci->total_load += load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 1;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"reserve", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	/* Subtract this QH's load from every schedule slot it occupies */
	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] -= load;
		uhci->total_load -= load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 0;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"release", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

/* Allocate the driver-private state for an URB and attach it via
 * urb->hcpriv.  Returns NULL on allocation failure. */
static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}

/* Free an URB's private state, releasing all of its TDs first.
 * The URB must already be off its QH's queue. */
static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	return 0;
}

/*
 * Control transfers
 */
/* Build the TD chain for a control URB: one SETUP TD, zero or more DATA
 * TDs, a STATUS TD, and a fresh inactive dummy TD.  The old dummy TD
 * becomes the SETUP TD and is activated last so the hardware never sees
 * a partially built queue.  Returns 0 or -ENOMEM. */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	int skel;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 *
	 * 0-length transfers always get treated as "send".
	 */
	if (usb_pipeout(urb->pipe) || len == 0)
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last data packet */
			pktsze = len;
			status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	/* Change direction for the status transaction */
	destination ^= (USB_PID_IN ^ USB_PID_OUT);
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		skel = SKEL_LS_CONTROL;
	else {
		skel = SKEL_FS_CONTROL;
		uhci_add_fsbr(uhci, urb);
	}
	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = skel;

	urb->actual_length = -8;	/* Account for the SETUP packet */
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

/*
 * Common submit for bulk and interrupt
 */
/* Build the TD chain for a bulk or interrupt URB, reusing the QH's dummy
 * TD as the first TD and appending a fresh inactive dummy at the end.
 * The old dummy is activated last, after a wmb(), so the hardware never
 * sees a half-built queue.  Returns 0, -EINVAL, or -ENOMEM. */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		/* First iteration reuses the dummy TD; later ones
		 * allocate a new TD and chain it in */
		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = LINK_TO_TD(td);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Record the endpoint's next toggle value */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

/* Submit a bulk URB: pick the bulk skeleton, build the TDs, and
 * request FSBR on success. */
static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = SKEL_BULK;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_add_fsbr(uhci, urb);
	return ret;
}

1048
static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
1049
		struct uhci_qh *qh)
L
Linus Torvalds 已提交
1050
{
A
Alan Stern 已提交
1051
	int ret;
1052

1053 1054 1055
	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
L
Linus Torvalds 已提交
1056
	 */
1057

A
Alan Stern 已提交
1058 1059
	if (!qh->bandwidth_reserved) {
		int exponent;
1060

A
Alan Stern 已提交
1061 1062 1063 1064 1065 1066 1067 1068
		/* Figure out which power-of-two queue to use */
		for (exponent = 7; exponent >= 0; --exponent) {
			if ((1 << exponent) <= urb->interval)
				break;
		}
		if (exponent < 0)
			return -EINVAL;
		qh->period = 1 << exponent;
1069
		qh->skel = SKEL_INDEX(exponent);
1070

A
Alan Stern 已提交
1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086
		/* For now, interrupt phase is fixed by the layout
		 * of the QH lists. */
		qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
		ret = uhci_check_bandwidth(uhci, qh);
		if (ret)
			return ret;
	} else if (qh->period > urb->interval)
		return -EINVAL;		/* Can't decrease the period */

	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0) {
		urb->interval = qh->period;
		if (!qh->bandwidth_reserved)
			uhci_reserve_bandwidth(uhci, qh);
	}
	return ret;
L
Linus Torvalds 已提交
1087 1088
}

/*
 * Fix up the data structures following a short transfer
 *
 * Called (with uhci->lock held) after uhci_result_common() sees a short
 * packet that needs special handling.  For control transfers the queue
 * is restarted at the status-stage TD; for bulk/interrupt transfers the
 * toggles of the queued URBs are repaired and the queue restarts at the
 * next URB.  All TDs that were skipped over are removed and freed.
 *
 * Returns -EINPROGRESS when the (control) URB is not finished yet,
 * or 0 when the URB is complete.
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	/* Start from the URB's last TD */
	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = LINK_TO_TD(td);
		/* Free everything before the status TD */
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		/* If no TDs are left, fall back to the last completed TD */
		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		/* Free the URB's entire remaining TD list */
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}

/*
 * Common result for control, bulk, and interrupt
 *
 * Walk the URB's TD list in order, accumulating urb->actual_length.
 * Completed TDs are detached from the URB; the most recently completed
 * one is parked in qh->post_td (needed for toggle fixups and for the
 * stuck-queue workaround) and the previous occupant is freed.
 *
 * Returns 0 if the URB completed normally, -EINPROGRESS if the hardware
 * is still working on it, or a negative error code on failure.
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		/* An active TD means the HC hasn't gotten this far yet */
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(&urb->dev->dev,
						"%s: failed with status %x\n",
						__func__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(uhci, urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		/* Did we receive a short packet? */
		} else if (len < uhci_expected_length(td_token(td))) {

			/* For control transfers, go to the status TD if
			 * this isn't already the last data TD */
			if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
				if (td->list.next != urbp->td_list.prev)
					ret = 1;
			}

			/* For bulk and interrupt, this may be an error */
			else if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;

			/* Fixup needed only if this isn't the URB's last TD */
			else if (&td->list != urbp->td_list.prev)
				ret = 1;
		}

		/* Retire this TD: keep it in post_td, free its predecessor */
		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		/* ret > 0 means a short transfer needing fixup;
		 * ret < 0 means a fatal error */
		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}

L
Linus Torvalds 已提交
1220 1221 1222
/*
 * Isochronous transfers
 */
1223 1224
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
L
Linus Torvalds 已提交
1225
{
1226 1227 1228 1229
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
L
Linus Torvalds 已提交
1230

1231 1232 1233
	/* Values must not be too big (could overflow below) */
	if (urb->interval >= UHCI_NUMFRAMES ||
			urb->number_of_packets >= UHCI_NUMFRAMES)
L
Linus Torvalds 已提交
1234 1235
		return -EFBIG;

1236
	/* Check the period and figure out the starting frame number */
A
Alan Stern 已提交
1237 1238
	if (!qh->bandwidth_reserved) {
		qh->period = urb->interval;
1239
		if (urb->transfer_flags & URB_ISO_ASAP) {
A
Alan Stern 已提交
1240 1241 1242 1243 1244 1245
			qh->phase = -1;		/* Find the best phase */
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;

			/* Allow a little time to allocate the TDs */
1246
			uhci_get_current_frame_number(uhci);
A
Alan Stern 已提交
1247 1248 1249 1250 1251 1252
			frame = uhci->frame_number + 10;

			/* Move forward to the first frame having the
			 * correct phase */
			urb->start_frame = frame + ((qh->phase - frame) &
					(qh->period - 1));
1253
		} else {
1254
			i = urb->start_frame - uhci->last_iso_frame;
1255 1256
			if (i <= 0 || i >= UHCI_NUMFRAMES)
				return -EINVAL;
A
Alan Stern 已提交
1257 1258 1259 1260
			qh->phase = urb->start_frame & (qh->period - 1);
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;
1261
		}
A
Alan Stern 已提交
1262

1263 1264
	} else if (qh->period != urb->interval) {
		return -EINVAL;		/* Can't change the period */
L
Linus Torvalds 已提交
1265

1266 1267
	} else {
		/* Find the next unused frame */
1268
		if (list_empty(&qh->queue)) {
1269
			frame = qh->iso_frame;
1270 1271
		} else {
			struct urb *lurb;
1272

1273
			lurb = list_entry(qh->queue.prev,
1274
					struct urb_priv, node)->urb;
1275 1276 1277
			frame = lurb->start_frame +
					lurb->number_of_packets *
					lurb->interval;
1278
		}
1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290
		if (urb->transfer_flags & URB_ISO_ASAP) {
			/* Skip some frames if necessary to insure
			 * the start frame is in the future.
			 */
			uhci_get_current_frame_number(uhci);
			if (uhci_frame_before_eq(frame, uhci->frame_number)) {
				frame = uhci->frame_number + 1;
				frame += ((qh->phase - frame) &
					(qh->period - 1));
			}
		}	/* Otherwise pick up where the last URB leaves off */
		urb->start_frame = frame;
L
Linus Torvalds 已提交
1291 1292
	}

1293
	/* Make sure we won't have to go too far into the future */
1294
	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
1295 1296 1297 1298 1299 1300 1301
			urb->start_frame + urb->number_of_packets *
				urb->interval))
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

1302
	for (i = 0; i < urb->number_of_packets; i++) {
1303
		td = uhci_alloc_td(uhci);
L
Linus Torvalds 已提交
1304 1305 1306
		if (!td)
			return -ENOMEM;

1307
		uhci_add_td_to_urbp(td, urbp);
1308 1309 1310 1311
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
1312
	}
L
Linus Torvalds 已提交
1313

1314 1315 1316 1317
	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/* Add the TDs to the frame list */
1318 1319
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
1320
		uhci_insert_td_in_frame_list(uhci, td, frame);
1321 1322 1323 1324 1325 1326
		frame += qh->period;
	}

	if (list_empty(&qh->queue)) {
		qh->iso_packet_desc = &urb->iso_frame_desc[0];
		qh->iso_frame = urb->start_frame;
L
Linus Torvalds 已提交
1327 1328
	}

1329
	qh->skel = SKEL_ISO;
A
Alan Stern 已提交
1330 1331
	if (!qh->bandwidth_reserved)
		uhci_reserve_bandwidth(uhci, qh);
1332
	return 0;
L
Linus Torvalds 已提交
1333 1334 1335 1336
}

/*
 * Reap the completed packets of an isochronous URB.
 *
 * TDs are processed in frame order; as soon as we reach a frame the
 * controller hasn't finished yet we return -EINPROGRESS.  Each finished
 * TD's status and actual length are copied into the corresponding
 * iso_frame_desc[] entry, then the TD is removed and freed.  Returns 0
 * once every packet has been reaped.
 */
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int status;
		int actlength;

		/* Stop at the first frame the HC hasn't completed yet */
		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
			return -EINPROGRESS;

		uhci_remove_tds_from_frame(uhci, qh->iso_frame);

		ctrlstat = td_status(td);
		if (ctrlstat & TD_CTRL_ACTIVE) {
			status = -EXDEV;	/* TD was added too late? */
		} else {
			status = uhci_map_status(uhci_status_bits(ctrlstat),
					usb_pipeout(urb->pipe));
			actlength = uhci_actual_length(ctrlstat);

			urb->actual_length += actlength;
			qh->iso_packet_desc->actual_length = actlength;
			qh->iso_packet_desc->status = status;
		}
		if (status)
			urb->error_count++;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
		/* Advance to the next expected frame and descriptor */
		qh->iso_frame += qh->period;
		++qh->iso_packet_desc;
	}
	return 0;
}

/*
 * hc_driver entry point: queue an URB for transfer.
 *
 * Links the URB to its endpoint, allocates the per-URB private data,
 * finds or creates the endpoint's QH, and dispatches to the
 * transfer-type-specific submit routine.  If the URB is the first on an
 * idle/unlinked QH, the QH is activated immediately.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released.
 */
static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto done_not_linked;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	/* Reuse the endpoint's QH if it has one, else allocate a new QH */
	if (urb->ep->hcpriv)
		qh = urb->ep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = uhci_submit_interrupt(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		urb->error_count = 0;
		ret = uhci_submit_isochronous(uhci, urb, qh);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH.  (urbp->qh was already set above,
	 * so there's no need to assign it a second time here.) */
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
		uhci_activate_qh(uhci, qh);
		uhci_urbp_wants_fsbr(uhci, urbp);
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */
err_no_qh:
	uhci_free_urb_priv(uhci, urbp);
done:
	if (ret)
		usb_hcd_unlink_urb_from_ep(hcd, urb);
done_not_linked:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

1448
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1449 1450 1451
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
1452
	struct uhci_qh *qh;
1453
	int rc;
1454 1455

	spin_lock_irqsave(&uhci->lock, flags);
1456 1457
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
1458
		goto done;
1459 1460

	qh = ((struct urb_priv *) urb->hcpriv)->qh;
1461 1462

	/* Remove Isochronous TDs from the frame list ASAP */
1463
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
1464
		uhci_unlink_isochronous_tds(uhci, urb);
1465 1466 1467 1468 1469 1470 1471 1472 1473
		mb();

		/* If the URB has already started, update the QH unlink time */
		uhci_get_current_frame_number(uhci);
		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
			qh->unlink_frame = uhci->frame_number;
	}

	uhci_unlink_qh(uhci, qh);
1474 1475 1476

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
1477
	return rc;
1478 1479
}

/*
 * Finish unlinking an URB and give it back
 *
 * Must be called with uhci->lock held; the lock is dropped around the
 * call to usb_hcd_giveback_urb() (hence the __releases/__acquires
 * annotations) and reacquired before returning, so QH state must be
 * re-checked by callers afterward.
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, int status)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* urb->actual_length < 0 means the setup transaction didn't
		 * complete successfully.  Either it failed or the URB was
		 * unlinked first.  Regardless, don't confuse people with a
		 * negative length. */
		urb->actual_length = max(urb->actual_length, 0);
	}

	/* When giving back the first URB in an Isochronous queue,
	 * reinitialize the QH's iso-related members for the next URB. */
	else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
			urbp->node.prev == &qh->queue &&
			urbp->node.next != &qh->queue) {
		struct urb *nurb = list_entry(urbp->node.next,
				struct urb_priv, node)->urb;

		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
		qh->iso_frame = nurb->start_frame;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_free_urb_priv(uhci, urbp);
	usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);

	/* The completion handler may resubmit URBs, so drop the lock */
	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);
		if (qh->bandwidth_reserved)
			uhci_release_bandwidth(uhci, qh);
	}
}
L
Linus Torvalds 已提交
1535

1536
/*
1537
 * Scan the URBs in a QH's queue
1538
 */
1539 1540 1541
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
L
Linus Torvalds 已提交
1542

1543
static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
L
Linus Torvalds 已提交
1544 1545
{
	struct urb_priv *urbp;
1546 1547
	struct urb *urb;
	int status;
L
Linus Torvalds 已提交
1548

1549 1550 1551
	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;
L
Linus Torvalds 已提交
1552

1553
		if (qh->type == USB_ENDPOINT_XFER_ISOC)
1554
			status = uhci_result_isochronous(uhci, urb);
1555
		else
1556 1557 1558
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;
L
Linus Torvalds 已提交
1559

1560 1561
		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
A
Alan Stern 已提交
1562
		if (urb->unlinked) {
1563 1564 1565 1566 1567
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}
L
Linus Torvalds 已提交
1568

A
Alan Stern 已提交
1569
		uhci_giveback_urb(uhci, qh, urb, status);
1570
		if (status < 0)
1571 1572
			break;
	}
L
Linus Torvalds 已提交
1573

1574 1575
	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
1576 1577 1578
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
1579
		return;
L
Linus Torvalds 已提交
1580

1581
	/* Otherwise give back each of the dequeued URBs */
1582
restart:
1583 1584
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
A
Alan Stern 已提交
1585
		if (urb->unlinked) {
1586 1587 1588 1589 1590 1591 1592 1593

			/* Fix up the TD links and save the toggles for
			 * non-Isochronous queues.  For Isochronous queues,
			 * test for too-recent dequeues. */
			if (!uhci_cleanup_queue(uhci, qh, urb)) {
				qh->is_stopped = 0;
				return;
			}
A
Alan Stern 已提交
1594
			uhci_giveback_urb(uhci, qh, urb, 0);
1595 1596 1597 1598
			goto restart;
		}
	}
	qh->is_stopped = 0;
L
Linus Torvalds 已提交
1599

1600 1601 1602 1603 1604
	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);
A
Alan Stern 已提交
1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616

		/* If the first URB on the queue wants FSBR but its time
		 * limit has expired, set the next TD to interrupt on
		 * completion before reactivating the QH. */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		if (urbp->fsbr && qh->wait_expired) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			td->status |= __cpu_to_le32(TD_CTRL_IOC);
		}

1617
		uhci_activate_qh(uhci, qh);
L
Linus Torvalds 已提交
1618 1619
	}

1620 1621 1622 1623
	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
L
Linus Torvalds 已提交
1624 1625
}

/*
 * Check for queues that have made some forward progress.
 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
 * has not advanced since last examined; 1 otherwise.
 *
 * Early Intel controllers have a bug which causes qh->element sometimes
 * not to advance when a TD completes successfully.  The queue remains
 * stuck on the inactive completed TD.  We detect such cases and advance
 * the element pointer by hand.
 */
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	int ret = 1;
	unsigned status;

	/* Isochronous queues always count as "advanced" */
	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		goto done;

	/* Treat an UNLINKING queue as though it hasn't advanced.
	 * This is okay because reactivation will treat it as though
	 * it has advanced, and if it is going to become IDLE then
	 * this doesn't matter anyway.  Furthermore it's possible
	 * for an UNLINKING queue not to have any URBs at all, or
	 * for its first URB not to have any TDs (if it was dequeued
	 * just as it completed).  So it's not easy in any case to
	 * test whether such queues have advanced. */
	if (qh->state != QH_STATE_ACTIVE) {
		urbp = NULL;
		status = 0;

	} else {
		/* Progress test: is the queue's first TD still active? */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		status = td_status(td);
		if (!(status & TD_CTRL_ACTIVE)) {

			/* We're okay, the queue has advanced */
			qh->wait_expired = 0;
			qh->advance_jiffies = jiffies;
			goto done;
		}
		ret = 0;
	}

	/* The queue hasn't advanced; check for timeout */
	if (qh->wait_expired)
		goto done;

	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

		/* Detect the Intel bug and work around it */
		if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) {
			/* Manually advance past the stuck inactive TD */
			qh->element = qh->post_td->link;
			qh->advance_jiffies = jiffies;
			ret = 1;
			goto done;
		}

		qh->wait_expired = 1;

		/* If the current URB wants FSBR, unlink it temporarily
		 * so that we can safely set the next TD to interrupt on
		 * completion.  That way we'll know as soon as the queue
		 * starts moving again. */
		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
			uhci_unlink_qh(uhci, qh);

	} else {
		/* Unmoving but not-yet-expired queues keep FSBR alive */
		if (urbp)
			uhci_urbp_wants_fsbr(uhci, urbp);
	}

done:
	return ret;
}

/*
 * Process events in the schedule, but only in one thread at a time
 *
 * Re-entrant calls (e.g. from the IRQ handler while the rh_timer path
 * is already scanning) just set need_rescan and return; the active
 * scanner loops until no rescan is pending.  Also manages the FSBR
 * idle-off timer and the termination-TD IOC bit.
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;
	uhci->fsbr_is_wanted = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);
	/* Snapshot the frame number for consistent iso completion checks */
	uhci->cur_iso_frame = uhci->frame_number;

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		/* uhci->next_qh lets uhci_scan_qh remove QHs safely */
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);

			if (uhci_advance_check(uhci, qh)) {
				uhci_scan_qh(uhci, qh);
				if (qh->state == QH_STATE_ACTIVE) {
					uhci_urbp_wants_fsbr(uhci,
	list_entry(qh->queue.next, struct urb_priv, node));
				}
			}
		}
	}

	uhci->last_iso_frame = uhci->cur_iso_frame;
	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	/* Nobody wanted FSBR this pass: schedule its delayed turn-off */
	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
			!uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 1;
		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
	}

	/* Keep interrupting only while QHs are waiting to be unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}