#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function.  Only use this if you allocate the
 * space for a struct urb on your own.  If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If no memory is available, NULL is returned.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(sizeof(struct urb) +
		iso_packets * sizeof(struct usb_iso_packet_descriptor),
		mem_flags);
	if (!urb) {
		printk(KERN_ERR "alloc_urb: kmalloc failed\n");
		return NULL;
	}
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);
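
/*
 * Illustrative sketch: allocating an isochronous urb with room for eight
 * packet descriptors.  "iso_urb" is a hypothetical local; the matching
 * usb_free_urb() call drops the reference once the driver is done.
 *
 *	struct urb *iso_urb = usb_alloc_urb(8, GFP_KERNEL);
 *
 *	if (!iso_urb)
 *		return -ENOMEM;
 *	// ... fill the iso_frame_desc[] array and submit the urb ...
 *	usb_free_urb(iso_urb);
 */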

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it.  When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be  called whenever a urb is transferred from a device driver to a
 * host controller driver.  This allows proper reference counting to happen
 * for urbs.
 *
 * A pointer to the urb with the incremented reference counter is returned.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;

	if (unlikely(anchor->poisoned)) {
		atomic_inc(&urb->reject);
	}

	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
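
/*
 * Illustrative sketch: anchoring an urb at submission time so it can be
 * cancelled later through the anchor.  "priv->submitted" is a hypothetical
 * anchor set up with init_usb_anchor(); "urb" is an already-filled urb.
 *
 *	usb_anchor_urb(urb, &priv->submitted);
 *	if (usb_submit_urb(urb, GFP_KERNEL)) {
 *		// submission failed: take the urb off the anchor again
 *		usb_unanchor_urb(urb);
 *		usb_free_urb(urb);
 *	}
 */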

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	usb_put_urb(urb);
	if (list_empty(&anchor->urb_list))
		wake_up(&anchor->wait);
}

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	/*
	 * At this point, we could be competing with another thread which
	 * has the same intention. To protect the urb from being unanchored
	 * twice, only the winner of the race gets the job.
	 */
	if (likely(anchor == urb->anchor))
		__usb_unanchor_urb(urb, anchor);
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);

/*-------------------------------------------------------------------*/

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number.  If the submission is successful, the complete()
 * callback from the URB will be called exactly once, when the USB core and
 * Host Controller Driver (HCD) are finished with the URB.  When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request.  The completion handler may then
 * immediately free or reuse that URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling.  For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units).  And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.  Not all isochronous transfer scheduling policies
 * will work, but most host controller drivers should easily handle ISO
 * queues going from now until 10-200 msec into the future.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  With that queuing policy, an endpoint's queue would never
 * be empty.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one.  This was previously a HCD-specific behavior, except for ISO
 * transfers.  Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected.  If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail.  Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	int				xfertype, max;
	struct usb_device		*dev;
	struct usb_host_endpoint	*ep;
	int				is_out;

	if (!urb || urb->hcpriv || !urb->complete)
		return -EINVAL;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return -ENODEV;

	/* For now, get the endpoint from the pipe.  Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = usb_pipe_endpoint(dev, urb->pipe);
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
				(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
				!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Clear the internal flags and cache the direction for later use */
	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
			URB_DMA_SG_COMBINED);
	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
			dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = le16_to_cpu(ep->desc.wMaxPacketSize);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int	n, len;

		/* SuperSpeed isoc endpoints have up to 16 bursts of up to
		 * 3 packets each
		 */
		if (dev->speed == USB_SPEED_SUPER) {
			int     burst = 1 + ep->ss_ep_comp.bMaxBurst;
			int     mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
			max *= burst;
			max *= mult;
		}

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int	mult = 1 + ((max >> 11) & 0x03);
			max &= 0x07ff;
			max *= mult;
		}

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length > INT_MAX)
		return -EMSGSIZE;

#ifdef DEBUG
	/* stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */
	{
	unsigned int	orig_flags = urb->transfer_flags;
	unsigned int	allowed;
	static int pipetypes[4] = {
		PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
	};

	/* Check that the pipe's type matches the endpoint's type */
	if (usb_pipetype(urb->pipe) != pipetypes[xfertype]) {
		dev_err(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
			usb_pipetype(urb->pipe), pipetypes[xfertype]);
		return -EPIPE;		/* The most suitable error code :-) */
	}

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
			URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_CONTROL:
		allowed |= URB_NO_FSBR;	/* only affects UHCI */
		/* FALLTHROUGH */
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	urb->transfer_flags &= allowed;

	/* fail if submitter gave bogus flags */
	if (urb->transfer_flags != orig_flags) {
		dev_err(&dev->dev, "BOGUS urb flags, %x --> %x\n",
			orig_flags, urb->transfer_flags);
		return -EINVAL;
	}
	}
#endif
	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here.  Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		switch (dev->speed) {
		case USB_SPEED_WIRELESS:
			if (urb->interval < 6)
				return -EINVAL;
			break;
		default:
			if (urb->interval <= 0)
				return -EINVAL;
			break;
		}
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_SUPER:	/* units are 125us */
			/* Handle up to 2^(16-1) microframes */
			if (urb->interval > (1 << 15))
				return -EINVAL;
			max = 1 << 15;
			break;
		case USB_SPEED_WIRELESS:
			if (urb->interval > 16)
				return -EINVAL;
			break;
		case USB_SPEED_HIGH:	/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		if (dev->speed != USB_SPEED_WIRELESS) {
			/* Round down to a power of 2, no more than max */
			urb->interval = min(max, 1 << ilog2(urb->interval));
		}
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
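
/*
 * Illustrative sketch: filling and submitting a bulk IN urb from process
 * context.  The completion handler name, the buffer handling and the
 * endpoint number are hypothetical.
 *
 *	static void my_read_complete(struct urb *urb)
 *	{
 *		if (urb->status)
 *			dev_dbg(&urb->dev->dev, "urb failed: %d\n", urb->status);
 *		// urb->actual_length bytes of urb->transfer_buffer are valid
 *		// here; the handler may resubmit or release the urb
 *	}
 *
 *	static int my_start_read(struct usb_device *udev, void *buf, int len)
 *	{
 *		struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
 *
 *		if (!urb)
 *			return -ENOMEM;
 *		usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 1),
 *				  buf, len, my_read_complete, NULL);
 *		return usb_submit_urb(urb, GFP_KERNEL);
 *	}
 */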

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned.  The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is always asynchronous.  Success is indicated by
 * returning -EINPROGRESS, at which time the URB will probably not yet
 * have been given back to the device driver.  When it is eventually
 * called, the completion function will see @urb->status == -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue.  Normally the queue advances as the controller
 * hardware processes each request.  But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns.  It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns.  The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO.  Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors.  Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates.  Such queues do not stop when an URB
 * encounters an error or is unlinked.  An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set.  By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);
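
/*
 * Illustrative sketch: asynchronous cancellation from atomic context,
 * for instance from a hypothetical timeout path.  "priv->bulk_urb" is a
 * hypothetical urb that was submitted earlier.
 *
 *	int ret = usb_unlink_urb(priv->bulk_urb);
 *
 *	if (ret != -EINPROGRESS)
 *		pr_debug("urb was not in flight: %d\n", ret);
 *	// otherwise the completion handler runs later with
 *	// urb->status == -ECONNRESET
 */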

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse.  These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function.  If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
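
/*
 * Illustrative sketch: stopping I/O synchronously from a disconnect()
 * callback.  "struct my_priv" and its fields are hypothetical.
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_priv *priv = usb_get_intfdata(intf);
 *
 *		usb_kill_urb(priv->int_urb);	// returns only after the
 *						// completion handler is done
 *		usb_free_urb(priv->int_urb);
 *		kfree(priv);
 *	}
 */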

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused.  These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be killed starting
 * from the back of the queue
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it*/
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_kill_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
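
/*
 * Illustrative sketch: if every in-flight urb was anchored when it was
 * submitted, a disconnect() or close() path can stop them all at once.
 * "priv->submitted" is a hypothetical anchor.
 *
 *	usb_kill_anchored_urbs(&priv->submitted);
 *	// on return, none of those urbs' completion handlers is still running
 */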


/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be poisoned starting
 * from the back of the queue. Newly added URBs will also be
 * poisoned
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	anchor->poisoned = 1;
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it*/
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs();
 * the anchor can be used normally after it returns.
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be unlinked starting
 * from the back of the queue. This function is asynchronous.
 * The unlinking is just triggered. It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(victim);
		usb_put_urb(victim);
	}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all of an anchor's
 * URBs have finished
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
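
/*
 * Illustrative sketch: waiting briefly for anchored urbs to drain and
 * killing whatever is left.  The anchor name and the 1000 ms timeout are
 * hypothetical.
 *
 *	if (!usb_wait_anchor_empty_timeout(&priv->submitted, 1000))
 *		usb_kill_anchored_urbs(&priv->submitted);
 */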

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * this will take the oldest urb from an anchor,
 * unanchor and return it
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		__usb_unanchor_urb(victim, anchor);
	} else {
		victim = NULL;
	}
	spin_unlock_irqrestore(&anchor->lock, flags);

	return victim;
}

EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * use this to get rid of all an anchor's urbs
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		__usb_unanchor_urb(victim, anchor);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}

EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * returns 1 if the anchor has no urbs associated with it
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}

EXPORT_SYMBOL_GPL(usb_anchor_empty);