/*
 * drivers/base/dd.c - The core device/driver interactions.
 *
 * This file contains the (sometimes tricky) code that controls the
 * interactions between devices and drivers, which primarily includes
 * driver binding and unbinding.
 *
 * All of this code used to exist in drivers/base/bus.c, but was
 * relocated to here in the name of compartmentalization (since it wasn't
 * strictly code just for the 'struct bus_type').
 *
 * Copyright (c) 2002-5 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2007-2009 Novell Inc.
 *
 * This file is released under the GPLv2
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/devinfo.h>

#include "base.h"
#include "power/power.h"
/*
 * Deferred Probe infrastructure.
 *
 * Sometimes driver probe order matters, but the kernel doesn't always have
 * dependency information which means some drivers will get probed before a
 * resource it depends on is available.  For example, an SDHCI driver may
 * first need a GPIO line from an i2c GPIO controller before it can be
 * initialized.  If a required resource is not available yet, a driver can
 * request probing to be deferred by returning -EPROBE_DEFER from its probe hook
 *
 * Deferred probe maintains two lists of devices, a pending list and an active
 * list.  A driver returning -EPROBE_DEFER causes the device to be added to the
 * pending list.  A successful driver probe will trigger moving all devices
 * from the pending to the active list so that the workqueue will eventually
 * retry them.
 *
 * The deferred_probe_mutex must be held any time the deferred_probe_*_list
 * of the (struct device*)->p->deferred_probe pointers are manipulated
 */
static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static struct workqueue_struct *deferred_wq;
/*
 * Bumped on every trigger; lets really_probe() detect a trigger that
 * raced with an in-flight probe and re-trigger deferred processing.
 */
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);

/*
 * deferred_probe_work_func() - Retry probing devices in the active list.
 */
static void deferred_probe_work_func(struct work_struct *work)
{
	struct device *dev;
	struct device_private *private;
	/*
	 * This block processes every device in the deferred 'active' list.
	 * Each device is removed from the active list and passed to
	 * bus_probe_device() to re-attempt the probe.  The loop continues
	 * until every device in the active list is removed and retried.
	 *
	 * Note: Once the device is removed from the list and the mutex is
	 * released, it is possible for the device to get freed by another
	 * thread and cause an illegal pointer dereference.  This code uses
	 * get/put_device() to ensure the device structure cannot disappear
	 * from under our feet.
	 */
	mutex_lock(&deferred_probe_mutex);
	while (!list_empty(&deferred_probe_active_list)) {
		private = list_first_entry(&deferred_probe_active_list,
					typeof(*dev->p), deferred_probe);
		dev = private->device;
		list_del_init(&private->deferred_probe);

		get_device(dev);

		/*
		 * Drop the mutex while probing each device; the probe path may
		 * manipulate the deferred list
		 */
		mutex_unlock(&deferred_probe_mutex);

		/*
		 * Force the device to the end of the dpm_list since
		 * the PM code assumes that the order we add things to
		 * the list is a good order for suspend but deferred
		 * probe makes that very unsafe.
		 */
		device_pm_lock();
		device_pm_move_last(dev);
		device_pm_unlock();

		dev_dbg(dev, "Retrying from deferred list\n");
		bus_probe_device(dev);

		mutex_lock(&deferred_probe_mutex);

		put_device(dev);
	}
	mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);

static void driver_deferred_probe_add(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
115
	if (list_empty(&dev->p->deferred_probe)) {
116
		dev_dbg(dev, "Added to deferred list\n");
117
		list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
118 119 120 121 122 123 124
	}
	mutex_unlock(&deferred_probe_mutex);
}

void driver_deferred_probe_del(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
125
	if (!list_empty(&dev->p->deferred_probe)) {
126
		dev_dbg(dev, "Removed from deferred list\n");
127
		list_del_init(&dev->p->deferred_probe);
128 129 130 131 132 133 134 135 136 137 138
	}
	mutex_unlock(&deferred_probe_mutex);
}

/* Deferred probing is disabled until late_initcall time; see below. */
static bool driver_deferred_probe_enable = false;
/**
 * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
 *
 * This function moves all devices from the pending list to the active
 * list and schedules the deferred probe workqueue to process them.  It
 * should be called anytime a driver is successfully bound to a device.
 *
 * Note, there is a race condition in multi-threaded probe. In the case where
 * more than one device is probing at the same time, it is possible for one
 * probe to complete successfully while another is about to defer. If the second
 * depends on the first, then it will get put on the pending list after the
 * trigger event has already occurred and will be stuck there.
 *
 * The atomic 'deferred_trigger_count' is used to determine if a successful
 * trigger has occurred in the midst of probing a driver. If the trigger count
 * changes in the midst of a probe, then deferred processing should be triggered
 * again.
 */
static void driver_deferred_probe_trigger(void)
{
	if (!driver_deferred_probe_enable)
		return;

	/*
	 * A successful probe means that all the devices in the pending list
	 * should be triggered to be reprobed.  Move all the deferred devices
	 * into the active list so they can be retried by the workqueue
	 */
	mutex_lock(&deferred_probe_mutex);
	atomic_inc(&deferred_trigger_count);
	list_splice_tail_init(&deferred_probe_pending_list,
			      &deferred_probe_active_list);
	mutex_unlock(&deferred_probe_mutex);

	/*
	 * Kick the re-probe thread.  It may already be scheduled, but it is
	 * safe to kick it again.
	 */
	queue_work(deferred_wq, &deferred_probe_work);
}

/**
 * deferred_probe_initcall() - Enable probing of deferred devices
 *
 * We don't want to get in the way when the bulk of drivers are getting probed.
 * Instead, this initcall makes sure that deferred probing is delayed until
 * late_initcall time.
 */
static int deferred_probe_initcall(void)
{
	deferred_wq = create_singlethread_workqueue("deferwq");
	if (WARN_ON(!deferred_wq))
		return -ENOMEM;

	driver_deferred_probe_enable = true;
	driver_deferred_probe_trigger();
	/* Sort as many dependencies as possible before exiting initcalls */
	flush_workqueue(deferred_wq);
	return 0;
}
late_initcall(deferred_probe_initcall);

/*
 * driver_bound - finish binding @dev to dev->driver.
 *
 * Links the device onto the driver's klist of devices, removes it from
 * the deferred-probe lists, triggers a re-probe of other pending devices
 * and notifies the bus that a driver is now bound.  The caller must have
 * already set dev->driver.
 */
static void driver_bound(struct device *dev)
{
	if (klist_node_attached(&dev->p->knode_driver)) {
		printk(KERN_WARNING "%s: device %s already bound\n",
			__func__, kobject_name(&dev->kobj));
		return;
	}

	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
		 __func__, dev_name(dev));

	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);

	/*
	 * Make sure the device is no longer in one of the deferred lists and
	 * kick off retrying all pending devices
	 */
	driver_deferred_probe_del(dev);
	driver_deferred_probe_trigger();

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BOUND_DRIVER, dev);
}

static int driver_sysfs_add(struct device *dev)
{
	int ret;

224 225 226 227
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BIND_DRIVER, dev);

228
	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
229
			  kobject_name(&dev->kobj));
A
Andrew Morton 已提交
230
	if (ret == 0) {
231
		ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
A
Andrew Morton 已提交
232 233
					"driver");
		if (ret)
234
			sysfs_remove_link(&dev->driver->p->kobj,
A
Andrew Morton 已提交
235 236 237
					kobject_name(&dev->kobj));
	}
	return ret;
238 239
}

240 241 242 243 244
static void driver_sysfs_remove(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv) {
245
		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
246 247 248 249 250
		sysfs_remove_link(&dev->kobj, "driver");
	}
}

/**
 * device_bind_driver - bind a driver to one device.
 * @dev: device.
 *
 * Allow manual attachment of a driver to a device.
 * Caller must have already set @dev->driver.
 *
 * Note that this does not modify the bus reference count
 * nor take the bus's rwsem. Please verify those are accounted
 * for before calling this. (It is ok to call with no other effort
 * from a driver's probe() method.)
 *
 * This function must be called with the device lock held.
 */
int device_bind_driver(struct device *dev)
{
	int ret = driver_sysfs_add(dev);

	/* Only complete the binding once the sysfs links exist. */
	if (!ret)
		driver_bound(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);


/* Number of probes currently in flight; see driver_probe_done(). */
static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);

/*
 * really_probe - do the actual probe of @dev by @drv.
 *
 * Returns 1 if the device was bound to the driver and 0 otherwise.
 * Probe errors are deliberately swallowed (ret forced to 0) so the next
 * matching driver can be tried; -EPROBE_DEFER instead queues the device
 * on the deferred-probe pending list.
 *
 * Must be called with @dev locked (via driver_probe_device()).
 */
static int really_probe(struct device *dev, struct device_driver *drv)
{
	int ret = 0;
	/* Snapshot so we can tell if a trigger raced with this probe. */
	int local_trigger_count = atomic_read(&deferred_trigger_count);

	atomic_inc(&probe_count);
	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
		 drv->bus->name, __func__, drv->name, dev_name(dev));
	/* A fresh probe should never start with leftover devres entries. */
	WARN_ON(!list_empty(&dev->devres_head));

	dev->driver = drv;

	/* If using pinctrl, bind pins now before probing */
	ret = pinctrl_bind_pins(dev);
	if (ret)
		goto probe_failed;

	if (driver_sysfs_add(dev)) {
		printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
			__func__, dev_name(dev));
		goto probe_failed;
	}

	/* Let the PM domain prepare the device before the probe callback. */
	if (dev->pm_domain && dev->pm_domain->activate) {
		ret = dev->pm_domain->activate(dev);
		if (ret)
			goto probe_failed;
	}

	/* The bus's probe takes precedence over the driver's. */
	if (dev->bus->probe) {
		ret = dev->bus->probe(dev);
		if (ret)
			goto probe_failed;
	} else if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto probe_failed;
	}

	if (dev->pm_domain && dev->pm_domain->sync)
		dev->pm_domain->sync(dev);

	driver_bound(dev);
	ret = 1;
	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);
	goto done;

probe_failed:
	/* Unwind everything done above, in reverse order. */
	devres_release_all(dev);
	driver_sysfs_remove(dev);
	dev->driver = NULL;
	dev_set_drvdata(dev, NULL);
	if (dev->pm_domain && dev->pm_domain->dismiss)
		dev->pm_domain->dismiss(dev);

	switch (ret) {
	case -EPROBE_DEFER:
		/* Driver requested deferred probing */
		dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
		driver_deferred_probe_add(dev);
		/* Did a trigger occur while probing? Need to re-trigger if yes */
		if (local_trigger_count != atomic_read(&deferred_trigger_count))
			driver_deferred_probe_trigger();
		break;
	case -ENODEV:
	case -ENXIO:
		pr_debug("%s: probe of %s rejects match %d\n",
			 drv->name, dev_name(dev), ret);
		break;
	default:
		/* driver matched but the probe failed */
		printk(KERN_WARNING
		       "%s: probe of %s failed with error %d\n",
		       drv->name, dev_name(dev), ret);
	}
	/*
	 * Ignore errors returned by ->probe so that the next driver can try
	 * its luck.
	 */
	ret = 0;
done:
	atomic_dec(&probe_count);
	/* Wake anyone blocked in wait_for_device_probe(). */
	wake_up(&probe_waitqueue);
	return ret;
}

/**
 * driver_probe_done
 * Determine if the probe sequence is finished or not.
 *
 * Should somehow figure out how to use a semaphore, not an atomic variable...
 */
int driver_probe_done(void)
{
	pr_debug("%s: probe_count = %d\n", __func__,
		 atomic_read(&probe_count));
	/* Busy while any probe is still in flight. */
	return atomic_read(&probe_count) ? -EBUSY : 0;
}


/**
 * wait_for_device_probe
 * Wait for device probing to be completed.
 */
void wait_for_device_probe(void)
{
	/* wait for the known devices to complete their probing */
	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
	/* also wait for any outstanding asynchronous probes to finish */
	async_synchronize_full();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);


/**
 * driver_probe_device - attempt to bind device & driver together
 * @drv: driver to bind a device to
 * @dev: device to try to bind to the driver
 *
 * This function returns -ENODEV if the device is not registered,
 * 1 if the device is bound successfully and 0 otherwise.
 *
 * This function must be called with @dev lock held.  When called for a
 * USB interface, @dev->parent lock must be held as well.
 *
 * If the device has a parent, runtime-resume the parent before driver probing.
 */
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
	int ret = 0;

	if (!device_is_registered(dev))
		return -ENODEV;

	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);

	/* Keep the parent resumed for the duration of the probe attempt. */
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	/* Flush pending runtime-PM transitions of @dev before probing. */
	pm_runtime_barrier(dev);
	ret = really_probe(dev, drv);
	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	return ret;
}

428
bool driver_allows_async_probing(struct device_driver *drv)
429
{
430 431
	switch (drv->probe_type) {
	case PROBE_PREFER_ASYNCHRONOUS:
432 433
		return true;

434 435 436 437
	case PROBE_FORCE_SYNCHRONOUS:
		return false;

	default:
438
		if (module_requested_async_probing(drv->owner))
439
			return true;
440

441 442
		return false;
	}
443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491
}

/* Context passed to __device_attach_driver() via bus_for_each_drv(). */
struct device_attach_data {
	struct device *dev;

	/*
	 * Indicates whether we are considering asynchronous probing or
	 * not. Only initial binding after device or driver registration
	 * (including deferral processing) may be done asynchronously, the
	 * rest is always synchronous, as we expect it is being done by
	 * request from userspace.
	 */
	bool check_async;

	/*
	 * Indicates if we are binding synchronous or asynchronous drivers.
	 * When asynchronous probing is enabled we'll execute 2 passes
	 * over drivers: first pass doing synchronous probing and second
	 * doing asynchronous probing (if synchronous did not succeed -
	 * most likely because there was no driver requiring synchronous
	 * probing - and we found asynchronous driver during first pass).
	 * The 2 passes are done because we can't shoot asynchronous
	 * probe for given device and driver from bus_for_each_drv() since
	 * driver pointer is not guaranteed to stay valid once
	 * bus_for_each_drv() iterates to the next driver on the bus.
	 */
	bool want_async;

	/*
	 * We'll set have_async to 'true' if, while scanning for matching
	 * driver, we'll encounter one that requests asynchronous probing.
	 */
	bool have_async;
};

/*
 * bus_for_each_drv() callback: try to bind data->dev to @drv.  Skips
 * drivers whose sync/async preference does not match the current pass,
 * recording in data->have_async that an async candidate was seen.
 */
static int __device_attach_driver(struct device_driver *drv, void *_data)
{
	struct device_attach_data *attach = _data;
	struct device *dev = attach->dev;
	bool wants_async;

	/*
	 * Another thread (driver loading, device discovery/registration
	 * or deferred probe processing) may have claimed the device
	 * already.
	 */
	if (dev->driver)
		return -EBUSY;

	if (!driver_match_device(drv, dev))
		return 0;

	wants_async = driver_allows_async_probing(drv);
	if (wants_async) {
		/* Tell the caller an async candidate exists. */
		attach->have_async = true;
		/* Skip async drivers during the synchronous pass. */
		if (attach->check_async && !attach->want_async)
			return 0;
	} else if (attach->check_async && attach->want_async) {
		/* Skip sync drivers during the asynchronous pass. */
		return 0;
	}

	return driver_probe_device(drv, dev);
}

/*
 * Async worker for __device_attach(): re-scan the bus for the device,
 * this time considering only drivers that allow asynchronous probing.
 * Runs with the device lock held and the parent runtime-resumed, and
 * drops the device reference taken when the work was scheduled.
 */
static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
{
	struct device *dev = _dev;
	struct device_attach_data data = {
		.dev		= dev,
		.check_async	= true,
		.want_async	= true,
	};

	device_lock(dev);

	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
	dev_dbg(dev, "async probe completed\n");

	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	device_unlock(dev);

	/* Balance the get_device() done by __device_attach(). */
	put_device(dev);
}

/*
 * __device_attach - walk the bus and bind @dev to a matching driver.
 *
 * Returns 1 if the device is (or already was) bound, 0 if no matching
 * driver was found.  When @allow_async is set and the synchronous pass
 * finds no driver but at least one driver prefers async probing, an
 * asynchronous retry is scheduled via __device_attach_async_helper().
 */
static int __device_attach(struct device *dev, bool allow_async)
{
	int ret = 0;

	device_lock(dev);
	if (dev->driver) {
		/* Already fully bound: nothing more to do. */
		if (klist_node_attached(&dev->p->knode_driver)) {
			ret = 1;
			goto out_unlock;
		}
		/* Driver pre-set by the caller: complete the binding. */
		ret = device_bind_driver(dev);
		if (ret == 0)
			ret = 1;
		else {
			dev->driver = NULL;
			ret = 0;
		}
	} else {
		struct device_attach_data data = {
			.dev = dev,
			.check_async = allow_async,
			.want_async = false,
		};

		if (dev->parent)
			pm_runtime_get_sync(dev->parent);

		ret = bus_for_each_drv(dev->bus, NULL, &data,
					__device_attach_driver);
		if (!ret && allow_async && data.have_async) {
			/*
			 * If we could not find appropriate driver
			 * synchronously and we are allowed to do
			 * async probes and there are drivers that
			 * want to probe asynchronously, we'll
			 * try them.
			 */
			dev_dbg(dev, "scheduling asynchronous probe\n");
			get_device(dev);
			async_schedule(__device_attach_async_helper, dev);
		} else {
			pm_request_idle(dev);
		}

		if (dev->parent)
			pm_runtime_put(dev->parent);
	}
out_unlock:
	device_unlock(dev);
	return ret;
}

/**
 * device_attach - try to attach device to a driver.
 * @dev: device.
 *
 * Walk the list of drivers that the bus has and call
 * driver_probe_device() for each pair. If a compatible
 * pair is found, break out and return.
 *
 * Returns 1 if the device was bound to a driver;
 * 0 if no matching driver was found;
 * -ENODEV if the device is not registered.
 *
 * When called for a USB interface, @dev->parent lock must be held.
 */
int device_attach(struct device *dev)
{
	/* Userspace-driven attach: synchronous probing only. */
	return __device_attach(dev, false);
}
EXPORT_SYMBOL_GPL(device_attach);

/* First-time attach at registration time; asynchronous probing allowed. */
void device_initial_probe(struct device *dev)
{
	__device_attach(dev, true);
}

/*
 * bus_for_each_dev() callback used by driver_attach(): try to bind @dev
 * to the driver passed in @data.
 */
static int __driver_attach(struct device *dev, void *data)
{
	struct device_driver *drv = data;

	/*
	 * Lock device and try to bind to it. We drop the error
	 * here and always return 0, because we need to keep trying
	 * to bind to devices and some drivers will return an error
	 * simply if it didn't support the device.
	 *
	 * driver_probe_device() will spit a warning if there
	 * is an error.
	 */

	if (!driver_match_device(drv, dev))
		return 0;

	if (dev->parent)	/* Needed for USB */
		device_lock(dev->parent);
	device_lock(dev);
	if (!dev->driver)
		driver_probe_device(drv, dev);
	device_unlock(dev);
	if (dev->parent)
		device_unlock(dev->parent);

	return 0;
}

/**
 * driver_attach - try to bind driver to devices.
 * @drv: driver.
 *
 * Walk the list of devices that the bus has on it and try to
 * match the driver with each one.  If driver_probe_device()
 * returns 0 and the @dev->driver is set, we've found a
 * compatible pair.
 */
int driver_attach(struct device_driver *drv)
{
	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);

/*
 * __device_release_driver() must be called with @dev lock held.
 * When called for a USB interface, @dev->parent lock must be held as well.
 */
static void __device_release_driver(struct device *dev)
{
	struct device_driver *drv;

	drv = dev->driver;
	if (drv) {
		/* Make sure no async probe of this device is still running. */
		if (driver_allows_async_probing(drv))
			async_synchronize_full();

		/* Keep the device powered while it is being torn down. */
		pm_runtime_get_sync(dev);

		driver_sysfs_remove(dev);

		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBIND_DRIVER,
						     dev);

		pm_runtime_put_sync(dev);

		/* Bus ->remove takes precedence over the driver's. */
		if (dev->bus && dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);
		/* Release managed (devres) resources after ->remove ran. */
		devres_release_all(dev);
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);

		klist_remove(&dev->p->knode_driver);
		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBOUND_DRIVER,
						     dev);

	}
}

/**
 * device_release_driver - manually detach device from driver.
 * @dev: device.
 *
 * Manually detach device from driver.
 * When called for a USB interface, @dev->parent lock must be held.
 */
void device_release_driver(struct device *dev)
{
	/*
	 * If anyone calls device_release_driver() recursively from
	 * within their ->remove callback for the same device, they
	 * will deadlock right here.
	 */
	device_lock(dev);
	__device_release_driver(dev);
	device_unlock(dev);
}
EXPORT_SYMBOL_GPL(device_release_driver);


/**
 * driver_detach - detach driver from all devices it controls.
 * @drv: driver.
 */
void driver_detach(struct device_driver *drv)
{
	struct device_private *dev_prv;
	struct device *dev;

	for (;;) {
		/*
		 * Pop devices off the tail of the driver's klist one at a
		 * time under its spinlock; stop once the list is empty.
		 */
		spin_lock(&drv->p->klist_devices.k_lock);
		if (list_empty(&drv->p->klist_devices.k_list)) {
			spin_unlock(&drv->p->klist_devices.k_lock);
			break;
		}
		dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
				     struct device_private,
				     knode_driver.n_node);
		dev = dev_prv->device;
		/* Pin the device before dropping the spinlock. */
		get_device(dev);
		spin_unlock(&drv->p->klist_devices.k_lock);

		if (dev->parent)	/* Needed for USB */
			device_lock(dev->parent);
		device_lock(dev);
		/* The device may have been unbound/rebound meanwhile. */
		if (dev->driver == drv)
			__device_release_driver(dev);
		device_unlock(dev);
		if (dev->parent)
			device_unlock(dev->parent);
		put_device(dev);
	}
}